/* SPDX-License-Identifier: LGPL-2.1+ */

#include "alloc-util.h"
#include "blockdev-util.h"
#include "bpf-devices.h"
#include "bpf-firewall.h"
#include "btrfs-util.h"
#include "bus-error.h"
#include "cgroup-util.h"
#include "nulstr-util.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
#include "procfs-util.h"
#include "stat-util.h"
#include "stdio-util.h"
#include "string-table.h"
#include "string-util.h"

#define CGROUP_CPU_QUOTA_DEFAULT_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)

/* Returns the log level to use when cgroup attribute writes fail. When an attribute is missing or we have access
 * problems we downgrade to LOG_DEBUG. This is supposed to be nice to container managers and kernels which want to mask
 * out specific attributes from us. */
#define LOG_LEVEL_CGROUP_WRITE(r) (IN_SET(abs(r), ENOENT, EROFS, EACCES, EPERM) ? LOG_DEBUG : LOG_WARNING)

bool manager_owns_host_root_cgroup(Manager *m) {
        assert(m);

        /* Returns true if we are managing the root cgroup. Note that it isn't sufficient to just check whether the
         * group root path equals "/" since that will also be the case if CLONE_NEWCGROUP is in the mix. Since there
         * appears to be no nice way to detect whether we are in a CLONE_NEWCGROUP namespace we instead just check if
         * we run in any kind of container virtualization. */

        if (MANAGER_IS_USER(m))
                return false;

        if (detect_container() > 0)
                return false;

        return empty_or_root(m->cgroup_root);
}

bool unit_has_host_root_cgroup(Unit *u) {
        assert(u);

        /* Returns whether this unit manages the root cgroup. This will return true if this unit is the root slice and
         * the manager manages the root cgroup. */

        if (!manager_owns_host_root_cgroup(u->manager))
                return false;

        return unit_has_name(u, SPECIAL_ROOT_SLICE);
}

static int set_attribute_and_warn(Unit *u, const char *controller, const char *attribute, const char *value) {
        int r;

        r = cg_set_attribute(controller, u->cgroup_path, attribute, value);
        if (r < 0)
                log_unit_full(u, LOG_LEVEL_CGROUP_WRITE(r), r, "Failed to set '%s' attribute on '%s' to '%.*s': %m",
                              strna(attribute), isempty(u->cgroup_path) ? "/" : u->cgroup_path,
                              (int) strcspn(value, NEWLINE), value);

        return r;
}

static void cgroup_compat_warn(void) {
        static bool cgroup_compat_warned = false;

        if (cgroup_compat_warned)
                return;

        log_warning("cgroup compatibility translation between legacy and unified hierarchy settings activated. "
                    "See cgroup-compat debug messages for details.");

        cgroup_compat_warned = true;
}

#define log_cgroup_compat(unit, fmt, ...) do {                                  \
                cgroup_compat_warn();                                           \
                log_unit_debug(unit, "cgroup-compat: " fmt, ##__VA_ARGS__);     \
        } while (false)

void cgroup_context_init(CGroupContext *c) {
        assert(c);

        /* Initialize everything to the kernel defaults. */

        *c = (CGroupContext) {
                .cpu_weight = CGROUP_WEIGHT_INVALID,
                .startup_cpu_weight = CGROUP_WEIGHT_INVALID,
                .cpu_quota_per_sec_usec = USEC_INFINITY,
                .cpu_quota_period_usec = USEC_INFINITY,

                .cpu_shares = CGROUP_CPU_SHARES_INVALID,
                .startup_cpu_shares = CGROUP_CPU_SHARES_INVALID,

                .memory_high = CGROUP_LIMIT_MAX,
                .memory_max = CGROUP_LIMIT_MAX,
                .memory_swap_max = CGROUP_LIMIT_MAX,

                .memory_limit = CGROUP_LIMIT_MAX,

                .io_weight = CGROUP_WEIGHT_INVALID,
                .startup_io_weight = CGROUP_WEIGHT_INVALID,

                .blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID,
                .startup_blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID,

                .tasks_max = CGROUP_LIMIT_MAX,
        };
}

void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
        assert(c);
        assert(a);

        LIST_REMOVE(device_allow, c->device_allow, a);
        free(a->path);
        free(a);
}

void cgroup_context_free_io_device_weight(CGroupContext *c, CGroupIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->io_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_io_device_latency(CGroupContext *c, CGroupIODeviceLatency *l) {
        assert(c);
        assert(l);

        LIST_REMOVE(device_latencies, c->io_device_latencies, l);
        free(l->path);
        free(l);
}

void cgroup_context_free_io_device_limit(CGroupContext *c, CGroupIODeviceLimit *l) {
        assert(c);
        assert(l);

        LIST_REMOVE(device_limits, c->io_device_limits, l);
        free(l->path);
        free(l);
}

void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->blockio_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b) {
        assert(c);
        assert(b);

        LIST_REMOVE(device_bandwidths, c->blockio_device_bandwidths, b);
        free(b->path);
        free(b);
}

void cgroup_context_done(CGroupContext *c) {
        assert(c);

        while (c->io_device_weights)
                cgroup_context_free_io_device_weight(c, c->io_device_weights);

        while (c->io_device_latencies)
                cgroup_context_free_io_device_latency(c, c->io_device_latencies);

        while (c->io_device_limits)
                cgroup_context_free_io_device_limit(c, c->io_device_limits);

        while (c->blockio_device_weights)
                cgroup_context_free_blockio_device_weight(c, c->blockio_device_weights);

        while (c->blockio_device_bandwidths)
                cgroup_context_free_blockio_device_bandwidth(c, c->blockio_device_bandwidths);

        while (c->device_allow)
                cgroup_context_free_device_allow(c, c->device_allow);

        c->ip_address_allow = ip_address_access_free_all(c->ip_address_allow);
        c->ip_address_deny = ip_address_access_free_all(c->ip_address_deny);
}

void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) {
        CGroupIODeviceLimit *il;
        CGroupIODeviceWeight *iw;
        CGroupIODeviceLatency *l;
        CGroupBlockIODeviceBandwidth *b;
        CGroupBlockIODeviceWeight *w;
        CGroupDeviceAllow *a;
        IPAddressAccessItem *iaai;
        char u[FORMAT_TIMESPAN_MAX];
        char v[FORMAT_TIMESPAN_MAX];

        assert(c);
        assert(f);

        prefix = strempty(prefix);

        fprintf(f,
                "%sCPUAccounting=%s\n"
                "%sIOAccounting=%s\n"
                "%sBlockIOAccounting=%s\n"
                "%sMemoryAccounting=%s\n"
                "%sTasksAccounting=%s\n"
                "%sIPAccounting=%s\n"
                "%sCPUWeight=%" PRIu64 "\n"
                "%sStartupCPUWeight=%" PRIu64 "\n"
                "%sCPUShares=%" PRIu64 "\n"
                "%sStartupCPUShares=%" PRIu64 "\n"
                "%sCPUQuotaPerSecSec=%s\n"
                "%sCPUQuotaPeriodSec=%s\n"
                "%sIOWeight=%" PRIu64 "\n"
                "%sStartupIOWeight=%" PRIu64 "\n"
                "%sBlockIOWeight=%" PRIu64 "\n"
                "%sStartupBlockIOWeight=%" PRIu64 "\n"
                "%sMemoryMin=%" PRIu64 "\n"
                "%sMemoryLow=%" PRIu64 "\n"
                "%sMemoryHigh=%" PRIu64 "\n"
                "%sMemoryMax=%" PRIu64 "\n"
                "%sMemorySwapMax=%" PRIu64 "\n"
                "%sMemoryLimit=%" PRIu64 "\n"
                "%sTasksMax=%" PRIu64 "\n"
                "%sDevicePolicy=%s\n"
                "%sDelegate=%s\n",
                prefix, yes_no(c->cpu_accounting),
                prefix, yes_no(c->io_accounting),
                prefix, yes_no(c->blockio_accounting),
                prefix, yes_no(c->memory_accounting),
                prefix, yes_no(c->tasks_accounting),
                prefix, yes_no(c->ip_accounting),
                prefix, c->cpu_weight,
                prefix, c->startup_cpu_weight,
                prefix, c->cpu_shares,
                prefix, c->startup_cpu_shares,
                prefix, format_timespan(u, sizeof(u), c->cpu_quota_per_sec_usec, 1),
                prefix, format_timespan(v, sizeof(v), c->cpu_quota_period_usec, 1),
                prefix, c->io_weight,
                prefix, c->startup_io_weight,
                prefix, c->blockio_weight,
                prefix, c->startup_blockio_weight,
                prefix, c->memory_min,
                prefix, c->memory_low,
                prefix, c->memory_high,
                prefix, c->memory_max,
                prefix, c->memory_swap_max,
                prefix, c->memory_limit,
                prefix, c->tasks_max,
                prefix, cgroup_device_policy_to_string(c->device_policy),
                prefix, yes_no(c->delegate));

        if (c->delegate) {
                _cleanup_free_ char *t = NULL;

                (void) cg_mask_to_string(c->delegate_controllers, &t);

                fprintf(f, "%sDelegateControllers=%s\n",
                        prefix,
                        strempty(t));
        }

        LIST_FOREACH(device_allow, a, c->device_allow)
                fprintf(f,
                        "%sDeviceAllow=%s %s%s%s\n",
                        prefix,
                        a->path,
                        a->r ? "r" : "", a->w ? "w" : "", a->m ? "m" : "");

        LIST_FOREACH(device_weights, iw, c->io_device_weights)
                fprintf(f,
                        "%sIODeviceWeight=%s %" PRIu64 "\n",
                        prefix,
                        iw->path,
                        iw->weight);

        LIST_FOREACH(device_latencies, l, c->io_device_latencies)
                fprintf(f,
                        "%sIODeviceLatencyTargetSec=%s %s\n",
                        prefix,
                        l->path,
                        format_timespan(u, sizeof(u), l->target_usec, 1));

        LIST_FOREACH(device_limits, il, c->io_device_limits) {
                char buf[FORMAT_BYTES_MAX];
                CGroupIOLimitType type;

                for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                        if (il->limits[type] != cgroup_io_limit_defaults[type])
                                fprintf(f,
                                        "%s%s=%s %s\n",
                                        prefix,
                                        cgroup_io_limit_type_to_string(type),
                                        il->path,
                                        format_bytes(buf, sizeof(buf), il->limits[type]));
        }

        LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                fprintf(f,
                        "%sBlockIODeviceWeight=%s %" PRIu64 "\n",
                        prefix,
                        w->path,
                        w->weight);

        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                char buf[FORMAT_BYTES_MAX];

                if (b->rbps != CGROUP_LIMIT_MAX)
                        fprintf(f,
                                "%sBlockIOReadBandwidth=%s %s\n",
                                prefix,
                                b->path,
                                format_bytes(buf, sizeof(buf), b->rbps));
                if (b->wbps != CGROUP_LIMIT_MAX)
                        fprintf(f,
                                "%sBlockIOWriteBandwidth=%s %s\n",
                                prefix,
                                b->path,
                                format_bytes(buf, sizeof(buf), b->wbps));
        }

        LIST_FOREACH(items, iaai, c->ip_address_allow) {
                _cleanup_free_ char *k = NULL;

                (void) in_addr_to_string(iaai->family, &iaai->address, &k);
                fprintf(f, "%sIPAddressAllow=%s/%u\n", prefix, strnull(k), iaai->prefixlen);
        }

        LIST_FOREACH(items, iaai, c->ip_address_deny) {
                _cleanup_free_ char *k = NULL;

                (void) in_addr_to_string(iaai->family, &iaai->address, &k);
                fprintf(f, "%sIPAddressDeny=%s/%u\n", prefix, strnull(k), iaai->prefixlen);
        }
}

int cgroup_add_device_allow(CGroupContext *c, const char *dev, const char *mode) {
        _cleanup_free_ CGroupDeviceAllow *a = NULL;
        _cleanup_free_ char *d = NULL;

        assert(c);
        assert(dev);
        assert(isempty(mode) || in_charset(mode, "rwm"));

        a = new(CGroupDeviceAllow, 1);
        if (!a)
                return -ENOMEM;

        d = strdup(dev);
        if (!d)
                return -ENOMEM;

        *a = (CGroupDeviceAllow) {
                .path = TAKE_PTR(d),
                .r = isempty(mode) || strchr(mode, 'r'),
                .w = isempty(mode) || strchr(mode, 'w'),
                .m = isempty(mode) || strchr(mode, 'm'),
        };

        LIST_PREPEND(device_allow, c->device_allow, a);
        TAKE_PTR(a);

        return 0;
}

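/* Illustrative usage sketch (not part of the original source): a unit file line such as
 * "DeviceAllow=/dev/null rwm" would reach this function roughly as
 *
 *     (void) cgroup_add_device_allow(c, "/dev/null", "rwm");
 *
 * and, per the initializer above, an empty mode string grants all of r/w/m at once. */
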
static void cgroup_xattr_apply(Unit *u) {
        char ids[SD_ID128_STRING_MAX];
        int r;

        assert(u);

        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        if (sd_id128_is_null(u->invocation_id))
                return;

        r = cg_set_xattr(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                         "trusted.invocation_id",
                         sd_id128_to_string(u->invocation_id, ids), 32,
                         0);
        if (r < 0)
                log_unit_debug_errno(u, r, "Failed to set invocation ID on control group %s, ignoring: %m", u->cgroup_path);
}

static int lookup_block_device(const char *p, dev_t *ret) {
        dev_t rdev, dev = 0;
        mode_t mode;
        int r;

        assert(p);
        assert(ret);

        r = device_path_parse_major_minor(p, &mode, &rdev);
        if (r == -ENODEV) { /* not a parsable device node, need to go to disk */
                struct stat st;

                if (stat(p, &st) < 0)
                        return log_warning_errno(errno, "Couldn't stat device '%s': %m", p);

                rdev = (dev_t)st.st_rdev;
                dev = (dev_t)st.st_dev;
                mode = st.st_mode;
        } else if (r < 0)
                return log_warning_errno(r, "Failed to parse major/minor from path '%s': %m", p);

        if (S_ISCHR(mode)) {
                log_warning("Device node '%s' is a character device, but block device needed.", p);
                return -ENOTBLK;
        } else if (S_ISBLK(mode))
                *ret = rdev;
        else if (major(dev) != 0)
                *ret = dev; /* If this is not a device node then use the block device this file is stored on */
        else {
                /* If this is btrfs, getting the backing block device is a bit harder */
                r = btrfs_get_block_device(p, ret);
                if (r < 0 && r != -ENOTTY)
                        return log_warning_errno(r, "Failed to determine block device backing btrfs file system '%s': %m", p);
                if (r == -ENOTTY) {
                        log_warning("'%s' is not a block device node, and file system block device cannot be determined or is not local.", p);
                        return -ENODEV;
                }
        }

        /* If this is a LUKS device, try to get the originating block device */
        (void) block_get_originating(*ret, ret);

        /* If this is a partition, try to get the originating block device */
        (void) block_get_whole_disk(*ret, ret);

        return 0;
}

static int whitelist_device(BPFProgram *prog, const char *path, const char *node, const char *acc) {
        dev_t rdev;
        mode_t mode;
        int r;

        assert(path);
        assert(acc);

        /* Some special handling for /dev/block/%u:%u, /dev/char/%u:%u, /run/systemd/inaccessible/chr and
         * /run/systemd/inaccessible/blk paths. Instead of stat()ing these we parse out the major/minor directly. This
         * means clients can use these paths without the device node actually around */
        r = device_path_parse_major_minor(node, &mode, &rdev);
        if (r < 0) {
                if (r != -ENODEV)
                        return log_warning_errno(r, "Couldn't parse major/minor from device path '%s': %m", node);

                struct stat st;
                if (stat(node, &st) < 0)
                        return log_warning_errno(errno, "Couldn't stat device %s: %m", node);

                if (!S_ISCHR(st.st_mode) && !S_ISBLK(st.st_mode)) {
                        log_warning("%s is not a device.", node);
                        return -ENODEV;
                }

                rdev = (dev_t) st.st_rdev;
                mode = st.st_mode;
        }

        if (cg_all_unified() > 0) {
                if (!prog)
                        return 0;

                return cgroup_bpf_whitelist_device(prog, S_ISCHR(mode) ? BPF_DEVCG_DEV_CHAR : BPF_DEVCG_DEV_BLOCK,
                                                   major(rdev), minor(rdev), acc);

        } else {
                char buf[2+DECIMAL_STR_MAX(dev_t)*2+2+4];

                xsprintf(buf,
                         "%c %u:%u %s",
                         S_ISCHR(mode) ? 'c' : 'b',
                         major(rdev), minor(rdev),
                         acc);

                /* Changing the devices list of a populated cgroup might result in EINVAL, hence ignore EINVAL here. */

                r = cg_set_attribute("devices", path, "devices.allow", buf);
                if (r < 0)
                        return log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES, -EPERM) ? LOG_DEBUG : LOG_WARNING,
                                              r, "Failed to set devices.allow on %s: %m", path);

                return 0;
        }
}

static int whitelist_major(BPFProgram *prog, const char *path, const char *name, char type, const char *acc) {
        _cleanup_fclose_ FILE *f = NULL;
        char buf[2+DECIMAL_STR_MAX(unsigned)+3+4];
        bool good = false;
        unsigned maj;
        int r;

        assert(path);
        assert(acc);
        assert(IN_SET(type, 'b', 'c'));

        if (streq(name, "*")) {
                /* If the name is a wildcard, then apply this list to all devices of this type */

                if (cg_all_unified() > 0) {
                        if (!prog)
                                return 0;

                        (void) cgroup_bpf_whitelist_class(prog, type == 'c' ? BPF_DEVCG_DEV_CHAR : BPF_DEVCG_DEV_BLOCK, acc);
                } else {
                        xsprintf(buf, "%c *:* %s", type, acc);

                        r = cg_set_attribute("devices", path, "devices.allow", buf);
                        if (r < 0)
                                log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                               "Failed to set devices.allow on %s: %m", path);
                }

                return 0;
        }

        if (safe_atou(name, &maj) >= 0 && DEVICE_MAJOR_VALID(maj)) {
                /* The name is numeric and suitable as major. In that case, let's take its major, and create the entry
                 * directly */

                if (cg_all_unified() > 0) {
                        if (!prog)
                                return 0;

                        (void) cgroup_bpf_whitelist_major(prog,
                                                          type == 'c' ? BPF_DEVCG_DEV_CHAR : BPF_DEVCG_DEV_BLOCK,
                                                          maj, acc);
                } else {
                        xsprintf(buf, "%c %u:* %s", type, maj, acc);

                        r = cg_set_attribute("devices", path, "devices.allow", buf);
                        if (r < 0)
                                log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                               "Failed to set devices.allow on %s: %m", path);
                }

                return 0;
        }

        f = fopen("/proc/devices", "re");
        if (!f)
                return log_warning_errno(errno, "Cannot open /proc/devices to resolve %s (%c): %m", name, type);

        for (;;) {
                _cleanup_free_ char *line = NULL;
                char *w, *p;

                r = read_line(f, LONG_LINE_MAX, &line);
                if (r < 0)
                        return log_warning_errno(r, "Failed to read /proc/devices: %m");
                if (r == 0)
                        break;

                if (type == 'c' && streq(line, "Character devices:")) {
                        good = true;
                        continue;
                }

                if (type == 'b' && streq(line, "Block devices:")) {
                        good = true;
                        continue;
                }

                if (isempty(line)) {
                        good = false;
                        continue;
                }

                if (!good)
                        continue;

                p = strstrip(line);

                w = strpbrk(p, WHITESPACE);
                if (!w)
                        continue;
                *w = 0;

                r = safe_atou(p, &maj);
                if (r < 0)
                        continue;
                if (maj <= 0)
                        continue;

                w++;
                w += strspn(w, WHITESPACE);

                if (fnmatch(name, w, 0) != 0)
                        continue;

                if (cg_all_unified() > 0) {
                        if (!prog)
                                continue;

                        (void) cgroup_bpf_whitelist_major(prog,
                                                          type == 'c' ? BPF_DEVCG_DEV_CHAR : BPF_DEVCG_DEV_BLOCK,
                                                          maj, acc);
                } else {
                        xsprintf(buf, "%c %u:* %s", type, maj, acc);

                        /* Changing the devices list of a populated cgroup might result in EINVAL, hence ignore EINVAL
                         * here. */

                        r = cg_set_attribute("devices", path, "devices.allow", buf);
                        if (r < 0)
                                log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES, -EPERM) ? LOG_DEBUG : LOG_WARNING,
                                               r, "Failed to set devices.allow on %s: %m", path);
                }
        }

        return 0;
}

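/* For reference (illustrative, exact majors and names vary per kernel), the /proc/devices
 * file parsed by the fallback path above looks roughly like:
 *
 *     Character devices:
 *       1 mem
 *       5 /dev/tty
 *     136 pts
 *
 *     Block devices:
 *       8 sd
 *     254 virtblk
 *
 * The loop scans the section matching 'type', takes the leading number as the major, and
 * fnmatch()es the requested name against the second column. */
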
static bool cgroup_context_has_cpu_weight(CGroupContext *c) {
        return c->cpu_weight != CGROUP_WEIGHT_INVALID ||
                c->startup_cpu_weight != CGROUP_WEIGHT_INVALID;
}

static bool cgroup_context_has_cpu_shares(CGroupContext *c) {
        return c->cpu_shares != CGROUP_CPU_SHARES_INVALID ||
                c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID;
}

static uint64_t cgroup_context_cpu_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->startup_cpu_weight;
        else if (c->cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->cpu_weight;
        else
                return CGROUP_WEIGHT_DEFAULT;
}

static uint64_t cgroup_context_cpu_shares(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->startup_cpu_shares;
        else if (c->cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->cpu_shares;
        else
                return CGROUP_CPU_SHARES_DEFAULT;
}

usec_t cgroup_cpu_adjust_period(usec_t period, usec_t quota, usec_t resolution, usec_t max_period) {
        /* kernel uses a minimum resolution of 1ms, so both period and (quota * period)
         * need to be higher than that boundary. quota is specified in USecPerSec.
         * Additionally, period must be at most max_period. */
        assert(quota > 0);

        return MIN(MAX3(period, resolution, resolution * USEC_PER_SEC / quota), max_period);
}

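/* Worked example (illustrative, not from the original source): with resolution = 1ms and
 * max_period = 1s, a request of period = 100ms and quota = 5000us per second yields
 *
 *     resolution * USEC_PER_SEC / quota = 1000 * 1000000 / 5000 = 200000us = 200ms
 *     MIN(MAX3(100ms, 1ms, 200ms), 1s)  = 200ms
 *
 * i.e. the period is raised to 200ms so that the effective quota per period
 * (5000 * 200000 / 1000000 = 1000us) stays at or above the kernel's 1ms resolution. */
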
static usec_t cgroup_cpu_adjust_period_and_log(Unit *u, usec_t period, usec_t quota) {
        usec_t new_period;

        if (quota == USEC_INFINITY)
                /* Always use default period for infinity quota. */
                return CGROUP_CPU_QUOTA_DEFAULT_PERIOD_USEC;

        if (period == USEC_INFINITY)
                /* Default period was requested. */
                period = CGROUP_CPU_QUOTA_DEFAULT_PERIOD_USEC;

        /* Clamp to interval [1ms, 1s] */
        new_period = cgroup_cpu_adjust_period(period, quota, USEC_PER_MSEC, USEC_PER_SEC);

        if (new_period != period) {
                char v[FORMAT_TIMESPAN_MAX];
                log_unit_full(u, u->warned_clamping_cpu_quota_period ? LOG_DEBUG : LOG_WARNING, 0,
                              "Clamping CPU interval for cpu.max: period is now %s",
                              format_timespan(v, sizeof(v), new_period, 1));
                u->warned_clamping_cpu_quota_period = true;
        }

        return new_period;
}

static void cgroup_apply_unified_cpu_weight(Unit *u, uint64_t weight) {
        char buf[DECIMAL_STR_MAX(uint64_t) + 2];

        xsprintf(buf, "%" PRIu64 "\n", weight);
        (void) set_attribute_and_warn(u, "cpu", "cpu.weight", buf);
}

static void cgroup_apply_unified_cpu_quota(Unit *u, usec_t quota, usec_t period) {
        char buf[(DECIMAL_STR_MAX(usec_t) + 1) * 2 + 1];

        period = cgroup_cpu_adjust_period_and_log(u, period, quota);
        if (quota != USEC_INFINITY)
                xsprintf(buf, USEC_FMT " " USEC_FMT "\n",
                         MAX(quota * period / USEC_PER_SEC, USEC_PER_MSEC), period);
        else
                xsprintf(buf, "max " USEC_FMT "\n", period);
        (void) set_attribute_and_warn(u, "cpu", "cpu.max", buf);
}

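/* Example of the resulting "cpu.max" payloads (illustrative): CPUQuota=20% (i.e. 200000us
 * per second) with the default 100ms period writes "20000 100000\n" (20ms of CPU time per
 * 100ms period), while an infinite quota writes "max 100000\n". */
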
static void cgroup_apply_legacy_cpu_shares(Unit *u, uint64_t shares) {
        char buf[DECIMAL_STR_MAX(uint64_t) + 2];

        xsprintf(buf, "%" PRIu64 "\n", shares);
        (void) set_attribute_and_warn(u, "cpu", "cpu.shares", buf);
}

static void cgroup_apply_legacy_cpu_quota(Unit *u, usec_t quota, usec_t period) {
        char buf[DECIMAL_STR_MAX(usec_t) + 2];

        period = cgroup_cpu_adjust_period_and_log(u, period, quota);

        xsprintf(buf, USEC_FMT "\n", period);
        (void) set_attribute_and_warn(u, "cpu", "cpu.cfs_period_us", buf);

        if (quota != USEC_INFINITY) {
                xsprintf(buf, USEC_FMT "\n", MAX(quota * period / USEC_PER_SEC, USEC_PER_MSEC));
                (void) set_attribute_and_warn(u, "cpu", "cpu.cfs_quota_us", buf);
        } else
                (void) set_attribute_and_warn(u, "cpu", "cpu.cfs_quota_us", "-1\n");
}

static uint64_t cgroup_cpu_shares_to_weight(uint64_t shares) {
        return CLAMP(shares * CGROUP_WEIGHT_DEFAULT / CGROUP_CPU_SHARES_DEFAULT,
                     CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}

static uint64_t cgroup_cpu_weight_to_shares(uint64_t weight) {
        return CLAMP(weight * CGROUP_CPU_SHARES_DEFAULT / CGROUP_WEIGHT_DEFAULT,
                     CGROUP_CPU_SHARES_MIN, CGROUP_CPU_SHARES_MAX);
}

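/* Conversion examples (illustrative): with CGROUP_CPU_SHARES_DEFAULT=1024 and
 * CGROUP_WEIGHT_DEFAULT=100, shares of 1024 map to weight 100 and shares of 2048 to
 * weight 200; the CLAMP() keeps results inside the valid range, so tiny share values
 * whose quotient would round down to 0 are raised to CGROUP_WEIGHT_MIN. */
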
static bool cgroup_context_has_io_config(CGroupContext *c) {
        return c->io_accounting ||
                c->io_weight != CGROUP_WEIGHT_INVALID ||
                c->startup_io_weight != CGROUP_WEIGHT_INVALID ||
                c->io_device_weights ||
                c->io_device_latencies ||
                c->io_device_limits;
}

static bool cgroup_context_has_blockio_config(CGroupContext *c) {
        return c->blockio_accounting ||
                c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
                c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
                c->blockio_device_weights ||
                c->blockio_device_bandwidths;
}

static uint64_t cgroup_context_io_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_io_weight != CGROUP_WEIGHT_INVALID)
                return c->startup_io_weight;
        else if (c->io_weight != CGROUP_WEIGHT_INVALID)
                return c->io_weight;
        else
                return CGROUP_WEIGHT_DEFAULT;
}

static uint64_t cgroup_context_blkio_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
                return c->startup_blockio_weight;
        else if (c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
                return c->blockio_weight;
        else
                return CGROUP_BLKIO_WEIGHT_DEFAULT;
}

static uint64_t cgroup_weight_blkio_to_io(uint64_t blkio_weight) {
        return CLAMP(blkio_weight * CGROUP_WEIGHT_DEFAULT / CGROUP_BLKIO_WEIGHT_DEFAULT,
                     CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}

static uint64_t cgroup_weight_io_to_blkio(uint64_t io_weight) {
        return CLAMP(io_weight * CGROUP_BLKIO_WEIGHT_DEFAULT / CGROUP_WEIGHT_DEFAULT,
                     CGROUP_BLKIO_WEIGHT_MIN, CGROUP_BLKIO_WEIGHT_MAX);
}

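/* Same idea on the blkio scale (illustrative): with CGROUP_BLKIO_WEIGHT_DEFAULT=500 and
 * CGROUP_WEIGHT_DEFAULT=100, a legacy blkio weight of 1000 becomes an io weight of 200,
 * and the two defaults map onto each other exactly. */
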
static void cgroup_apply_io_device_weight(Unit *u, const char *dev_path, uint64_t io_weight) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), io_weight);
        (void) set_attribute_and_warn(u, "io", "io.weight", buf);
}

static void cgroup_apply_blkio_device_weight(Unit *u, const char *dev_path, uint64_t blkio_weight) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), blkio_weight);
        (void) set_attribute_and_warn(u, "blkio", "blkio.weight_device", buf);
}

static void cgroup_apply_io_device_latency(Unit *u, const char *dev_path, usec_t target) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+7+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        if (target != USEC_INFINITY)
                xsprintf(buf, "%u:%u target=%" PRIu64 "\n", major(dev), minor(dev), target);
        else
                xsprintf(buf, "%u:%u target=max\n", major(dev), minor(dev));

        (void) set_attribute_and_warn(u, "io", "io.latency", buf);
}

static void cgroup_apply_io_device_limit(Unit *u, const char *dev_path, uint64_t *limits) {
        char limit_bufs[_CGROUP_IO_LIMIT_TYPE_MAX][DECIMAL_STR_MAX(uint64_t)];
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+(6+DECIMAL_STR_MAX(uint64_t)+1)*4];
        CGroupIOLimitType type;
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                if (limits[type] != cgroup_io_limit_defaults[type])
                        xsprintf(limit_bufs[type], "%" PRIu64, limits[type]);
                else
                        xsprintf(limit_bufs[type], "%s", limits[type] == CGROUP_LIMIT_MAX ? "max" : "0");

        xsprintf(buf, "%u:%u rbps=%s wbps=%s riops=%s wiops=%s\n", major(dev), minor(dev),
                 limit_bufs[CGROUP_IO_RBPS_MAX], limit_bufs[CGROUP_IO_WBPS_MAX],
                 limit_bufs[CGROUP_IO_RIOPS_MAX], limit_bufs[CGROUP_IO_WIOPS_MAX]);
        (void) set_attribute_and_warn(u, "io", "io.max", buf);
}

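/* Example of a resulting "io.max" line (illustrative): for device 8:0 with only a read
 * bandwidth limit of 1000000 bytes/s set, the buffer reads
 *
 *     8:0 rbps=1000000 wbps=max riops=max wiops=max
 *
 * since limits left at their default of CGROUP_LIMIT_MAX are rendered as "max". */
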
static void cgroup_apply_blkio_device_limit(Unit *u, const char *dev_path, uint64_t rbps, uint64_t wbps) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), rbps);
        (void) set_attribute_and_warn(u, "blkio", "blkio.throttle.read_bps_device", buf);

        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), wbps);
        (void) set_attribute_and_warn(u, "blkio", "blkio.throttle.write_bps_device", buf);
}

static bool cgroup_context_has_unified_memory_config(CGroupContext *c) {
        return c->memory_min > 0 || c->memory_low > 0 || c->memory_high != CGROUP_LIMIT_MAX ||
               c->memory_max != CGROUP_LIMIT_MAX || c->memory_swap_max != CGROUP_LIMIT_MAX;
}

static void cgroup_apply_unified_memory_limit(Unit *u, const char *file, uint64_t v) {
        char buf[DECIMAL_STR_MAX(uint64_t) + 1] = "max\n";

        if (v != CGROUP_LIMIT_MAX)
                xsprintf(buf, "%" PRIu64 "\n", v);

        (void) set_attribute_and_warn(u, "memory", file, buf);
}

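/* Illustrative values: v == CGROUP_LIMIT_MAX leaves the "max\n" initializer in place (no
 * limit), while e.g. MemoryMax=1G ends up as "1073741824\n" written to memory.max. */
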
static void cgroup_apply_firewall(Unit *u) {
        assert(u);

        /* Best-effort: let's apply IP firewalling and/or accounting if that's enabled */

        if (bpf_firewall_compile(u) < 0)
                return;

        (void) bpf_firewall_install(u);
}

static void cgroup_context_apply(
                Unit *u,
                CGroupMask apply_mask,
                ManagerState state) {

        const char *path;
        CGroupContext *c;
        bool is_host_root, is_local_root;
        int r;

        assert(u);

        /* Nothing to do? Exit early! */
        if (apply_mask == 0)
                return;

        /* Some cgroup attributes are not supported on the host root cgroup, hence silently ignore them here. And other
         * attributes should only be managed for cgroups further down the tree. */
        is_local_root = unit_has_name(u, SPECIAL_ROOT_SLICE);
        is_host_root = unit_has_host_root_cgroup(u);

        assert_se(c = unit_get_cgroup_context(u));
        assert_se(path = u->cgroup_path);

        if (is_local_root) /* Make sure we don't try to display messages with an empty path. */
                path = "/";

        /* We generally ignore errors caused by read-only mounted cgroup trees (assuming we are running in a container
         * then), and missing cgroups, i.e. EROFS and ENOENT. */

        /* In fully unified mode these attributes don't exist on the host cgroup root. On legacy the weights exist, but
         * setting the weight makes very little sense on the host root cgroup, as there are no other cgroups at this
         * level. The quota exists there too, but any attempt to write to it is refused with EINVAL. Inside of
         * containers we want to leave control of these to the container manager (and if cgroup v2 delegation is used
         * we couldn't even write to them if we wanted to). */
        if ((apply_mask & CGROUP_MASK_CPU) && !is_local_root) {

                if (cg_all_unified() > 0) {
                        uint64_t weight;

                        if (cgroup_context_has_cpu_weight(c))
                                weight = cgroup_context_cpu_weight(c, state);
                        else if (cgroup_context_has_cpu_shares(c)) {
                                uint64_t shares;

                                shares = cgroup_context_cpu_shares(c, state);
                                weight = cgroup_cpu_shares_to_weight(shares);

                                log_cgroup_compat(u, "Applying [Startup]CPUShares=%" PRIu64 " as [Startup]CPUWeight=%" PRIu64 " on %s",
                                                  shares, weight, path);
                        } else
                                weight = CGROUP_WEIGHT_DEFAULT;

                        cgroup_apply_unified_cpu_weight(u, weight);
                        cgroup_apply_unified_cpu_quota(u, c->cpu_quota_per_sec_usec, c->cpu_quota_period_usec);

                } else {
                        uint64_t shares;

                        if (cgroup_context_has_cpu_weight(c)) {
                                uint64_t weight;

                                weight = cgroup_context_cpu_weight(c, state);
                                shares = cgroup_cpu_weight_to_shares(weight);

                                log_cgroup_compat(u, "Applying [Startup]CPUWeight=%" PRIu64 " as [Startup]CPUShares=%" PRIu64 " on %s",
                                                  weight, shares, path);
                        } else if (cgroup_context_has_cpu_shares(c))
                                shares = cgroup_context_cpu_shares(c, state);
                        else
                                shares = CGROUP_CPU_SHARES_DEFAULT;

                        cgroup_apply_legacy_cpu_shares(u, shares);
                        cgroup_apply_legacy_cpu_quota(u, c->cpu_quota_per_sec_usec, c->cpu_quota_period_usec);
                }
        }

        /* The 'io' controller attributes are not exported on the host's root cgroup (being a pure cgroup v2
         * controller), and in case of containers we want to leave control of these attributes to the container manager
         * (and we couldn't access that stuff anyway, even if we tried if proper delegation is used). */
        if ((apply_mask & CGROUP_MASK_IO) && !is_local_root) {
                char buf[8+DECIMAL_STR_MAX(uint64_t)+1];
                bool has_io, has_blockio;
                uint64_t weight;

                has_io = cgroup_context_has_io_config(c);
                has_blockio = cgroup_context_has_blockio_config(c);

                if (has_io)
                        weight = cgroup_context_io_weight(c, state);
                else if (has_blockio) {
                        uint64_t blkio_weight;

                        blkio_weight = cgroup_context_blkio_weight(c, state);
                        weight = cgroup_weight_blkio_to_io(blkio_weight);

                        log_cgroup_compat(u, "Applying [Startup]BlockIOWeight=%" PRIu64 " as [Startup]IOWeight=%" PRIu64,
                                          blkio_weight, weight);
                } else
                        weight = CGROUP_WEIGHT_DEFAULT;

                xsprintf(buf, "default %" PRIu64 "\n", weight);
                (void) set_attribute_and_warn(u, "io", "io.weight", buf);

                if (has_io) {
                        CGroupIODeviceLatency *latency;
                        CGroupIODeviceLimit *limit;
                        CGroupIODeviceWeight *w;

                        LIST_FOREACH(device_weights, w, c->io_device_weights)
                                cgroup_apply_io_device_weight(u, w->path, w->weight);

                        LIST_FOREACH(device_limits, limit, c->io_device_limits)
                                cgroup_apply_io_device_limit(u, limit->path, limit->limits);

                        LIST_FOREACH(device_latencies, latency, c->io_device_latencies)
                                cgroup_apply_io_device_latency(u, latency->path, latency->target_usec);

                } else if (has_blockio) {
                        CGroupBlockIODeviceWeight *w;
                        CGroupBlockIODeviceBandwidth *b;

                        LIST_FOREACH(device_weights, w, c->blockio_device_weights) {
                                weight = cgroup_weight_blkio_to_io(w->weight);

                                log_cgroup_compat(u, "Applying BlockIODeviceWeight=%" PRIu64 " as IODeviceWeight=%" PRIu64 " for %s",
                                                  w->weight, weight, w->path);

                                cgroup_apply_io_device_weight(u, w->path, weight);
                        }

                        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                                uint64_t limits[_CGROUP_IO_LIMIT_TYPE_MAX];
                                CGroupIOLimitType type;

                                for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                                        limits[type] = cgroup_io_limit_defaults[type];

                                limits[CGROUP_IO_RBPS_MAX] = b->rbps;
                                limits[CGROUP_IO_WBPS_MAX] = b->wbps;

                                log_cgroup_compat(u, "Applying BlockIO{Read|Write}Bandwidth=%" PRIu64 " %" PRIu64 " as IO{Read|Write}BandwidthMax= for %s",
                                                  b->rbps, b->wbps, b->path);

                                cgroup_apply_io_device_limit(u, b->path, limits);
                        }
                }
        }

        if (apply_mask & CGROUP_MASK_BLKIO) {
                bool has_io, has_blockio;

                has_io = cgroup_context_has_io_config(c);
                has_blockio = cgroup_context_has_blockio_config(c);

                /* Applying a 'weight' never makes sense for the host root cgroup, and for containers this should be
                 * left to our container manager, too. */
                if (!is_local_root) {
                        char buf[DECIMAL_STR_MAX(uint64_t)+1];
                        uint64_t weight;

                        if (has_io) {
                                uint64_t io_weight;

                                io_weight = cgroup_context_io_weight(c, state);
                                weight = cgroup_weight_io_to_blkio(cgroup_context_io_weight(c, state));

                                log_cgroup_compat(u, "Applying [Startup]IOWeight=%" PRIu64 " as [Startup]BlockIOWeight=%" PRIu64,
                                                  io_weight, weight);
                        } else if (has_blockio)
                                weight = cgroup_context_blkio_weight(c, state);
                        else
                                weight = CGROUP_BLKIO_WEIGHT_DEFAULT;

                        xsprintf(buf, "%" PRIu64 "\n", weight);
                        (void) set_attribute_and_warn(u, "blkio", "blkio.weight", buf);

                        if (has_io) {
                                CGroupIODeviceWeight *w;

                                LIST_FOREACH(device_weights, w, c->io_device_weights) {
                                        weight = cgroup_weight_io_to_blkio(w->weight);

                                        log_cgroup_compat(u, "Applying IODeviceWeight=%" PRIu64 " as BlockIODeviceWeight=%" PRIu64 " for %s",
                                                          w->weight, weight, w->path);

                                        cgroup_apply_blkio_device_weight(u, w->path, weight);
                                }
                        } else if (has_blockio) {
                                CGroupBlockIODeviceWeight *w;

                                LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                                        cgroup_apply_blkio_device_weight(u, w->path, w->weight);
                        }
                }

                /* The bandwidth limits are something that make sense to be applied to the host's root but not container
                 * roots, as there we want the container manager to handle it */
                if (is_host_root || !is_local_root) {
                        if (has_io) {
                                CGroupIODeviceLimit *l;

                                LIST_FOREACH(device_limits, l, c->io_device_limits) {
                                        log_cgroup_compat(u, "Applying IO{Read|Write}Bandwidth=%" PRIu64 " %" PRIu64 " as BlockIO{Read|Write}BandwidthMax= for %s",
                                                          l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX], l->path);

                                        cgroup_apply_blkio_device_limit(u, l->path, l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX]);
                                }
                        } else if (has_blockio) {
                                CGroupBlockIODeviceBandwidth *b;

                                LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths)
                                        cgroup_apply_blkio_device_limit(u, b->path, b->rbps, b->wbps);
                        }
                }
        }

        /* In unified mode 'memory' attributes do not exist on the root cgroup. In legacy mode 'memory.limit_in_bytes'
         * exists on the root cgroup, but any writes to it are refused with EINVAL. And if we run in a container we
         * want to leave control to the container manager (and if proper cgroup v2 delegation is used we couldn't even
         * write to this if we wanted to.) */
        if ((apply_mask & CGROUP_MASK_MEMORY) && !is_local_root) {

                if (cg_all_unified() > 0) {
                        uint64_t max, swap_max = CGROUP_LIMIT_MAX;

                        if (cgroup_context_has_unified_memory_config(c)) {
                                max = c->memory_max;
                                swap_max = c->memory_swap_max;
                        } else {
                                max = c->memory_limit;

                                if (max != CGROUP_LIMIT_MAX)
                                        log_cgroup_compat(u, "Applying MemoryLimit=%" PRIu64 " as MemoryMax=", max);
                        }

                        cgroup_apply_unified_memory_limit(u, "memory.min", c->memory_min);
                        cgroup_apply_unified_memory_limit(u, "memory.low", c->memory_low);
                        cgroup_apply_unified_memory_limit(u, "memory.high", c->memory_high);
                        cgroup_apply_unified_memory_limit(u, "memory.max", max);
                        cgroup_apply_unified_memory_limit(u, "memory.swap.max", swap_max);

                } else {
                        char buf[DECIMAL_STR_MAX(uint64_t) + 1];
                        uint64_t val;

                        if (cgroup_context_has_unified_memory_config(c)) {
                                val = c->memory_max;
                                log_cgroup_compat(u, "Applying MemoryMax=%" PRIu64 " as MemoryLimit=", val);
                        } else
                                val = c->memory_limit;

                        if (val == CGROUP_LIMIT_MAX)
                                strncpy(buf, "-1\n", sizeof(buf));
                        else
                                xsprintf(buf, "%" PRIu64 "\n", val);

                        (void) set_attribute_and_warn(u, "memory", "memory.limit_in_bytes", buf);
                }
        }

        /* On cgroup v2 we can apply BPF everywhere. On cgroup v1 we apply it everywhere except for the root of
         * containers, where we leave this to the manager */
        if ((apply_mask & (CGROUP_MASK_DEVICES | CGROUP_MASK_BPF_DEVICES)) &&
            (is_host_root || cg_all_unified() > 0 || !is_local_root)) {
                _cleanup_(bpf_program_unrefp) BPFProgram *prog = NULL;
                CGroupDeviceAllow *a;

                if (cg_all_unified() > 0) {
                        r = cgroup_init_device_bpf(&prog, c->device_policy, c->device_allow);
                        if (r < 0)
                                log_unit_warning_errno(u, r, "Failed to initialize device control bpf program: %m");
                } else {
                        /* Changing the devices list of a populated cgroup might result in EINVAL, hence ignore EINVAL
                         * here. */

                        if (c->device_allow || c->device_policy != CGROUP_AUTO)
                                r = cg_set_attribute("devices", path, "devices.deny", "a");
                        else
                                r = cg_set_attribute("devices", path, "devices.allow", "a");
                        if (r < 0)
                                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES, -EPERM) ? LOG_DEBUG : LOG_WARNING, r,
                                              "Failed to reset devices.allow/devices.deny: %m");
                }

                if (c->device_policy == CGROUP_CLOSED ||
                    (c->device_policy == CGROUP_AUTO && c->device_allow)) {
                        static const char auto_devices[] =
                                "/dev/null\0" "rwm\0"
                                "/dev/zero\0" "rwm\0"
                                "/dev/full\0" "rwm\0"
                                "/dev/random\0" "rwm\0"
                                "/dev/urandom\0" "rwm\0"
                                "/dev/tty\0" "rwm\0"
                                "/dev/ptmx\0" "rwm\0"
                                /* Allow /run/systemd/inaccessible/{chr,blk} devices for mapping InaccessiblePaths */
                                "/run/systemd/inaccessible/chr\0" "rwm\0"
                                "/run/systemd/inaccessible/blk\0" "rwm\0";

                        const char *x, *y;

                        NULSTR_FOREACH_PAIR(x, y, auto_devices)
                                (void) whitelist_device(prog, path, x, y);

                        /* PTS (/dev/pts) devices may not be duplicated, but accessed */
                        (void) whitelist_major(prog, path, "pts", 'c', "rw");
                }

                LIST_FOREACH(device_allow, a, c->device_allow) {
                        char acc[4], *val;
                        unsigned k = 0;

                        if (a->r)
                                acc[k++] = 'r';
                        if (a->w)
                                acc[k++] = 'w';
                        if (a->m)
                                acc[k++] = 'm';

                        if (k == 0)
                                continue;

                        acc[k++] = 0;

                        if (path_startswith(a->path, "/dev/"))
                                (void) whitelist_device(prog, path, a->path, acc);
                        else if ((val = startswith(a->path, "block-")))
                                (void) whitelist_major(prog, path, val, 'b', acc);
                        else if ((val = startswith(a->path, "char-")))
                                (void) whitelist_major(prog, path, val, 'c', acc);
                        else
                                log_unit_debug(u, "Ignoring device '%s' while writing cgroup attribute.", a->path);
                }

                r = cgroup_apply_device_bpf(u, prog, c->device_policy, c->device_allow);
                if (r < 0) {
                        static bool warned = false;

                        log_full_errno(warned ? LOG_DEBUG : LOG_WARNING, r,
                                       "Unit %s configures device ACL, but the local system doesn't seem to support the BPF-based device controller.\n"
                                       "Proceeding WITHOUT applying ACL (all devices will be accessible)!\n"
                                       "(This warning is only shown for the first loaded unit using device ACL.)", u->id);

                        warned = true;
                }
        }

        if (apply_mask & CGROUP_MASK_PIDS) {

                if (is_host_root) {
                        /* So, the "pids" controller does not expose anything on the root cgroup, in order not to
                         * replicate knobs exposed elsewhere needlessly. We abstract this away here however, and when
                         * the knobs of the root cgroup are modified propagate this to the relevant sysctls. There's a
                         * non-obvious asymmetry however: unlike the cgroup properties we don't really want to take
                         * exclusive ownership of the sysctls, but we still want to honour things if the user sets
                         * limits. Hence we employ sort of a one-way strategy: when the user sets a bounded limit
                         * through us it counts. When the user afterwards unsets it again (i.e. sets it to unbounded)
                         * it also counts. But if the user never set a limit through us (i.e. we are the default of
                         * "unbounded") we leave things unmodified. For this we manage a global boolean that we turn on
                         * the first time we set a limit. Note that this boolean is flushed out on manager reload,
                         * which is desirable so that there's an official way to release control of the sysctl from
                         * systemd: set the limit to unbounded and reload. */

                        if (c->tasks_max != CGROUP_LIMIT_MAX) {
                                u->manager->sysctl_pid_max_changed = true;
                                r = procfs_tasks_set_limit(c->tasks_max);
                        } else if (u->manager->sysctl_pid_max_changed)
                                r = procfs_tasks_set_limit(TASKS_MAX);
                        else
                                r = 0;
                        if (r < 0)
                                log_unit_full(u, LOG_LEVEL_CGROUP_WRITE(r), r,
                                              "Failed to write to tasks limit sysctls: %m");
                }

                /* The attribute itself is not available on the host root cgroup, and in the container case we want to
                 * leave it for the container manager. */
                if (!is_local_root) {
                        if (c->tasks_max != CGROUP_LIMIT_MAX) {
                                char buf[DECIMAL_STR_MAX(uint64_t) + 2];

                                sprintf(buf, "%" PRIu64 "\n", c->tasks_max);
                                (void) set_attribute_and_warn(u, "pids", "pids.max", buf);
                        } else
                                (void) set_attribute_and_warn(u, "pids", "pids.max", "max\n");
                }
        }

        if (apply_mask & CGROUP_MASK_BPF_FIREWALL)
                cgroup_apply_firewall(u);
}

static bool unit_get_needs_bpf_firewall(Unit *u) {
        CGroupContext *c;
        Unit *p;
        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        if (c->ip_accounting ||
            c->ip_address_allow ||
            c->ip_address_deny)
                return true;

        /* If any parent slice has an IP access list defined, it applies too */
        for (p = UNIT_DEREF(u->slice); p; p = UNIT_DEREF(p->slice)) {
                c = unit_get_cgroup_context(p);
                if (!c)
                        return false;

                if (c->ip_address_allow ||
                    c->ip_address_deny)
                        return true;
        }

        return false;
}

static CGroupMask cgroup_context_get_mask(CGroupContext *c) {
        CGroupMask mask = 0;

        /* Figure out which controllers we need, based on the cgroup context object */

        if (c->cpu_accounting)
                mask |= get_cpu_accounting_mask();

        if (cgroup_context_has_cpu_weight(c) ||
            cgroup_context_has_cpu_shares(c) ||
            c->cpu_quota_per_sec_usec != USEC_INFINITY)
                mask |= CGROUP_MASK_CPU;

        if (cgroup_context_has_io_config(c) || cgroup_context_has_blockio_config(c))
                mask |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;

        if (c->memory_accounting ||
            c->memory_limit != CGROUP_LIMIT_MAX ||
            cgroup_context_has_unified_memory_config(c))
                mask |= CGROUP_MASK_MEMORY;

        if (c->device_allow ||
            c->device_policy != CGROUP_AUTO)
                mask |= CGROUP_MASK_DEVICES | CGROUP_MASK_BPF_DEVICES;

        if (c->tasks_accounting ||
            c->tasks_max != CGROUP_LIMIT_MAX)
                mask |= CGROUP_MASK_PIDS;

        return CGROUP_MASK_EXTEND_JOINED(mask);
}

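/* Illustrative mapping (not from the original source): a unit with only CPUWeight= and
 * TasksMax= set yields CGROUP_MASK_CPU | CGROUP_MASK_PIDS here, before
 * CGROUP_MASK_EXTEND_JOINED() adds any controllers that are joined with these on the
 * legacy hierarchy (e.g. "cpuacct" mounted together with "cpu"). */
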
static CGroupMask unit_get_bpf_mask(Unit *u) {
        CGroupMask mask = 0;

        /* Figure out which controllers we need, based on the cgroup context, possibly taking into account children
         * too. */

        if (unit_get_needs_bpf_firewall(u))
                mask |= CGROUP_MASK_BPF_FIREWALL;

        return mask;
}

unit_get_own_mask(Unit
*u
) {
1369 /* Returns the mask of controllers the unit needs for itself. If a unit is not properly loaded, return an empty
1370 * mask, as we shouldn't reflect it in the cgroup hierarchy then. */
1372 if (u
->load_state
!= UNIT_LOADED
)
1375 c
= unit_get_cgroup_context(u
);
1379 return (cgroup_context_get_mask(c
) | unit_get_bpf_mask(u
) | unit_get_delegate_mask(u
)) & ~unit_get_ancestor_disable_mask(u
);
CGroupMask unit_get_delegate_mask(Unit *u) {
        CGroupContext *c;

        /* If delegation is turned on, then turn on selected controllers, unless we are on the legacy hierarchy and the
         * process we fork into is known to drop privileges, and hence shouldn't get access to the controllers.
         *
         * Note that on the unified hierarchy it is safe to delegate controllers to unprivileged services. */

        if (!unit_cgroup_delegate(u))
                return 0;

        if (cg_all_unified() <= 0) {
                ExecContext *e;

                e = unit_get_exec_context(u);
                if (e && !exec_context_maintains_privileges(e))
                        return 0;
        }

        assert_se(c = unit_get_cgroup_context(u));
        return CGROUP_MASK_EXTEND_JOINED(c->delegate_controllers);
}

CGroupMask unit_get_members_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's children require, merged */

        if (u->cgroup_members_mask_valid)
                return u->cgroup_members_mask; /* Use cached value if possible */

        u->cgroup_members_mask = 0;

        if (u->type == UNIT_SLICE) {
                void *v;
                Unit *member;
                Iterator i;

                HASHMAP_FOREACH_KEY(v, member, u->dependencies[UNIT_BEFORE], i) {
                        if (UNIT_DEREF(member->slice) == u)
                                u->cgroup_members_mask |= unit_get_subtree_mask(member); /* note that this calls ourselves again, for the children */
                }
        }

        u->cgroup_members_mask_valid = true;
        return u->cgroup_members_mask;
}

CGroupMask unit_get_siblings_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's siblings
         * require, i.e. the members mask of the unit's parent slice
         * if there is one. */

        if (UNIT_ISSET(u->slice))
                return unit_get_members_mask(UNIT_DEREF(u->slice));

        return unit_get_subtree_mask(u); /* we are the top-level slice */
}

CGroupMask unit_get_disable_mask(Unit *u) {
        CGroupContext *c;

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        return c->disable_controllers;
}

CGroupMask unit_get_ancestor_disable_mask(Unit *u) {
        CGroupMask mask;

        assert(u);
        mask = unit_get_disable_mask(u);

        /* Returns the mask of controllers which are marked as forcibly
         * disabled in any ancestor unit or the unit in question. */

        if (UNIT_ISSET(u->slice))
                mask |= unit_get_ancestor_disable_mask(UNIT_DEREF(u->slice));

        return mask;
}

CGroupMask unit_get_subtree_mask(Unit *u) {

        /* Returns the mask of this subtree, meaning of the group
         * itself and its children. */

        return unit_get_own_mask(u) | unit_get_members_mask(u);
}

CGroupMask unit_get_target_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable
         * for a specific cgroup, i.e. everything it needs itself,
         * plus all that its children need, plus all that its siblings
         * need. This is primarily useful on the legacy cgroup
         * hierarchy, where we need to duplicate each cgroup in each
         * hierarchy that shall be enabled for it. */

        mask = unit_get_own_mask(u) | unit_get_members_mask(u) | unit_get_siblings_mask(u);
        mask &= u->manager->cgroup_supported;
        mask &= ~unit_get_ancestor_disable_mask(u);

        return mask;
}

CGroupMask unit_get_enable_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable
         * for the children of a specific cgroup. This is primarily
         * useful for the unified cgroup hierarchy, where each cgroup
         * controls which controllers are enabled for its children. */

        mask = unit_get_members_mask(u);
        mask &= u->manager->cgroup_supported;
        mask &= ~unit_get_ancestor_disable_mask(u);

        return mask;
}

void unit_invalidate_cgroup_members_masks(Unit *u) {
        assert(u);

        /* Recursively invalidate the member masks cache all the way up the tree */
        u->cgroup_members_mask_valid = false;

        if (UNIT_ISSET(u->slice))
                unit_invalidate_cgroup_members_masks(UNIT_DEREF(u->slice));
}

const char *unit_get_realized_cgroup_path(Unit *u, CGroupMask mask) {

        /* Returns the realized cgroup path of the specified unit where all specified controllers are available. */

        while (u) {

                if (u->cgroup_path &&
                    u->cgroup_realized &&
                    FLAGS_SET(u->cgroup_realized_mask, mask))
                        return u->cgroup_path;

                u = UNIT_DEREF(u->slice);
        }

        return NULL;
}

static const char *migrate_callback(CGroupMask mask, void *userdata) {
        return unit_get_realized_cgroup_path(userdata, mask);
}

char *unit_default_cgroup_path(const Unit *u) {
        _cleanup_free_ char *escaped = NULL, *slice = NULL;
        int r;

        assert(u);

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return strdup(u->manager->cgroup_root);

        if (UNIT_ISSET(u->slice) && !unit_has_name(UNIT_DEREF(u->slice), SPECIAL_ROOT_SLICE)) {
                r = cg_slice_to_path(UNIT_DEREF(u->slice)->id, &slice);
                if (r < 0)
                        return NULL;
        }

        escaped = cg_escape(u->id);
        if (!escaped)
                return NULL;

        if (slice)
                return strjoin(u->manager->cgroup_root, "/", slice, "/",
                               escaped);
        else
                return strjoin(u->manager->cgroup_root, "/", escaped);
}

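/* Illustrative results (assuming a system manager with an empty cgroup_root): a unit
 * "foo.service" placed in "bar.slice" maps to "/bar.slice/foo.service", with
 * cg_slice_to_path() expanding nested slice names ("a-b.slice" -> "a.slice/a-b.slice")
 * and cg_escape() guarding unit names that would collide with kernel-reserved ones. */
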
int unit_set_cgroup_path(Unit *u, const char *path) {
        _cleanup_free_ char *p = NULL;
        int r;

        assert(u);

        if (path) {
                p = strdup(path);
                if (!p)
                        return -ENOMEM;
        }

        if (streq_ptr(u->cgroup_path, p))
                return 0;

        if (p) {
                r = hashmap_put(u->manager->cgroup_unit, p, u);
                if (r < 0)
                        return r;
        }

        unit_release_cgroup(u);
        u->cgroup_path = TAKE_PTR(p);

        return 1;
}

int unit_watch_cgroup(Unit *u) {
        _cleanup_free_ char *events = NULL;
        int r;

        assert(u);

        if (!u->cgroup_path)
                return 0;

        if (u->cgroup_inotify_wd >= 0)
                return 0;

        /* Only applies to the unified hierarchy */
        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return log_error_errno(r, "Failed to determine whether the name=systemd hierarchy is unified: %m");
        if (r == 0)
                return 0;

        /* Don't watch the root slice, it's pointless. */
        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        r = hashmap_ensure_allocated(&u->manager->cgroup_inotify_wd_unit, &trivial_hash_ops);
        if (r < 0)
                return log_oom();

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events", &events);
        if (r < 0)
                return log_oom();

        u->cgroup_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, events, IN_MODIFY);
        if (u->cgroup_inotify_wd < 0) {

                if (errno == ENOENT) /* If the directory is already gone we don't need to track it,
                                      * so this is not an error */
                        return 0;

                return log_unit_error_errno(u, errno, "Failed to add inotify watch descriptor for control group %s: %m", u->cgroup_path);
        }

        r = hashmap_put(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd), u);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to add inotify watch descriptor to hash map: %m");

        return 0;
}

int unit_pick_cgroup_path(Unit *u) {
        _cleanup_free_ char *path = NULL;
        int r;

        assert(u);

        if (u->cgroup_path)
                return 0;

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EINVAL;

        path = unit_default_cgroup_path(u);
        if (!path)
                return log_oom();

        r = unit_set_cgroup_path(u, path);
        if (r == -EEXIST)
                return log_unit_error_errno(u, r, "Control group %s exists already.", path);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to set unit's control group path to %s: %m", path);

        return 0;
}

static int unit_create_cgroup(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask,
                ManagerState state) {

        bool created;
        int r;

        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* Figure out our cgroup path */
        r = unit_pick_cgroup_path(u);
        if (r < 0)
                return r;

        /* First, create our own group */
        r = cg_create_everywhere(u->manager->cgroup_supported, target_mask, u->cgroup_path);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to create cgroup %s: %m", u->cgroup_path);
        created = r;

        /* Start watching it */
        (void) unit_watch_cgroup(u);

        /* Preserve enabled controllers in delegated units, adjust others. */
        if (created || !u->cgroup_realized || !unit_cgroup_delegate(u)) {
                CGroupMask result_mask = 0;

                /* Enable all controllers we need */
                r = cg_enable_everywhere(u->manager->cgroup_supported, enable_mask, u->cgroup_path, &result_mask);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to enable/disable controllers on cgroup %s, ignoring: %m", u->cgroup_path);

                /* If we just turned off a controller, this might release the controller for our parent too, let's
                 * enqueue the parent for re-realization in that case again. */
                if (UNIT_ISSET(u->slice)) {
                        CGroupMask turned_off;

                        turned_off = (u->cgroup_realized ? u->cgroup_enabled_mask & ~result_mask : 0);
                        if (turned_off != 0) {
                                Unit *parent;

                                /* Force the parent to propagate the enable mask to the kernel again, by invalidating
                                 * the controller we just turned off. */

                                for (parent = UNIT_DEREF(u->slice); parent; parent = UNIT_DEREF(parent->slice))
                                        unit_invalidate_cgroup(parent, turned_off);
                        }
                }

                /* Remember what's actually enabled now */
                u->cgroup_enabled_mask = result_mask;
        }

        /* Keep track that this is now realized */
        u->cgroup_realized = true;
        u->cgroup_realized_mask = target_mask;

        if (u->type != UNIT_SLICE && !unit_cgroup_delegate(u)) {

                /* Then, possibly move things over, but not if
                 * subgroups may contain processes, which is the case
                 * for slice and delegation units. */
                r = cg_migrate_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->cgroup_path, migrate_callback, u);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to migrate cgroup to %s, ignoring: %m", u->cgroup_path);
        }

        /* Set attributes */
        cgroup_context_apply(u, target_mask, state);
        cgroup_xattr_apply(u);

        return 0;
}

static int unit_attach_pid_to_cgroup_via_bus(Unit *u, pid_t pid, const char *suffix_path) {
        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
        char *pp;
        int r;

        assert(u);

        if (MANAGER_IS_SYSTEM(u->manager))
                return -EINVAL;

        if (!u->manager->system_bus)
                return -EIO;

        if (!u->cgroup_path)
                return -EINVAL;

        /* Determine this unit's cgroup path relative to our cgroup root */
        pp = path_startswith(u->cgroup_path, u->manager->cgroup_root);
        if (!pp)
                return -EINVAL;

        pp = strjoina("/", pp, suffix_path);
        path_simplify(pp, false);

        r = sd_bus_call_method(u->manager->system_bus,
                               "org.freedesktop.systemd1",
                               "/org/freedesktop/systemd1",
                               "org.freedesktop.systemd1.Manager",
                               "AttachProcessesToUnit",
                               &error, NULL,
                               "ssau",
                               NULL /* empty unit name means client's unit, i.e. us */, pp, 1, (uint32_t) pid);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to attach unit process " PID_FMT " via the bus: %s", pid, bus_error_message(&error, r));

        return 0;
}

int unit_attach_pids_to_cgroup(Unit *u, Set *pids, const char *suffix_path) {
        CGroupMask delegated_mask;
        const char *p;
        Iterator i;
        void *pidp;
        int r, q;

        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EINVAL;

        if (set_isempty(pids))
                return 0;

        r = unit_realize_cgroup(u);
        if (r < 0)
                return r;

        if (isempty(suffix_path))
                p = u->cgroup_path;
        else
                p = strjoina(u->cgroup_path, "/", suffix_path);

        delegated_mask = unit_get_delegate_mask(u);

        r = 0;
        SET_FOREACH(pidp, pids, i) {
                pid_t pid = PTR_TO_PID(pidp);
                CGroupController c;

                /* First, attach the PID to the main cgroup hierarchy */
                q = cg_attach(SYSTEMD_CGROUP_CONTROLLER, p, pid);
                if (q < 0) {
                        log_unit_debug_errno(u, q, "Couldn't move process " PID_FMT " to requested cgroup '%s': %m", pid, p);

                        if (MANAGER_IS_USER(u->manager) && IN_SET(q, -EPERM, -EACCES)) {
                                int z;

                                /* If we are in a user instance, and we can't move the process ourselves due to
                                 * permission problems, let's ask the system instance about it instead. Since it's more
                                 * privileged it might be able to move the process across the leaves of a subtree whose
                                 * top node is not owned by us. */

                                z = unit_attach_pid_to_cgroup_via_bus(u, pid, suffix_path);
                                if (z < 0)
                                        log_unit_debug_errno(u, z, "Couldn't move process " PID_FMT " to requested cgroup '%s' via the system bus either: %m", pid, p);
                                else
                                        continue; /* When the bus call worked we are fully done for this PID. */
                        }

                        if (r >= 0)
                                r = q; /* Remember first error */

                        continue;
                }

                q = cg_all_unified();
                if (q < 0)
                        return q;
                if (q > 0)
                        continue;

                /* In the legacy hierarchy, attach the process to the requested cgroup if possible, and if not to the
                 * innermost realized one */

                for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++) {
                        CGroupMask bit = CGROUP_CONTROLLER_TO_MASK(c);
                        const char *realized;

                        if (!(u->manager->cgroup_supported & bit))
                                continue;

                        /* If this controller is delegated and realized, honour the caller's request for the cgroup suffix. */
                        if (delegated_mask & u->cgroup_realized_mask & bit) {
                                q = cg_attach(cgroup_controller_to_string(c), p, pid);
                                if (q >= 0)
                                        continue; /* Success! */

                                log_unit_debug_errno(u, q, "Failed to attach PID " PID_FMT " to requested cgroup %s in controller %s, falling back to unit's cgroup: %m",
                                                     pid, p, cgroup_controller_to_string(c));
                        }

                        /* So this controller is either not delegated or not realized, or something else weird happened. In
                         * that case let's attach the PID at least to the closest cgroup up the tree that is
                         * realized. */
                        realized = unit_get_realized_cgroup_path(u, bit);
                        if (!realized)
                                continue; /* Not even realized in the root slice? Then let's not bother */

                        q = cg_attach(cgroup_controller_to_string(c), realized, pid);
                        if (q < 0)
                                log_unit_debug_errno(u, q, "Failed to attach PID " PID_FMT " to realized cgroup %s in controller %s, ignoring: %m",
                                                     pid, realized, cgroup_controller_to_string(c));
                }
        }

        return r;
}

static bool unit_has_mask_realized(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask) {

        assert(u);

        /* Returns true if this unit is fully realized. We check four things:
         *
         * 1. Whether the cgroup was created at all
         * 2. Whether the cgroup was created in all the hierarchies we need it to be created in (in case of cgroup v1)
         * 3. Whether the cgroup has all the right controllers enabled (in case of cgroup v2)
         * 4. Whether the invalidation mask is currently zero
         *
         * If you wonder why we mask the target realization and enable mask with CGROUP_MASK_V1/CGROUP_MASK_V2: note
         * that there are three sets of bitmasks: CGROUP_MASK_V1 (for real cgroup v1 controllers), CGROUP_MASK_V2 (for
         * real cgroup v2 controllers) and CGROUP_MASK_BPF (for BPF-based pseudo-controllers). Now, cgroup_realized_mask
         * only matters for cgroup v1 controllers, and cgroup_enabled_mask is only used for cgroup v2, and if they
         * differ in the others, we don't really care. (After all, the cgroup_enabled_mask tracks which controllers are
         * enabled through cgroup.subtree_control, and since the BPF pseudo-controllers don't show up there, they
         * simply don't matter. */

        return u->cgroup_realized &&
                ((u->cgroup_realized_mask ^ target_mask) & CGROUP_MASK_V1) == 0 &&
                ((u->cgroup_enabled_mask ^ enable_mask) & CGROUP_MASK_V2) == 0 &&
                u->cgroup_invalidated_mask == 0;
}

static bool unit_has_mask_disables_realized(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask) {

        assert(u);

        /* Returns true if all controllers which should be disabled are indeed disabled.
         *
         * Unlike unit_has_mask_realized, we don't care what was enabled, only that anything we want to remove is
         * already removed. */

        return !u->cgroup_realized ||
                (FLAGS_SET(u->cgroup_realized_mask, target_mask & CGROUP_MASK_V1) &&
                 FLAGS_SET(u->cgroup_enabled_mask, enable_mask & CGROUP_MASK_V2));
}

static bool unit_has_mask_enables_realized(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask) {

        assert(u);

        /* Returns true if all controllers which should be enabled are indeed enabled.
         *
         * Unlike unit_has_mask_realized, we don't care about the controllers that are not present, only that anything
         * we want to add is already added. */

        return u->cgroup_realized &&
                ((u->cgroup_realized_mask | target_mask) & CGROUP_MASK_V1) == (u->cgroup_realized_mask & CGROUP_MASK_V1) &&
                ((u->cgroup_enabled_mask | enable_mask) & CGROUP_MASK_V2) == (u->cgroup_enabled_mask & CGROUP_MASK_V2);
}

void unit_add_to_cgroup_realize_queue(Unit *u) {
        assert(u);

        if (u->in_cgroup_realize_queue)
                return;

        LIST_PREPEND(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
        u->in_cgroup_realize_queue = true;
}

static void unit_remove_from_cgroup_realize_queue(Unit *u) {
        assert(u);

        if (!u->in_cgroup_realize_queue)
                return;

        LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
        u->in_cgroup_realize_queue = false;
}

/* Controllers can only be enabled breadth-first, from the root of the
 * hierarchy downwards to the unit in question. */
static int unit_realize_cgroup_now_enable(Unit *u, ManagerState state) {
        CGroupMask target_mask, enable_mask, new_target_mask, new_enable_mask;
        int r;

        assert(u);

        /* First go deal with this unit's parent, or we won't be able to enable
         * any new controllers at this layer. */
        if (UNIT_ISSET(u->slice)) {
                r = unit_realize_cgroup_now_enable(UNIT_DEREF(u->slice), state);
                if (r < 0)
                        return r;
        }

        target_mask = unit_get_target_mask(u);
        enable_mask = unit_get_enable_mask(u);

        /* We can only enable in this direction, don't try to disable anything. */
        if (unit_has_mask_enables_realized(u, target_mask, enable_mask))
                return 0;

        new_target_mask = u->cgroup_realized_mask | target_mask;
        new_enable_mask = u->cgroup_enabled_mask | enable_mask;

        return unit_create_cgroup(u, new_target_mask, new_enable_mask, state);
}

/* Controllers can only be disabled depth-first, from the leaves of the
 * hierarchy upwards to the unit in question. */
static int unit_realize_cgroup_now_disable(Unit *u, ManagerState state) {
        Iterator i;
        Unit *m;
        void *v;

        assert(u);

        if (u->type != UNIT_SLICE)
                return 0;

        HASHMAP_FOREACH_KEY(v, m, u->dependencies[UNIT_BEFORE], i) {
                CGroupMask target_mask, enable_mask, new_target_mask, new_enable_mask;
                int r;

                if (UNIT_DEREF(m->slice) != u)
                        continue;

                /* The cgroup for this unit might not actually be fully
                 * realised yet, in which case it isn't holding any controllers
                 * open anyway. */
                if (!m->cgroup_path)
                        continue;

                /* We must disable those below us first in order to release the
                 * controller. */
                if (m->type == UNIT_SLICE)
                        (void) unit_realize_cgroup_now_disable(m, state);

                target_mask = unit_get_target_mask(m);
                enable_mask = unit_get_enable_mask(m);

                /* We can only disable in this direction, don't try to enable
                 * anything. */
                if (unit_has_mask_disables_realized(m, target_mask, enable_mask))
                        continue;

                new_target_mask = m->cgroup_realized_mask & target_mask;
                new_enable_mask = m->cgroup_enabled_mask & enable_mask;

                r = unit_create_cgroup(m, new_target_mask, new_enable_mask, state);
                if (r < 0)
                        return r;
        }

        return 0;
}

/* Check if necessary controllers and attributes for a unit are in place.
 *
 * - If so, do nothing.
 * - If not, create paths, move processes over, and set attributes.
 *
 * Controllers can only be *enabled* in a breadth-first way, and *disabled* in
 * a depth-first way. As such the process looks like this:
 *
 * Suppose we have a cgroup hierarchy which looks like this:
 *
 *          root
 *         /    \
 *        /      \
 *       /        \
 *      a          b
 *     / \        / \
 *    c   d      e   f
 *       / \
 *      j   k
 *
 * 1. We want to realise cgroup "d" now.
 * 2. cgroup "a" has DisableControllers=cpu in the associated unit.
 * 3. cgroup "k" just started requesting the memory controller.
 *
 * To make this work we must do the following in order:
 *
 * 1. Disable CPU controller in k, j
 * 2. Disable CPU controller in d
 * 3. Enable memory controller in root
 * 4. Enable memory controller in a
 * 5. Enable memory controller in d
 * 6. Enable memory controller in k
 *
 * Notice that we need to touch j in one direction, but not the other. We also
 * don't go beyond d when disabling -- it's up to "a" to get realized if it
 * wants to disable further. The basic rules are therefore:
 *
 * - If you're disabling something, you need to realise all of the cgroups from
 *   your recursive descendants to the root. This starts from the leaves.
 * - If you're enabling something, you need to realise from the root cgroup
 *   downwards, but you don't need to iterate your recursive descendants.
 *
 * Returns 0 on success and < 0 on failure. */
static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
        CGroupMask target_mask, enable_mask;
        int r;

        assert(u);

        unit_remove_from_cgroup_realize_queue(u);

        target_mask = unit_get_target_mask(u);
        enable_mask = unit_get_enable_mask(u);

        if (unit_has_mask_realized(u, target_mask, enable_mask))
                return 0;

        /* Disable controllers below us, if there are any */
        r = unit_realize_cgroup_now_disable(u, state);
        if (r < 0)
                return r;

        /* Enable controllers above us, if there are any */
        if (UNIT_ISSET(u->slice)) {
                r = unit_realize_cgroup_now_enable(UNIT_DEREF(u->slice), state);
                if (r < 0)
                        return r;
        }

        /* Now actually deal with the cgroup we were trying to realise and set attributes */
        r = unit_create_cgroup(u, target_mask, enable_mask, state);
        if (r < 0)
                return r;

        /* Now, reset the invalidation mask */
        u->cgroup_invalidated_mask = 0;
        return 0;
}

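/* Illustrative trace (not part of the original source) of realizing "d" from the example hierarchy above:
 *
 *     unit_realize_cgroup_now(d)
 *       => unit_realize_cgroup_now_disable(d)      depth-first over d's children, so k and j are
 *                                                  shrunk via unit_create_cgroup() before d itself
 *       => unit_realize_cgroup_now_enable(a)       which first recurses into enable(root), so new
 *                                                  controllers are switched on from the top down
 *       => unit_create_cgroup(d, ...)              finally applies d's own masks and attributes
 */
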
unsigned manager_dispatch_cgroup_realize_queue(Manager *m) {
        ManagerState state;
        unsigned n = 0;
        Unit *i;
        int r;

        assert(m);

        state = manager_state(m);

        while ((i = m->cgroup_realize_queue)) {
                assert(i->in_cgroup_realize_queue);

                if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(i))) {
                        /* Maybe things changed, and the unit is not actually active anymore? */
                        unit_remove_from_cgroup_realize_queue(i);
                        continue;
                }

                r = unit_realize_cgroup_now(i, state);
                if (r < 0)
                        log_warning_errno(r, "Failed to realize cgroups for queued unit %s, ignoring: %m", i->id);

                n++;
        }

        return n;
}

static void unit_add_siblings_to_cgroup_realize_queue(Unit *u) {
        Unit *slice;

        /* This adds the siblings of the specified unit and the
         * siblings of all parent units to the cgroup queue. (But
         * neither the specified unit itself nor the parents.) */

        while ((slice = UNIT_DEREF(u->slice))) {
                Iterator i;
                Unit *m;
                void *v;

                HASHMAP_FOREACH_KEY(v, m, u->dependencies[UNIT_BEFORE], i) {
                        /* Skip units that have a dependency on the slice
                         * but aren't actually in it. */
                        if (UNIT_DEREF(m->slice) != slice)
                                continue;

                        /* No point in doing cgroup application for units
                         * without active processes. */
                        if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(m)))
                                continue;

                        /* If the unit doesn't need any new controllers
                         * and has current ones realized, it doesn't need
                         * any changes. */
                        if (unit_has_mask_realized(m,
                                                   unit_get_target_mask(m),
                                                   unit_get_enable_mask(m)))
                                continue;

                        unit_add_to_cgroup_realize_queue(m);
                }

                u = slice;
        }
}

int unit_realize_cgroup(Unit *u) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* So, here's the deal: when realizing the cgroups for this
         * unit, we need to first create all parents, but there's more
         * actually: for the weight-based controllers we also need to
         * make sure that all our siblings (i.e. units that are in the
         * same slice as we are) have cgroups, too. Otherwise, things
         * would become very uneven as each of their processes would
         * get as much resources as all our group together. This call
         * will synchronously create the parent cgroups, but will
         * defer work on the siblings to the next event loop
         * iteration. */

        /* Add all sibling slices to the cgroup queue. */
        unit_add_siblings_to_cgroup_realize_queue(u);

        /* And realize this one now (and apply the values) */
        return unit_realize_cgroup_now(u, manager_state(u->manager));
}

void unit_release_cgroup(Unit *u) {
        assert(u);

        /* Forgets all cgroup details for this cgroup — but does *not* destroy the cgroup. This is hence OK to call
         * when we close down everything for reexecution, where we really want to leave the cgroup in place. */

        if (u->cgroup_path) {
                (void) hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
                u->cgroup_path = mfree(u->cgroup_path);
        }

        if (u->cgroup_inotify_wd >= 0) {
                if (inotify_rm_watch(u->manager->cgroup_inotify_fd, u->cgroup_inotify_wd) < 0)
                        log_unit_debug_errno(u, errno, "Failed to remove cgroup inotify watch %i for %s, ignoring: %m", u->cgroup_inotify_wd, u->id);

                (void) hashmap_remove(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd));
                u->cgroup_inotify_wd = -1;
        }
}

void unit_prune_cgroup(Unit *u) {
        bool is_root_slice;
        int r;

        assert(u);

        /* Removes the cgroup, if empty and possible, and stops watching it. */

        if (!u->cgroup_path)
                return;

        (void) unit_get_cpu_usage(u, NULL); /* Cache the last CPU usage value before we destroy the cgroup */

        is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);

        r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !is_root_slice);
        if (r < 0)
                log_unit_debug_errno(u, r, "Failed to destroy cgroup %s, ignoring: %m", u->cgroup_path);

        if (is_root_slice)
                return;

        unit_release_cgroup(u);

        u->cgroup_realized = false;
        u->cgroup_realized_mask = 0;
        u->cgroup_enabled_mask = 0;

        u->bpf_device_control_installed = bpf_program_unref(u->bpf_device_control_installed);
}

int unit_search_main_pid(Unit *u, pid_t *ret) {
        _cleanup_fclose_ FILE *f = NULL;
        pid_t pid = 0, npid;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENXIO;

        r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, &f);
        if (r < 0)
                return r;

        while (cg_read_pid(f, &npid) > 0) {

                if (npid == pid)
                        continue;

                if (pid_is_my_child(npid) == 0)
                        continue;

                if (pid != 0)
                        /* Dang, there's more than one daemonized PID
                         * in this group, so we don't know what process
                         * is the main process. */
                        return -ENODATA;

                pid = npid;
        }

        *ret = pid;
        return 0;
}

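/* Hypothetical caller pattern, for illustration only: picking up a main PID that was never reported via
 * sd_notify():
 *
 *     pid_t pid;
 *     if (unit_search_main_pid(u, &pid) >= 0)
 *             log_unit_debug(u, "Guessed main PID " PID_FMT " from cgroup contents.", pid);
 *
 * Note the -ENODATA case above: with more than one daemonized process in the cgroup the guess would be
 * ambiguous, so no PID is returned at all. */
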
static int unit_watch_pids_in_path(Unit *u, const char *path) {
        _cleanup_closedir_ DIR *d = NULL;
        _cleanup_fclose_ FILE *f = NULL;
        int ret = 0, r;

        assert(u);
        assert(path);

        r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f);
        if (r < 0)
                ret = r;
        else {
                pid_t pid;

                while ((r = cg_read_pid(f, &pid)) > 0) {
                        r = unit_watch_pid(u, pid, false);
                        if (r < 0 && ret >= 0)
                                ret = r;
                }

                if (r < 0 && ret >= 0)
                        ret = r;
        }

        r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d);
        if (r < 0) {
                if (ret >= 0)
                        ret = r;
        } else {
                char *fn;

                while ((r = cg_read_subgroup(d, &fn)) > 0) {
                        _cleanup_free_ char *p = NULL;

                        p = strjoin(path, "/", fn);
                        free(fn);

                        if (!p)
                                return -ENOMEM;

                        r = unit_watch_pids_in_path(u, p);
                        if (r < 0 && ret >= 0)
                                ret = r;
                }

                if (r < 0 && ret >= 0)
                        ret = r;
        }

        return ret;
}

int unit_synthesize_cgroup_empty_event(Unit *u) {
        int r;

        assert(u);

        /* Enqueue a synthetic cgroup empty event if this unit doesn't watch any PIDs anymore. This is compatibility
         * support for non-unified systems where notifications aren't reliable, and hence we need to take whatever we
         * can get as notification source as soon as we stopped having any useful PIDs to watch for. */

        if (!u->cgroup_path)
                return -ENOENT;

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return r;
        if (r > 0) /* On unified we have reliable notifications, and don't need this */
                return 0;

        if (!set_isempty(u->pids))
                return 0;

        unit_add_to_cgroup_empty_queue(u);
        return 0;
}

int unit_watch_all_pids(Unit *u) {
        int r;

        assert(u);

        /* Adds all PIDs from our cgroup to the set of PIDs we
         * watch. This is a fallback logic for cases where we do not
         * get reliable cgroup empty notifications: we try to use
         * SIGCHLD as replacement. */

        if (!u->cgroup_path)
                return -ENOENT;

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return r;
        if (r > 0) /* On unified we can use proper notifications */
                return 0;

        return unit_watch_pids_in_path(u, u->cgroup_path);
}

static int on_cgroup_empty_event(sd_event_source *s, void *userdata) {
        Manager *m = userdata;
        Unit *u;
        int r;

        assert(s);
        assert(m);

        u = m->cgroup_empty_queue;
        if (!u)
                return 0;

        assert(u->in_cgroup_empty_queue);
        u->in_cgroup_empty_queue = false;
        LIST_REMOVE(cgroup_empty_queue, m->cgroup_empty_queue, u);

        if (m->cgroup_empty_queue) {
                /* More stuff queued, let's make sure we remain enabled */
                r = sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);
                if (r < 0)
                        log_debug_errno(r, "Failed to reenable cgroup empty event source, ignoring: %m");
        }

        unit_add_to_gc_queue(u);

        if (UNIT_VTABLE(u)->notify_cgroup_empty)
                UNIT_VTABLE(u)->notify_cgroup_empty(u);

        return 0;
}

void unit_add_to_cgroup_empty_queue(Unit *u) {
        int r;

        assert(u);

        /* Note that there are four different ways how cgroup empty events reach us:
         *
         * 1. On the unified hierarchy we get an inotify event on the cgroup
         *
         * 2. On the legacy hierarchy, when running in system mode, we get a datagram on the cgroup agent socket
         *
         * 3. On the legacy hierarchy, when running in user mode, we get a D-Bus signal on the system bus
         *
         * 4. On the legacy hierarchy, in service units we start watching all processes of the cgroup for SIGCHLD as
         *    soon as we get one SIGCHLD, to deal with unreliable cgroup notifications.
         *
         * Regardless which way we got the notification, we'll verify it here, and then add it to a separate
         * queue. This queue will be dispatched at a lower priority than the SIGCHLD handler, so that we always use
         * SIGCHLD if we can get it first, and only use the cgroup empty notifications if there's no SIGCHLD pending
         * (which might happen if the cgroup doesn't contain processes that are our own child, which is typically the
         * case for scope units). */

        if (u->in_cgroup_empty_queue)
                return;

        /* Let's verify that the cgroup is really empty */
        if (!u->cgroup_path)
                return;

        r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
        if (r < 0) {
                log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
                return;
        }
        if (r == 0)
                return;

        LIST_PREPEND(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
        u->in_cgroup_empty_queue = true;

        /* Trigger the defer event */
        r = sd_event_source_set_enabled(u->manager->cgroup_empty_event_source, SD_EVENT_ONESHOT);
        if (r < 0)
                log_debug_errno(r, "Failed to enable cgroup empty event source: %m");
}

static int on_cgroup_inotify_event(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *m = userdata;

        assert(s);
        assert(fd >= 0);
        assert(m);

        for (;;) {
                union inotify_event_buffer buffer;
                struct inotify_event *e;
                ssize_t l;

                l = read(fd, &buffer, sizeof(buffer));
                if (l < 0) {
                        if (IN_SET(errno, EINTR, EAGAIN))
                                return 0;

                        return log_error_errno(errno, "Failed to read control group inotify events: %m");
                }

                FOREACH_INOTIFY_EVENT(e, buffer, l) {
                        Unit *u;

                        if (e->wd < 0)
                                /* Queue overflow has no watch descriptor */
                                continue;

                        if (e->mask & IN_IGNORED)
                                /* The watch was just removed */
                                continue;

                        u = hashmap_get(m->cgroup_inotify_wd_unit, INT_TO_PTR(e->wd));
                        if (!u) /* Note that inotify might deliver
                                 * events for a watch even after it
                                 * was removed, because it was queued
                                 * before the removal. Let's ignore
                                 * this here safely. */
                                continue;

                        unit_add_to_cgroup_empty_queue(u);
                }
        }
}

static int cg_bpf_mask_supported(CGroupMask *ret) {
        CGroupMask mask = 0;
        int r;

        /* BPF-based firewall */
        r = bpf_firewall_supported();
        if (r > 0)
                mask |= CGROUP_MASK_BPF_FIREWALL;

        /* BPF-based device access control */
        r = bpf_devices_supported();
        if (r > 0)
                mask |= CGROUP_MASK_BPF_DEVICES;

        *ret = mask;
        return 0;
}

int manager_setup_cgroup(Manager *m) {
        _cleanup_free_ char *path = NULL;
        const char *scope_path;
        CGroupController c;
        int r, all_unified;
        CGroupMask mask;
        char *e;

        assert(m);

        /* 1. Determine hierarchy */
        m->cgroup_root = mfree(m->cgroup_root);
        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root);
        if (r < 0)
                return log_error_errno(r, "Cannot determine cgroup we are running in: %m");

        /* Chop off the init scope, if we are already located in it */
        e = endswith(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);

        /* LEGACY: Also chop off the system slice if we are in
         * it. This is to support live upgrades from older systemd
         * versions where PID 1 was moved there. Also see
         * cg_get_root_path(). */
        if (!e && MANAGER_IS_SYSTEM(m)) {
                e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE);
                if (!e)
                        e = endswith(m->cgroup_root, "/system"); /* even more legacy */
        }
        if (e)
                *e = 0;

        /* And make sure to store away the root value without trailing slash, even for the root dir, so that we can
         * easily prepend it everywhere. */
        delete_trailing_chars(m->cgroup_root, "/");

        /* 2. Show data */
        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
        if (r < 0)
                return log_error_errno(r, "Cannot find cgroup mount point: %m");

        r = cg_unified_flush();
        if (r < 0)
                return log_error_errno(r, "Couldn't determine if we are running in the unified hierarchy: %m");

        all_unified = cg_all_unified();
        if (all_unified < 0)
                return log_error_errno(all_unified, "Couldn't determine whether we are in all unified mode: %m");
        if (all_unified > 0)
                log_debug("Unified cgroup hierarchy is located at %s.", path);
        else {
                r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
                if (r < 0)
                        return log_error_errno(r, "Failed to determine whether systemd's own controller is in unified mode: %m");
                if (r > 0)
                        log_debug("Unified cgroup hierarchy is located at %s. Controllers are on legacy hierarchies.", path);
                else
                        log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER_LEGACY ". File system hierarchy is at %s.", path);
        }

        /* 3. Allocate cgroup empty defer event source */
        m->cgroup_empty_event_source = sd_event_source_unref(m->cgroup_empty_event_source);
        r = sd_event_add_defer(m->event, &m->cgroup_empty_event_source, on_cgroup_empty_event, m);
        if (r < 0)
                return log_error_errno(r, "Failed to create cgroup empty event source: %m");

        /* Schedule cgroup empty checks early, but after having processed service notification messages or
         * SIGCHLD signals, so that a cgroup running empty is always just the last safety net of
         * notification, and we have collected the metadata the notification and SIGCHLD stuff offers first. */
        r = sd_event_source_set_priority(m->cgroup_empty_event_source, SD_EVENT_PRIORITY_NORMAL-5);
        if (r < 0)
                return log_error_errno(r, "Failed to set priority of cgroup empty event source: %m");

        r = sd_event_source_set_enabled(m->cgroup_empty_event_source, SD_EVENT_OFF);
        if (r < 0)
                return log_error_errno(r, "Failed to disable cgroup empty event source: %m");

        (void) sd_event_source_set_description(m->cgroup_empty_event_source, "cgroup-empty");

        /* 4. Install notifier inotify object, or agent */
        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0) {

                /* In the unified hierarchy we can get cgroup empty notifications via inotify. */

                m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
                safe_close(m->cgroup_inotify_fd);

                m->cgroup_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
                if (m->cgroup_inotify_fd < 0)
                        return log_error_errno(errno, "Failed to create control group inotify object: %m");

                r = sd_event_add_io(m->event, &m->cgroup_inotify_event_source, m->cgroup_inotify_fd, EPOLLIN, on_cgroup_inotify_event, m);
                if (r < 0)
                        return log_error_errno(r, "Failed to watch control group inotify object: %m");

                /* Process cgroup empty notifications early. Note that when this event is dispatched it'll
                 * just add the unit to a cgroup empty queue, hence let's run earlier than that. Also see
                 * handling of cgroup agent notifications, for the classic cgroup hierarchy support. */
                r = sd_event_source_set_priority(m->cgroup_inotify_event_source, SD_EVENT_PRIORITY_NORMAL-9);
                if (r < 0)
                        return log_error_errno(r, "Failed to set priority of inotify event source: %m");

                (void) sd_event_source_set_description(m->cgroup_inotify_event_source, "cgroup-inotify");

        } else if (MANAGER_IS_SYSTEM(m) && manager_owns_host_root_cgroup(m) && !MANAGER_IS_TEST_RUN(m)) {

                /* On the legacy hierarchy we only get notifications via cgroup agents. (Which isn't really reliable,
                 * since it does not generate events when control groups with children run empty.) */

                r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, SYSTEMD_CGROUP_AGENT_PATH);
                if (r < 0)
                        log_warning_errno(r, "Failed to install release agent, ignoring: %m");
                else if (r > 0)
                        log_debug("Installed release agent.");
                else if (r == 0)
                        log_debug("Release agent already installed.");
        }

        /* 5. Make sure we are in the special "init.scope" unit in the root slice. */
        scope_path = strjoina(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
        r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
        if (r >= 0) {
                /* Also, move all other userspace processes remaining in the root cgroup into that scope. */
                r = cg_migrate(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
                if (r < 0)
                        log_warning_errno(r, "Couldn't move remaining userspace processes, ignoring: %m");

                /* 6. And pin it, so that it cannot be unmounted */
                safe_close(m->pin_cgroupfs_fd);
                m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
                if (m->pin_cgroupfs_fd < 0)
                        return log_error_errno(errno, "Failed to open pin file: %m");

        } else if (!MANAGER_IS_TEST_RUN(m))
                return log_error_errno(r, "Failed to create %s control group: %m", scope_path);

        /* 7. Always enable hierarchical support if it exists... */
        if (!all_unified && !MANAGER_IS_TEST_RUN(m))
                (void) cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");

        /* 8. Figure out which controllers are supported */
        r = cg_mask_supported(&m->cgroup_supported);
        if (r < 0)
                return log_error_errno(r, "Failed to determine supported controllers: %m");

        /* 9. Figure out which bpf-based pseudo-controllers are supported */
        r = cg_bpf_mask_supported(&mask);
        if (r < 0)
                return log_error_errno(r, "Failed to determine supported bpf-based pseudo-controllers: %m");
        m->cgroup_supported |= mask;

        /* 10. Log which controllers are supported */
        for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++)
                log_debug("Controller '%s' supported: %s", cgroup_controller_to_string(c), yes_no(m->cgroup_supported & CGROUP_CONTROLLER_TO_MASK(c)));

        return 0;
}

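/* For orientation (an illustrative sketch, not how this file does it): the "all unified" check performed by
 * cg_all_unified() ultimately boils down to /sys/fs/cgroup itself being a cgroup2 mount, along these lines:
 *
 *     struct statfs fs;
 *     bool unified = statfs("/sys/fs/cgroup", &fs) >= 0 &&
 *                    F_TYPE_EQUAL(fs.f_type, CGROUP2_SUPER_MAGIC);
 *
 * On hybrid systems that check fails and only systemd's own named hierarchy may be unified, which is why the
 * code above distinguishes cg_all_unified() from cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER). */
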
void manager_shutdown_cgroup(Manager *m, bool delete) {
        assert(m);

        /* We can't really delete the group, since we are in it. But
         * let's trim it. */
        if (delete && m->cgroup_root && m->test_run_flags != MANAGER_TEST_RUN_MINIMAL)
                (void) cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);

        m->cgroup_empty_event_source = sd_event_source_unref(m->cgroup_empty_event_source);

        m->cgroup_inotify_wd_unit = hashmap_free(m->cgroup_inotify_wd_unit);

        m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
        m->cgroup_inotify_fd = safe_close(m->cgroup_inotify_fd);

        m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd);

        m->cgroup_root = mfree(m->cgroup_root);
}

Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) {
        char *p;
        Unit *u;

        assert(m);
        assert(cgroup);

        u = hashmap_get(m->cgroup_unit, cgroup);
        if (u)
                return u;

        p = strdupa(cgroup);
        for (;;) {
                char *e;

                e = strrchr(p, '/');
                if (!e || e == p)
                        return hashmap_get(m->cgroup_unit, SPECIAL_ROOT_SLICE);

                *e = 0;

                u = hashmap_get(m->cgroup_unit, p);
                if (u)
                        return u;
        }
}

Unit *manager_get_unit_by_pid_cgroup(Manager *m, pid_t pid) {
        _cleanup_free_ char *cgroup = NULL;

        assert(m);

        if (!pid_is_valid(pid))
                return NULL;

        if (cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup) < 0)
                return NULL;

        return manager_get_unit_by_cgroup(m, cgroup);
}

Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {
        Unit *u, **array;

        assert(m);

        /* Note that a process might be owned by multiple units, we return only one here, which is good enough for most
         * cases, though not strictly correct. We prefer the one reported by cgroup membership, as that's the most
         * relevant one as children of the process will be assigned to that one, too, before all else. */

        if (!pid_is_valid(pid))
                return NULL;

        if (pid == getpid_cached())
                return hashmap_get(m->units, SPECIAL_INIT_SCOPE);

        u = manager_get_unit_by_pid_cgroup(m, pid);
        if (u)
                return u;

        u = hashmap_get(m->watch_pids, PID_TO_PTR(pid));
        if (u)
                return u;

        array = hashmap_get(m->watch_pids, PID_TO_PTR(-pid));
        if (array)
                return array[0];

        return NULL;
}

int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
        Unit *u;

        assert(m);
        assert(cgroup);

        /* Called on the legacy hierarchy whenever we get an explicit cgroup notification from the cgroup agent process
         * or from the --system instance */

        log_debug("Got cgroup empty notification for: %s", cgroup);

        u = manager_get_unit_by_cgroup(m, cgroup);
        if (!u)
                return 0;

        unit_add_to_cgroup_empty_queue(u);
        return 1;
}

int unit_get_memory_current(Unit *u, uint64_t *ret) {
        _cleanup_free_ char *v = NULL;
        int r;

        assert(u);
        assert(ret);

        if (!UNIT_CGROUP_BOOL(u, memory_accounting))
                return -ENODATA;

        if (!u->cgroup_path)
                return -ENODATA;

        /* The root cgroup doesn't expose this information, let's get it from /proc instead */
        if (unit_has_host_root_cgroup(u))
                return procfs_memory_get_used(ret);

        if ((u->cgroup_realized_mask & CGROUP_MASK_MEMORY) == 0)
                return -ENODATA;

        r = cg_all_unified();
        if (r < 0)
                return r;
        if (r > 0)
                r = cg_get_attribute("memory", u->cgroup_path, "memory.current", &v);
        else
                r = cg_get_attribute("memory", u->cgroup_path, "memory.usage_in_bytes", &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        return safe_atou64(v, ret);
}

int unit_get_tasks_current(Unit *u, uint64_t *ret) {
        _cleanup_free_ char *v = NULL;
        int r;

        assert(u);
        assert(ret);

        if (!UNIT_CGROUP_BOOL(u, tasks_accounting))
                return -ENODATA;

        if (!u->cgroup_path)
                return -ENODATA;

        /* The root cgroup doesn't expose this information, let's get it from /proc instead */
        if (unit_has_host_root_cgroup(u))
                return procfs_tasks_get_current(ret);

        if ((u->cgroup_realized_mask & CGROUP_MASK_PIDS) == 0)
                return -ENODATA;

        r = cg_get_attribute("pids", u->cgroup_path, "pids.current", &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        return safe_atou64(v, ret);
}

static int unit_get_cpu_usage_raw(Unit *u, nsec_t *ret) {
        _cleanup_free_ char *v = NULL;
        uint64_t ns;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENODATA;

        /* The root cgroup doesn't expose this information, let's get it from /proc instead */
        if (unit_has_host_root_cgroup(u))
                return procfs_cpu_get_usage(ret);

        /* Requisite controllers for CPU accounting are not enabled */
        if ((get_cpu_accounting_mask() & ~u->cgroup_realized_mask) != 0)
                return -ENODATA;

        r = cg_all_unified();
        if (r < 0)
                return r;
        if (r > 0) {
                _cleanup_free_ char *val = NULL;
                uint64_t us;

                r = cg_get_keyed_attribute("cpu", u->cgroup_path, "cpu.stat", STRV_MAKE("usage_usec"), &val);
                if (IN_SET(r, -ENOENT, -ENXIO))
                        return -ENODATA;
                if (r < 0)
                        return r;

                r = safe_atou64(val, &us);
                if (r < 0)
                        return r;

                ns = us * NSEC_PER_USEC;
        } else {
                r = cg_get_attribute("cpuacct", u->cgroup_path, "cpuacct.usage", &v);
                if (r == -ENOENT)
                        return -ENODATA;
                if (r < 0)
                        return r;

                r = safe_atou64(v, &ns);
                if (r < 0)
                        return r;
        }

        *ret = ns;
        return 0;
}

int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
        nsec_t ns;
        int r;

        assert(u);

        /* Retrieve the current CPU usage counter. This will subtract the CPU counter taken when the unit was
         * started. If the cgroup has been removed already, returns the last cached value. To cache the value, simply
         * call this function with a NULL return value. */

        if (!UNIT_CGROUP_BOOL(u, cpu_accounting))
                return -ENODATA;

        r = unit_get_cpu_usage_raw(u, &ns);
        if (r == -ENODATA && u->cpu_usage_last != NSEC_INFINITY) {
                /* If we can't get the CPU usage anymore (because the cgroup was already removed, for example), use our
                 * cached value. */

                if (ret)
                        *ret = u->cpu_usage_last;
                return 0;
        }
        if (r < 0)
                return r;

        if (ns > u->cpu_usage_base)
                ns -= u->cpu_usage_base;
        else
                ns = 0;

        u->cpu_usage_last = ns;
        if (ret)
                *ret = ns;

        return 0;
}

int unit_get_ip_accounting(
                Unit *u,
                CGroupIPAccountingMetric metric,
                uint64_t *ret) {

        uint64_t value;
        int fd, r;

        assert(u);
        assert(metric >= 0);
        assert(metric < _CGROUP_IP_ACCOUNTING_METRIC_MAX);
        assert(ret);

        if (!UNIT_CGROUP_BOOL(u, ip_accounting))
                return -ENODATA;

        fd = IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_INGRESS_PACKETS) ?
                u->ip_accounting_ingress_map_fd :
                u->ip_accounting_egress_map_fd;
        if (fd < 0)
                return -ENODATA;

        if (IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
                r = bpf_firewall_read_accounting(fd, &value, NULL);
        else
                r = bpf_firewall_read_accounting(fd, NULL, &value);
        if (r < 0)
                return r;

        /* Add in additional metrics from a previous runtime. Note that when reexecing/reloading the daemon we compile
         * all BPF programs and maps anew, but serialize the old counters. When deserializing we store them in the
         * ip_accounting_extra[] field, and add them in here transparently. */

        *ret = value + u->ip_accounting_extra[metric];

        return r;
}

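/* Usage sketch (illustrative): collecting both byte counters for a status display. -ENODATA signals that IP
 * accounting is off or the BPF maps are not installed, rather than a hard error, so callers may treat it as
 * zero:
 *
 *     uint64_t ingress = 0, egress = 0;
 *     (void) unit_get_ip_accounting(u, CGROUP_IP_INGRESS_BYTES, &ingress);
 *     (void) unit_get_ip_accounting(u, CGROUP_IP_EGRESS_BYTES, &egress);
 */
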
int unit_reset_cpu_accounting(Unit *u) {
        nsec_t ns;
        int r;

        assert(u);

        u->cpu_usage_last = NSEC_INFINITY;

        r = unit_get_cpu_usage_raw(u, &ns);
        if (r < 0) {
                u->cpu_usage_base = 0;
                return r;
        }

        u->cpu_usage_base = ns;
        return 0;
}

int unit_reset_ip_accounting(Unit *u) {
        int r = 0, q = 0;

        assert(u);

        if (u->ip_accounting_ingress_map_fd >= 0)
                r = bpf_firewall_reset_accounting(u->ip_accounting_ingress_map_fd);

        if (u->ip_accounting_egress_map_fd >= 0)
                q = bpf_firewall_reset_accounting(u->ip_accounting_egress_map_fd);

        zero(u->ip_accounting_extra);

        return r < 0 ? r : q;
}

void unit_invalidate_cgroup(Unit *u, CGroupMask m) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return;

        if (m == 0)
                return;

        /* always invalidate compat pairs together */
        if (m & (CGROUP_MASK_IO | CGROUP_MASK_BLKIO))
                m |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;

        if (m & (CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT))
                m |= CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT;

        if (FLAGS_SET(u->cgroup_invalidated_mask, m)) /* NOP? */
                return;

        u->cgroup_invalidated_mask |= m;
        unit_add_to_cgroup_realize_queue(u);
}

void unit_invalidate_cgroup_bpf(Unit *u) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return;

        if (u->cgroup_invalidated_mask & CGROUP_MASK_BPF_FIREWALL) /* NOP? */
                return;

        u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
        unit_add_to_cgroup_realize_queue(u);

        /* If we are a slice unit, we also need to compile a new BPF program for all our children, as the IP access
         * list of our children includes our own. */
        if (u->type == UNIT_SLICE) {
                Unit *member;
                Iterator i;
                void *v;

                HASHMAP_FOREACH_KEY(v, member, u->dependencies[UNIT_BEFORE], i) {
                        if (UNIT_DEREF(member->slice) == u)
                                unit_invalidate_cgroup_bpf(member);
                }
        }
}

bool unit_cgroup_delegate(Unit *u) {
        CGroupContext *c;

        assert(u);

        if (!UNIT_VTABLE(u)->can_delegate)
                return false;

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        return c->delegate;
}

void manager_invalidate_startup_units(Manager *m) {
        Iterator i;
        Unit *u;

        assert(m);

        SET_FOREACH(u, m->startup_units, i)
                unit_invalidate_cgroup(u, CGROUP_MASK_CPU|CGROUP_MASK_IO|CGROUP_MASK_BLKIO);
}

static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
        [CGROUP_AUTO] = "auto",
        [CGROUP_CLOSED] = "closed",
        [CGROUP_STRICT] = "strict",
};

DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);

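/* DEFINE_STRING_TABLE_LOOKUP() generates cgroup_device_policy_to_string() and
 * cgroup_device_policy_from_string() from the table above. Illustrative round-trip:
 *
 *     assert_se(streq(cgroup_device_policy_to_string(CGROUP_CLOSED), "closed"));
 *     assert_se(cgroup_device_policy_from_string("strict") == CGROUP_STRICT);
 */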