/* SPDX-License-Identifier: LGPL-2.1+ */

#include "sd-messages.h"

#include "alloc-util.h"
#include "blockdev-util.h"
#include "bpf-devices.h"
#include "bpf-firewall.h"
#include "btrfs-util.h"
#include "bus-error.h"
#include "cgroup-setup.h"
#include "cgroup-util.h"
#include "limits-util.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
#include "procfs-util.h"
#include "stat-util.h"
#include "stdio-util.h"
#include "string-table.h"
#include "string-util.h"
#define CGROUP_CPU_QUOTA_DEFAULT_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)
/* Returns the log level to use when cgroup attribute writes fail. When an attribute is missing or we have access
 * problems we downgrade to LOG_DEBUG. This is supposed to be nice to container managers and kernels which want to mask
 * out specific attributes from us. */
#define LOG_LEVEL_CGROUP_WRITE(r) (IN_SET(abs(r), ENOENT, EROFS, EACCES, EPERM) ? LOG_DEBUG : LOG_WARNING)
uint64_t tasks_max_resolve(const TasksMax *tasks_max) {
        /* If .scale == 0, just return the value with no fancy processing. */
        if (tasks_max->scale == 0)
                return tasks_max->value;

        return system_tasks_max_scale(tasks_max->value, tasks_max->scale);
}
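
/* Illustrative note (not part of the original source): a percentage such as TasksMax=50%
 * is stored as value=50, scale=100, so the call above resolves it against the system-wide
 * task maximum, i.e. roughly system_tasks_max() * 50 / 100. A plain TasksMax=512 keeps
 * scale at 0 and is returned verbatim. */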
bool manager_owns_host_root_cgroup(Manager *m) {
        assert(m);

        /* Returns true if we are managing the root cgroup. Note that it isn't sufficient to just check whether the
         * group root path equals "/" since that will also be the case if CLONE_NEWCGROUP is in the mix. Since there
         * appears to be no nice way to detect whether we are in a CLONE_NEWCGROUP namespace we instead just check if
         * we run in any kind of container virtualization. */

        if (MANAGER_IS_USER(m))
                return false;

        if (detect_container() > 0)
                return false;

        return empty_or_root(m->cgroup_root);
}
bool unit_has_host_root_cgroup(Unit *u) {
        assert(u);

        /* Returns whether this unit manages the root cgroup. This will return true if this unit is the root slice and
         * the manager manages the root cgroup. */

        if (!manager_owns_host_root_cgroup(u->manager))
                return false;

        return unit_has_name(u, SPECIAL_ROOT_SLICE);
}
static int set_attribute_and_warn(Unit *u, const char *controller, const char *attribute, const char *value) {
        int r;

        r = cg_set_attribute(controller, u->cgroup_path, attribute, value);
        if (r < 0)
                log_unit_full(u, LOG_LEVEL_CGROUP_WRITE(r), r, "Failed to set '%s' attribute on '%s' to '%.*s': %m",
                              strna(attribute), isempty(u->cgroup_path) ? "/" : u->cgroup_path, (int) strcspn(value, NEWLINE), value);

        return r;
}
static void cgroup_compat_warn(void) {
        static bool cgroup_compat_warned = false;

        if (cgroup_compat_warned)
                return;

        log_warning("cgroup compatibility translation between legacy and unified hierarchy settings activated. "
                    "See cgroup-compat debug messages for details.");

        cgroup_compat_warned = true;
}

#define log_cgroup_compat(unit, fmt, ...) do {                                  \
                cgroup_compat_warn();                                           \
                log_unit_debug(unit, "cgroup-compat: " fmt, ##__VA_ARGS__);     \
        } while (false)
void cgroup_context_init(CGroupContext *c) {
        assert(c);

        /* Initialize everything to the kernel defaults. */

        *c = (CGroupContext) {
                .cpu_weight = CGROUP_WEIGHT_INVALID,
                .startup_cpu_weight = CGROUP_WEIGHT_INVALID,
                .cpu_quota_per_sec_usec = USEC_INFINITY,
                .cpu_quota_period_usec = USEC_INFINITY,

                .cpu_shares = CGROUP_CPU_SHARES_INVALID,
                .startup_cpu_shares = CGROUP_CPU_SHARES_INVALID,

                .memory_high = CGROUP_LIMIT_MAX,
                .memory_max = CGROUP_LIMIT_MAX,
                .memory_swap_max = CGROUP_LIMIT_MAX,

                .memory_limit = CGROUP_LIMIT_MAX,

                .io_weight = CGROUP_WEIGHT_INVALID,
                .startup_io_weight = CGROUP_WEIGHT_INVALID,

                .blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID,
                .startup_blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID,

                .tasks_max = TASKS_MAX_UNSET,
        };
}
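
/* Note (added for clarity, not in the original source): any field not listed in the
 * compound literal above is implicitly zero-initialized by C, which conveniently matches
 * the kernel defaults for the boolean switches such as the *_accounting fields. */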
void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
        assert(c);
        assert(a);

        LIST_REMOVE(device_allow, c->device_allow, a);
        free(a->path);
        free(a);
}

void cgroup_context_free_io_device_weight(CGroupContext *c, CGroupIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->io_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_io_device_latency(CGroupContext *c, CGroupIODeviceLatency *l) {
        assert(c);
        assert(l);

        LIST_REMOVE(device_latencies, c->io_device_latencies, l);
        free(l->path);
        free(l);
}

void cgroup_context_free_io_device_limit(CGroupContext *c, CGroupIODeviceLimit *l) {
        assert(c);
        assert(l);

        LIST_REMOVE(device_limits, c->io_device_limits, l);
        free(l->path);
        free(l);
}

void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->blockio_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b) {
        assert(c);
        assert(b);

        LIST_REMOVE(device_bandwidths, c->blockio_device_bandwidths, b);
        free(b->path);
        free(b);
}
void cgroup_context_done(CGroupContext *c) {
        assert(c);

        while (c->io_device_weights)
                cgroup_context_free_io_device_weight(c, c->io_device_weights);

        while (c->io_device_latencies)
                cgroup_context_free_io_device_latency(c, c->io_device_latencies);

        while (c->io_device_limits)
                cgroup_context_free_io_device_limit(c, c->io_device_limits);

        while (c->blockio_device_weights)
                cgroup_context_free_blockio_device_weight(c, c->blockio_device_weights);

        while (c->blockio_device_bandwidths)
                cgroup_context_free_blockio_device_bandwidth(c, c->blockio_device_bandwidths);

        while (c->device_allow)
                cgroup_context_free_device_allow(c, c->device_allow);

        c->ip_address_allow = ip_address_access_free_all(c->ip_address_allow);
        c->ip_address_deny = ip_address_access_free_all(c->ip_address_deny);

        c->ip_filters_ingress = strv_free(c->ip_filters_ingress);
        c->ip_filters_egress = strv_free(c->ip_filters_egress);

        cpu_set_reset(&c->cpuset_cpus);
        cpu_set_reset(&c->cpuset_mems);
}
static int unit_get_kernel_memory_limit(Unit *u, const char *file, uint64_t *ret) {
        _cleanup_free_ char *raw_kval = NULL;
        uint64_t kval;
        int r;

        assert(u);

        if (!u->cgroup_realized)
                return -EOWNERDEAD;

        r = cg_get_attribute("memory", u->cgroup_path, file, &raw_kval);
        if (r < 0)
                return r;

        if (streq(raw_kval, "max")) {
                *ret = CGROUP_LIMIT_MAX;
                return 0;
        }

        r = safe_atou64(raw_kval, &kval);
        if (r < 0)
                return r;

        *ret = kval;
        return 0;
}
static int unit_compare_memory_limit(Unit *u, const char *property_name, uint64_t *ret_unit_value, uint64_t *ret_kernel_value) {
        uint64_t unit_value;
        CGroupContext *c;
        CGroupMask m;
        const char *file;
        int r;

        /* Compare kernel memcg configuration against our internal systemd state. Unsupported (and will
         * return -ENODATA) on cgroup v1.
         *
         * Returns:
         *
         * <0: On error.
         *  0: If the kernel memory setting doesn't match our configuration.
         * >0: If the kernel memory setting matches our configuration.
         *
         * The following values are only guaranteed to be populated on return >=0:
         *
         * - ret_unit_value will contain our internal expected value for the unit, page-aligned.
         * - ret_kernel_value will contain the actual value presented by the kernel. */

        assert(u);

        r = cg_all_unified();
        if (r < 0)
                return log_debug_errno(r, "Failed to determine cgroup hierarchy version: %m");

        /* Unsupported on v1.
         *
         * We don't return ENOENT, since that could actually mask a genuine problem where somebody else has
         * silently masked the controller. */
        if (r == 0)
                return -ENODATA;

        /* The root slice doesn't have any controller files, so we can't compare anything. */
        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return -ENODATA;

        /* It's possible to have MemoryFoo set without systemd wanting to have the memory controller enabled,
         * for example, in the case of DisableControllers= or cgroup_disable on the kernel command line. To
         * avoid specious errors in these scenarios, check that we even expect the memory controller to be
         * enabled. */
        m = unit_get_target_mask(u);
        if (!FLAGS_SET(m, CGROUP_MASK_MEMORY))
                return -ENODATA;

        c = unit_get_cgroup_context(u);
        assert(c);

        if (streq(property_name, "MemoryLow")) {
                unit_value = unit_get_ancestor_memory_low(u);
                file = "memory.low";
        } else if (streq(property_name, "MemoryMin")) {
                unit_value = unit_get_ancestor_memory_min(u);
                file = "memory.min";
        } else if (streq(property_name, "MemoryHigh")) {
                unit_value = c->memory_high;
                file = "memory.high";
        } else if (streq(property_name, "MemoryMax")) {
                unit_value = c->memory_max;
                file = "memory.max";
        } else if (streq(property_name, "MemorySwapMax")) {
                unit_value = c->memory_swap_max;
                file = "memory.swap.max";
        } else
                return -EINVAL;

        r = unit_get_kernel_memory_limit(u, file, ret_kernel_value);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to parse %s: %m", file);

        /* It's intended (soon) in a future kernel to not expose cgroup memory limits rounded to page
         * boundaries, but instead separate the user-exposed limit, which is whatever userspace told us, from
         * our internal page-counting. To support those future kernels, just check the value itself first
         * without any page-alignment. */
        if (*ret_kernel_value == unit_value) {
                *ret_unit_value = unit_value;
                return 1;
        }

        /* The current kernel behaviour, by comparison, is that even if you write a particular number of
         * bytes into a cgroup memory file, it always returns that number page-aligned down (since the kernel
         * internally stores cgroup limits in pages). As such, so long as it aligns properly, everything is
         * fine. */
        if (unit_value != CGROUP_LIMIT_MAX)
                unit_value = PAGE_ALIGN_DOWN(unit_value);

        *ret_unit_value = unit_value;

        return *ret_kernel_value == *ret_unit_value;
}
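
/* Worked example (illustrative, assuming a 4096-byte page size): a unit with
 * MemoryMax=10000000 is reported back by current kernels as 9998336 bytes, since
 * PAGE_ALIGN_DOWN(10000000) == 2441 * 4096; after the alignment above the comparison
 * therefore still returns 1 even though the raw values differ. */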
#define FORMAT_CGROUP_DIFF_MAX 128
static char *format_cgroup_memory_limit_comparison(char *buf, size_t l, Unit *u, const char *property_name) {
        uint64_t kval, sval;
        int r;

        assert(u);
        assert(buf);
        assert(l > 0);

        r = unit_compare_memory_limit(u, property_name, &sval, &kval);

        /* memory.swap.max is special in that it relies on CONFIG_MEMCG_SWAP (and the default swapaccount=1).
         * In the absence of reliably being able to detect whether memcg swap support is available or not,
         * only complain if the error is not ENOENT. */
        if (r > 0 || IN_SET(r, -ENODATA, -EOWNERDEAD) ||
            (r == -ENOENT && streq(property_name, "MemorySwapMax"))) {
                buf[0] = 0;
                return buf;
        }

        if (r < 0) {
                snprintf(buf, l, " (error getting kernel value: %s)", strerror_safe(r));
                return buf;
        }

        snprintf(buf, l, " (different value in kernel: %" PRIu64 ")", kval);

        return buf;
}
void cgroup_context_dump(Unit *u, FILE* f, const char *prefix) {
        _cleanup_free_ char *disable_controllers_str = NULL, *cpuset_cpus = NULL, *cpuset_mems = NULL;
        CGroupIODeviceLimit *il;
        CGroupIODeviceWeight *iw;
        CGroupIODeviceLatency *l;
        CGroupBlockIODeviceBandwidth *b;
        CGroupBlockIODeviceWeight *w;
        CGroupDeviceAllow *a;
        CGroupContext *c;
        IPAddressAccessItem *iaai;
        char **path;
        char q[FORMAT_TIMESPAN_MAX];
        char v[FORMAT_TIMESPAN_MAX];

        char cda[FORMAT_CGROUP_DIFF_MAX];
        char cdb[FORMAT_CGROUP_DIFF_MAX];
        char cdc[FORMAT_CGROUP_DIFF_MAX];
        char cdd[FORMAT_CGROUP_DIFF_MAX];
        char cde[FORMAT_CGROUP_DIFF_MAX];

        assert(u);
        assert(f);

        c = unit_get_cgroup_context(u);
        assert(c);

        prefix = strempty(prefix);

        (void) cg_mask_to_string(c->disable_controllers, &disable_controllers_str);

        cpuset_cpus = cpu_set_to_range_string(&c->cpuset_cpus);
        cpuset_mems = cpu_set_to_range_string(&c->cpuset_mems);

        fprintf(f,
                "%sCPUAccounting: %s\n"
                "%sIOAccounting: %s\n"
                "%sBlockIOAccounting: %s\n"
                "%sMemoryAccounting: %s\n"
                "%sTasksAccounting: %s\n"
                "%sIPAccounting: %s\n"
                "%sCPUWeight: %" PRIu64 "\n"
                "%sStartupCPUWeight: %" PRIu64 "\n"
                "%sCPUShares: %" PRIu64 "\n"
                "%sStartupCPUShares: %" PRIu64 "\n"
                "%sCPUQuotaPerSecSec: %s\n"
                "%sCPUQuotaPeriodSec: %s\n"
                "%sAllowedCPUs: %s\n"
                "%sAllowedMemoryNodes: %s\n"
                "%sIOWeight: %" PRIu64 "\n"
                "%sStartupIOWeight: %" PRIu64 "\n"
                "%sBlockIOWeight: %" PRIu64 "\n"
                "%sStartupBlockIOWeight: %" PRIu64 "\n"
                "%sDefaultMemoryMin: %" PRIu64 "\n"
                "%sDefaultMemoryLow: %" PRIu64 "\n"
                "%sMemoryMin: %" PRIu64 "%s\n"
                "%sMemoryLow: %" PRIu64 "%s\n"
                "%sMemoryHigh: %" PRIu64 "%s\n"
                "%sMemoryMax: %" PRIu64 "%s\n"
                "%sMemorySwapMax: %" PRIu64 "%s\n"
                "%sMemoryLimit: %" PRIu64 "\n"
                "%sTasksMax: %" PRIu64 "\n"
                "%sDevicePolicy: %s\n"
                "%sDisableControllers: %s\n"
                "%sDelegate: %s\n",
                prefix, yes_no(c->cpu_accounting),
                prefix, yes_no(c->io_accounting),
                prefix, yes_no(c->blockio_accounting),
                prefix, yes_no(c->memory_accounting),
                prefix, yes_no(c->tasks_accounting),
                prefix, yes_no(c->ip_accounting),
                prefix, c->cpu_weight,
                prefix, c->startup_cpu_weight,
                prefix, c->cpu_shares,
                prefix, c->startup_cpu_shares,
                prefix, format_timespan(q, sizeof(q), c->cpu_quota_per_sec_usec, 1),
                prefix, format_timespan(v, sizeof(v), c->cpu_quota_period_usec, 1),
                prefix, strempty(cpuset_cpus),
                prefix, strempty(cpuset_mems),
                prefix, c->io_weight,
                prefix, c->startup_io_weight,
                prefix, c->blockio_weight,
                prefix, c->startup_blockio_weight,
                prefix, c->default_memory_min,
                prefix, c->default_memory_low,
                prefix, c->memory_min, format_cgroup_memory_limit_comparison(cda, sizeof(cda), u, "MemoryMin"),
                prefix, c->memory_low, format_cgroup_memory_limit_comparison(cdb, sizeof(cdb), u, "MemoryLow"),
                prefix, c->memory_high, format_cgroup_memory_limit_comparison(cdc, sizeof(cdc), u, "MemoryHigh"),
                prefix, c->memory_max, format_cgroup_memory_limit_comparison(cdd, sizeof(cdd), u, "MemoryMax"),
                prefix, c->memory_swap_max, format_cgroup_memory_limit_comparison(cde, sizeof(cde), u, "MemorySwapMax"),
                prefix, c->memory_limit,
                prefix, tasks_max_resolve(&c->tasks_max),
                prefix, cgroup_device_policy_to_string(c->device_policy),
                prefix, strempty(disable_controllers_str),
                prefix, yes_no(c->delegate));

        if (c->delegate) {
                _cleanup_free_ char *t = NULL;

                (void) cg_mask_to_string(c->delegate_controllers, &t);

                fprintf(f, "%sDelegateControllers: %s\n",
                        prefix,
                        strempty(t));
        }

        LIST_FOREACH(device_allow, a, c->device_allow)
                fprintf(f,
                        "%sDeviceAllow: %s %s%s%s\n",
                        prefix,
                        a->path,
                        a->r ? "r" : "", a->w ? "w" : "", a->m ? "m" : "");

        LIST_FOREACH(device_weights, iw, c->io_device_weights)
                fprintf(f,
                        "%sIODeviceWeight: %s %" PRIu64 "\n",
                        prefix,
                        iw->path,
                        iw->weight);

        LIST_FOREACH(device_latencies, l, c->io_device_latencies)
                fprintf(f,
                        "%sIODeviceLatencyTargetSec: %s %s\n",
                        prefix,
                        l->path,
                        format_timespan(q, sizeof(q), l->target_usec, 1));

        LIST_FOREACH(device_limits, il, c->io_device_limits) {
                char buf[FORMAT_BYTES_MAX];
                CGroupIOLimitType type;

                for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                        if (il->limits[type] != cgroup_io_limit_defaults[type])
                                fprintf(f,
                                        "%s%s: %s %s\n",
                                        prefix,
                                        cgroup_io_limit_type_to_string(type),
                                        il->path,
                                        format_bytes(buf, sizeof(buf), il->limits[type]));
        }

        LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                fprintf(f,
                        "%sBlockIODeviceWeight: %s %" PRIu64 "\n",
                        prefix,
                        w->path,
                        w->weight);

        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                char buf[FORMAT_BYTES_MAX];

                if (b->rbps != CGROUP_LIMIT_MAX)
                        fprintf(f,
                                "%sBlockIOReadBandwidth: %s %s\n",
                                prefix,
                                b->path,
                                format_bytes(buf, sizeof(buf), b->rbps));
                if (b->wbps != CGROUP_LIMIT_MAX)
                        fprintf(f,
                                "%sBlockIOWriteBandwidth: %s %s\n",
                                prefix,
                                b->path,
                                format_bytes(buf, sizeof(buf), b->wbps));
        }

        LIST_FOREACH(items, iaai, c->ip_address_allow) {
                _cleanup_free_ char *k = NULL;

                (void) in_addr_to_string(iaai->family, &iaai->address, &k);
                fprintf(f, "%sIPAddressAllow: %s/%u\n", prefix, strnull(k), iaai->prefixlen);
        }

        LIST_FOREACH(items, iaai, c->ip_address_deny) {
                _cleanup_free_ char *k = NULL;

                (void) in_addr_to_string(iaai->family, &iaai->address, &k);
                fprintf(f, "%sIPAddressDeny: %s/%u\n", prefix, strnull(k), iaai->prefixlen);
        }

        STRV_FOREACH(path, c->ip_filters_ingress)
                fprintf(f, "%sIPIngressFilterPath: %s\n", prefix, *path);

        STRV_FOREACH(path, c->ip_filters_egress)
                fprintf(f, "%sIPEgressFilterPath: %s\n", prefix, *path);
}
int cgroup_add_device_allow(CGroupContext *c, const char *dev, const char *mode) {
        _cleanup_free_ CGroupDeviceAllow *a = NULL;
        _cleanup_free_ char *d = NULL;

        assert(c);
        assert(dev);
        assert(isempty(mode) || in_charset(mode, "rwm"));

        a = new(CGroupDeviceAllow, 1);
        if (!a)
                return -ENOMEM;

        d = strdup(dev);
        if (!d)
                return -ENOMEM;

        *a = (CGroupDeviceAllow) {
                .path = TAKE_PTR(d),
                .r = isempty(mode) || strchr(mode, 'r'),
                .w = isempty(mode) || strchr(mode, 'w'),
                .m = isempty(mode) || strchr(mode, 'm'),
        };

        LIST_PREPEND(device_allow, c->device_allow, a);
        TAKE_PTR(a);

        return 0;
}
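
/* Usage sketch (illustrative, not in the original source):
 *
 *     cgroup_add_device_allow(c, "/dev/null", "rw");
 *
 * prepends an entry granting read/write (but not mknod) access, while an empty mode
 * string grants all of "rwm". Besides device node paths, "block-" and "char-" prefixed
 * major-group names are understood by the consumer in cgroup_apply_devices() below. */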
#define UNIT_DEFINE_ANCESTOR_MEMORY_LOOKUP(entry)                       \
        uint64_t unit_get_ancestor_##entry(Unit *u) {                   \
                CGroupContext *c;                                       \
                                                                        \
                /* 1. Is entry set in this unit? If so, use that.       \
                 * 2. Is the default for this entry set in any          \
                 *    ancestor? If so, use that.                        \
                 * 3. Otherwise, return CGROUP_LIMIT_MIN. */            \
                                                                        \
                assert(u);                                              \
                                                                        \
                c = unit_get_cgroup_context(u);                         \
                if (c && c->entry##_set)                                \
                        return c->entry;                                \
                                                                        \
                while ((u = UNIT_DEREF(u->slice))) {                    \
                        c = unit_get_cgroup_context(u);                 \
                        if (c && c->default_##entry##_set)              \
                                return c->default_##entry;              \
                }                                                       \
                                                                        \
                /* We've reached the root, but nobody had default for   \
                 * this entry set, so set it to the kernel default. */  \
                return CGROUP_LIMIT_MIN;                                \
        }

UNIT_DEFINE_ANCESTOR_MEMORY_LOOKUP(memory_low);
UNIT_DEFINE_ANCESTOR_MEMORY_LOOKUP(memory_min);
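
/* The two instantiations above expand to unit_get_ancestor_memory_low() and
 * unit_get_ancestor_memory_min(), each walking up the slice tree as described in the
 * macro's comment. */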
static void cgroup_xattr_apply(Unit *u) {
        char ids[SD_ID128_STRING_MAX];
        int r;

        assert(u);

        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        if (sd_id128_is_null(u->invocation_id))
                return;

        r = cg_set_xattr(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                         "trusted.invocation_id",
                         sd_id128_to_string(u->invocation_id, ids), 32,
                         0);
        if (r < 0)
                log_unit_debug_errno(u, r, "Failed to set invocation ID on control group %s, ignoring: %m", u->cgroup_path);
}
static int lookup_block_device(const char *p, dev_t *ret) {
        dev_t rdev, dev = 0;
        mode_t mode;
        int r;

        assert(p);
        assert(ret);

        r = device_path_parse_major_minor(p, &mode, &rdev);
        if (r == -ENODEV) { /* not a parsable device node, need to go to disk */
                struct stat st;

                if (stat(p, &st) < 0)
                        return log_warning_errno(errno, "Couldn't stat device '%s': %m", p);

                mode = st.st_mode;
                rdev = (dev_t) st.st_rdev;
                dev = (dev_t) st.st_dev;
        } else if (r < 0)
                return log_warning_errno(r, "Failed to parse major/minor from path '%s': %m", p);

        if (S_ISCHR(mode)) {
                log_warning("Device node '%s' is a character device, but block device needed.", p);
                return -ENOTBLK;
        } else if (S_ISBLK(mode))
                *ret = rdev;
        else if (major(dev) != 0)
                *ret = dev; /* If this is not a device node then use the block device this file is stored on */
        else {
                /* If this is btrfs, getting the backing block device is a bit harder */
                r = btrfs_get_block_device(p, ret);
                if (r < 0 && r != -ENOTTY)
                        return log_warning_errno(r, "Failed to determine block device backing btrfs file system '%s': %m", p);
                if (r == -ENOTTY) {
                        log_warning("'%s' is not a block device node, and file system block device cannot be determined or is not local.", p);
                        return -ENODEV;
                }
        }

        /* If this is a LUKS device, try to get the originating block device */
        (void) block_get_originating(*ret, ret);

        /* If this is a partition, try to get the originating block device */
        (void) block_get_whole_disk(*ret, ret);
        return 0;
}
static bool cgroup_context_has_cpu_weight(CGroupContext *c) {
        return c->cpu_weight != CGROUP_WEIGHT_INVALID ||
                c->startup_cpu_weight != CGROUP_WEIGHT_INVALID;
}

static bool cgroup_context_has_cpu_shares(CGroupContext *c) {
        return c->cpu_shares != CGROUP_CPU_SHARES_INVALID ||
                c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID;
}
static uint64_t cgroup_context_cpu_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->startup_cpu_weight;
        else if (c->cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->cpu_weight;
        else
                return CGROUP_WEIGHT_DEFAULT;
}

static uint64_t cgroup_context_cpu_shares(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->startup_cpu_shares;
        else if (c->cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->cpu_shares;
        else
                return CGROUP_CPU_SHARES_DEFAULT;
}
usec_t cgroup_cpu_adjust_period(usec_t period, usec_t quota, usec_t resolution, usec_t max_period) {
        /* kernel uses a minimum resolution of 1ms, so both period and (quota * period)
         * need to be higher than that boundary. quota is specified in USecPerSec.
         * Additionally, period must be at most max_period. */
        assert(quota > 0);

        return MIN(MAX3(period, resolution, resolution * USEC_PER_SEC / quota), max_period);
}
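
/* Worked example (illustrative, not in the original source): with quota = 10ms of CPU
 * time per second and the 1ms kernel resolution, resolution * USEC_PER_SEC / quota is
 * 100ms, so a requested 50ms period is raised to 100ms; only then does
 * quota * period / USEC_PER_SEC stay at or above the 1ms boundary. */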
static usec_t cgroup_cpu_adjust_period_and_log(Unit *u, usec_t period, usec_t quota) {
        usec_t new_period;

        if (quota == USEC_INFINITY)
                /* Always use default period for infinity quota. */
                return CGROUP_CPU_QUOTA_DEFAULT_PERIOD_USEC;

        if (period == USEC_INFINITY)
                /* Default period was requested. */
                period = CGROUP_CPU_QUOTA_DEFAULT_PERIOD_USEC;

        /* Clamp to interval [1ms, 1s] */
        new_period = cgroup_cpu_adjust_period(period, quota, USEC_PER_MSEC, USEC_PER_SEC);

        if (new_period != period) {
                char v[FORMAT_TIMESPAN_MAX];
                log_unit_full(u, u->warned_clamping_cpu_quota_period ? LOG_DEBUG : LOG_WARNING, 0,
                              "Clamping CPU interval for cpu.max: period is now %s",
                              format_timespan(v, sizeof(v), new_period, 1));
                u->warned_clamping_cpu_quota_period = true;
        }

        return new_period;
}
static void cgroup_apply_unified_cpu_weight(Unit *u, uint64_t weight) {
        char buf[DECIMAL_STR_MAX(uint64_t) + 2];

        xsprintf(buf, "%" PRIu64 "\n", weight);
        (void) set_attribute_and_warn(u, "cpu", "cpu.weight", buf);
}
static void cgroup_apply_unified_cpu_quota(Unit *u, usec_t quota, usec_t period) {
        char buf[(DECIMAL_STR_MAX(usec_t) + 1) * 2 + 1];

        period = cgroup_cpu_adjust_period_and_log(u, period, quota);
        if (quota != USEC_INFINITY)
                xsprintf(buf, USEC_FMT " " USEC_FMT "\n",
                         MAX(quota * period / USEC_PER_SEC, USEC_PER_MSEC), period);
        else
                xsprintf(buf, "max " USEC_FMT "\n", period);
        (void) set_attribute_and_warn(u, "cpu", "cpu.max", buf);
}
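
/* Example of the attribute format (illustrative, not in the original source):
 * CPUQuota=20% (i.e. quota = 200ms of CPU per second) with the default 100ms period
 * results in "20000 100000\n" being written to cpu.max, i.e. 20ms of CPU time per 100ms
 * period; an infinite quota instead yields "max 100000\n". */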
static void cgroup_apply_legacy_cpu_shares(Unit *u, uint64_t shares) {
        char buf[DECIMAL_STR_MAX(uint64_t) + 2];

        xsprintf(buf, "%" PRIu64 "\n", shares);
        (void) set_attribute_and_warn(u, "cpu", "cpu.shares", buf);
}
static void cgroup_apply_legacy_cpu_quota(Unit *u, usec_t quota, usec_t period) {
        char buf[DECIMAL_STR_MAX(usec_t) + 2];

        period = cgroup_cpu_adjust_period_and_log(u, period, quota);

        xsprintf(buf, USEC_FMT "\n", period);
        (void) set_attribute_and_warn(u, "cpu", "cpu.cfs_period_us", buf);

        if (quota != USEC_INFINITY) {
                xsprintf(buf, USEC_FMT "\n", MAX(quota * period / USEC_PER_SEC, USEC_PER_MSEC));
                (void) set_attribute_and_warn(u, "cpu", "cpu.cfs_quota_us", buf);
        } else
                (void) set_attribute_and_warn(u, "cpu", "cpu.cfs_quota_us", "-1\n");
}
static uint64_t cgroup_cpu_shares_to_weight(uint64_t shares) {
        return CLAMP(shares * CGROUP_WEIGHT_DEFAULT / CGROUP_CPU_SHARES_DEFAULT,
                     CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}

static uint64_t cgroup_cpu_weight_to_shares(uint64_t weight) {
        return CLAMP(weight * CGROUP_CPU_SHARES_DEFAULT / CGROUP_WEIGHT_DEFAULT,
                     CGROUP_CPU_SHARES_MIN, CGROUP_CPU_SHARES_MAX);
}
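
/* Worked example (illustrative, not in the original source): since
 * CGROUP_CPU_SHARES_DEFAULT is 1024 and CGROUP_WEIGHT_DEFAULT is 100, CPUShares=2048
 * maps to CPUWeight=200, and the reverse conversion maps a weight of 200 back to 2048
 * shares, with both results clamped to the valid range of the target attribute. */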
static void cgroup_apply_unified_cpuset(Unit *u, const CPUSet *cpus, const char *name) {
        _cleanup_free_ char *buf = NULL;

        buf = cpu_set_to_range_string(cpus);
        if (!buf) {
                log_oom();
                return;
        }

        (void) set_attribute_and_warn(u, "cpuset", name, buf);
}
static bool cgroup_context_has_io_config(CGroupContext *c) {
        return c->io_accounting ||
                c->io_weight != CGROUP_WEIGHT_INVALID ||
                c->startup_io_weight != CGROUP_WEIGHT_INVALID ||
                c->io_device_weights ||
                c->io_device_latencies ||
                c->io_device_limits;
}

static bool cgroup_context_has_blockio_config(CGroupContext *c) {
        return c->blockio_accounting ||
                c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
                c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
                c->blockio_device_weights ||
                c->blockio_device_bandwidths;
}
static uint64_t cgroup_context_io_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_io_weight != CGROUP_WEIGHT_INVALID)
                return c->startup_io_weight;
        else if (c->io_weight != CGROUP_WEIGHT_INVALID)
                return c->io_weight;
        else
                return CGROUP_WEIGHT_DEFAULT;
}

static uint64_t cgroup_context_blkio_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
                return c->startup_blockio_weight;
        else if (c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
                return c->blockio_weight;
        else
                return CGROUP_BLKIO_WEIGHT_DEFAULT;
}
static uint64_t cgroup_weight_blkio_to_io(uint64_t blkio_weight) {
        return CLAMP(blkio_weight * CGROUP_WEIGHT_DEFAULT / CGROUP_BLKIO_WEIGHT_DEFAULT,
                     CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}

static uint64_t cgroup_weight_io_to_blkio(uint64_t io_weight) {
        return CLAMP(io_weight * CGROUP_BLKIO_WEIGHT_DEFAULT / CGROUP_WEIGHT_DEFAULT,
                     CGROUP_BLKIO_WEIGHT_MIN, CGROUP_BLKIO_WEIGHT_MAX);
}
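
/* Worked example (illustrative, not in the original source): with
 * CGROUP_BLKIO_WEIGHT_DEFAULT at 500 and CGROUP_WEIGHT_DEFAULT at 100,
 * BlockIOWeight=1000 converts to IOWeight=200, and IOWeight=200 converts back to a
 * blkio weight of 1000. */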
static void cgroup_apply_io_device_weight(Unit *u, const char *dev_path, uint64_t io_weight) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), io_weight);
        (void) set_attribute_and_warn(u, "io", "io.weight", buf);
}

static void cgroup_apply_blkio_device_weight(Unit *u, const char *dev_path, uint64_t blkio_weight) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), blkio_weight);
        (void) set_attribute_and_warn(u, "blkio", "blkio.weight_device", buf);
}
static void cgroup_apply_io_device_latency(Unit *u, const char *dev_path, usec_t target) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+7+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        if (target != USEC_INFINITY)
                xsprintf(buf, "%u:%u target=%" PRIu64 "\n", major(dev), minor(dev), target);
        else
                xsprintf(buf, "%u:%u target=max\n", major(dev), minor(dev));

        (void) set_attribute_and_warn(u, "io", "io.latency", buf);
}
static void cgroup_apply_io_device_limit(Unit *u, const char *dev_path, uint64_t *limits) {
        char limit_bufs[_CGROUP_IO_LIMIT_TYPE_MAX][DECIMAL_STR_MAX(uint64_t)];
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+(6+DECIMAL_STR_MAX(uint64_t)+1)*4];
        CGroupIOLimitType type;
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                if (limits[type] != cgroup_io_limit_defaults[type])
                        xsprintf(limit_bufs[type], "%" PRIu64, limits[type]);
                else
                        xsprintf(limit_bufs[type], "%s", limits[type] == CGROUP_LIMIT_MAX ? "max" : "0");

        xsprintf(buf, "%u:%u rbps=%s wbps=%s riops=%s wiops=%s\n", major(dev), minor(dev),
                 limit_bufs[CGROUP_IO_RBPS_MAX], limit_bufs[CGROUP_IO_WBPS_MAX],
                 limit_bufs[CGROUP_IO_RIOPS_MAX], limit_bufs[CGROUP_IO_WIOPS_MAX]);
        (void) set_attribute_and_warn(u, "io", "io.max", buf);
}
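
/* Example of the resulting attribute line (illustrative, not in the original source):
 * limiting only the read bandwidth of /dev/sda (major:minor 8:0) to 1 MB/s produces
 * "8:0 rbps=1000000 wbps=max riops=max wiops=max\n", since the unchanged limits keep
 * their CGROUP_LIMIT_MAX defaults and are rendered as "max". */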
static void cgroup_apply_blkio_device_limit(Unit *u, const char *dev_path, uint64_t rbps, uint64_t wbps) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), rbps);
        (void) set_attribute_and_warn(u, "blkio", "blkio.throttle.read_bps_device", buf);

        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), wbps);
        (void) set_attribute_and_warn(u, "blkio", "blkio.throttle.write_bps_device", buf);
}
static bool unit_has_unified_memory_config(Unit *u) {
        CGroupContext *c;

        assert(u);

        c = unit_get_cgroup_context(u);
        assert(c);

        return unit_get_ancestor_memory_min(u) > 0 || unit_get_ancestor_memory_low(u) > 0 ||
               c->memory_high != CGROUP_LIMIT_MAX || c->memory_max != CGROUP_LIMIT_MAX ||
               c->memory_swap_max != CGROUP_LIMIT_MAX;
}
static void cgroup_apply_unified_memory_limit(Unit *u, const char *file, uint64_t v) {
        char buf[DECIMAL_STR_MAX(uint64_t) + 1] = "max\n";

        if (v != CGROUP_LIMIT_MAX)
                xsprintf(buf, "%" PRIu64 "\n", v);

        (void) set_attribute_and_warn(u, "memory", file, buf);
}
static void cgroup_apply_firewall(Unit *u) {
        assert(u);

        /* Best-effort: let's apply IP firewalling and/or accounting if that's enabled */

        if (bpf_firewall_compile(u) < 0)
                return;

        (void) bpf_firewall_load_custom(u);
        (void) bpf_firewall_install(u);
}
static int cgroup_apply_devices(Unit *u) {
        _cleanup_(bpf_program_unrefp) BPFProgram *prog = NULL;
        const char *path;
        CGroupContext *c;
        CGroupDeviceAllow *a;
        CGroupDevicePolicy policy;
        int r;

        assert_se(c = unit_get_cgroup_context(u));
        assert_se(path = u->cgroup_path);

        policy = c->device_policy;

        if (cg_all_unified() > 0) {
                r = bpf_devices_cgroup_init(&prog, policy, c->device_allow);
                if (r < 0)
                        return log_unit_warning_errno(u, r, "Failed to initialize device control bpf program: %m");

        } else {
                /* Changing the devices list of a populated cgroup might result in EINVAL, hence ignore
                 * EINVAL here. */

                if (c->device_allow || policy != CGROUP_DEVICE_POLICY_AUTO)
                        r = cg_set_attribute("devices", path, "devices.deny", "a");
                else
                        r = cg_set_attribute("devices", path, "devices.allow", "a");
                if (r < 0)
                        log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES, -EPERM) ? LOG_DEBUG : LOG_WARNING, r,
                                      "Failed to reset devices.allow/devices.deny: %m");
        }

        bool whitelist_static = policy == CGROUP_DEVICE_POLICY_CLOSED ||
                (policy == CGROUP_DEVICE_POLICY_AUTO && c->device_allow);
        if (whitelist_static)
                (void) bpf_devices_whitelist_static(prog, path);

        bool any = whitelist_static;
        LIST_FOREACH(device_allow, a, c->device_allow) {
                char acc[4], *val;
                unsigned k = 0;

                if (a->r)
                        acc[k++] = 'r';
                if (a->w)
                        acc[k++] = 'w';
                if (a->m)
                        acc[k++] = 'm';

                if (k == 0)
                        continue;

                acc[k++] = 0;

                if (path_startswith(a->path, "/dev/"))
                        r = bpf_devices_whitelist_device(prog, path, a->path, acc);
                else if ((val = startswith(a->path, "block-")))
                        r = bpf_devices_whitelist_major(prog, path, val, 'b', acc);
                else if ((val = startswith(a->path, "char-")))
                        r = bpf_devices_whitelist_major(prog, path, val, 'c', acc);
                else {
                        log_unit_debug(u, "Ignoring device '%s' while writing cgroup attribute.", a->path);
                        continue;
                }

                if (r >= 0)
                        any = true;
        }

        if (prog && !any) {
                log_unit_warning_errno(u, SYNTHETIC_ERRNO(ENODEV), "No devices matched by device filter.");

                /* The kernel verifier would reject a program we would build with the normal intro and outro
                   but no whitelisting rules (outro would contain an unreachable instruction for successful
                   return). */
                policy = CGROUP_DEVICE_POLICY_STRICT;
        }

        r = bpf_devices_apply_policy(prog, policy, any, path, &u->bpf_device_control_installed);
        if (r < 0) {
                static bool warned = false;

                log_full_errno(warned ? LOG_DEBUG : LOG_WARNING, r,
                               "Unit %s configures device ACL, but the local system doesn't seem to support the BPF-based device controller.\n"
                               "Proceeding WITHOUT applying ACL (all devices will be accessible)!\n"
                               "(This warning is only shown for the first loaded unit using device ACL.)", u->id);

                warned = true;
        }
        return r;
}
static void cgroup_context_apply(
                Unit *u,
                CGroupMask apply_mask,
                ManagerState state) {

        const char *path;
        CGroupContext *c;
        bool is_host_root, is_local_root;
        int r;

        assert(u);

        /* Nothing to do? Exit early! */
        if (apply_mask == 0)
                return;

        /* Some cgroup attributes are not supported on the host root cgroup, hence silently ignore them here. And other
         * attributes should only be managed for cgroups further down the tree. */
        is_local_root = unit_has_name(u, SPECIAL_ROOT_SLICE);
        is_host_root = unit_has_host_root_cgroup(u);

        assert_se(c = unit_get_cgroup_context(u));
        assert_se(path = u->cgroup_path);

        if (is_local_root) /* Make sure we don't try to display messages with an empty path. */
                path = "/";

        /* We generally ignore errors caused by read-only mounted cgroup trees (assuming we are running in a container
         * then), and missing cgroups, i.e. EROFS and ENOENT. */

        /* In fully unified mode these attributes don't exist on the host cgroup root. On legacy the weights exist, but
         * setting the weight makes very little sense on the host root cgroup, as there are no other cgroups at this
         * level. The quota exists there too, but any attempt to write to it is refused with EINVAL. Inside of
         * containers we want to leave control of these to the container manager (and if cgroup v2 delegation is used
         * we couldn't even write to them if we wanted to). */
        if ((apply_mask & CGROUP_MASK_CPU) && !is_local_root) {

                if (cg_all_unified() > 0) {
                        uint64_t weight;

                        if (cgroup_context_has_cpu_weight(c))
                                weight = cgroup_context_cpu_weight(c, state);
                        else if (cgroup_context_has_cpu_shares(c)) {
                                uint64_t shares;

                                shares = cgroup_context_cpu_shares(c, state);
                                weight = cgroup_cpu_shares_to_weight(shares);

                                log_cgroup_compat(u, "Applying [Startup]CPUShares=%" PRIu64 " as [Startup]CPUWeight=%" PRIu64 " on %s",
                                                  shares, weight, path);
                        } else
                                weight = CGROUP_WEIGHT_DEFAULT;

                        cgroup_apply_unified_cpu_weight(u, weight);
                        cgroup_apply_unified_cpu_quota(u, c->cpu_quota_per_sec_usec, c->cpu_quota_period_usec);

                } else {
                        uint64_t shares;

                        if (cgroup_context_has_cpu_weight(c)) {
                                uint64_t weight;

                                weight = cgroup_context_cpu_weight(c, state);
                                shares = cgroup_cpu_weight_to_shares(weight);

                                log_cgroup_compat(u, "Applying [Startup]CPUWeight=%" PRIu64 " as [Startup]CPUShares=%" PRIu64 " on %s",
                                                  weight, shares, path);
                        } else if (cgroup_context_has_cpu_shares(c))
                                shares = cgroup_context_cpu_shares(c, state);
                        else
                                shares = CGROUP_CPU_SHARES_DEFAULT;

                        cgroup_apply_legacy_cpu_shares(u, shares);
                        cgroup_apply_legacy_cpu_quota(u, c->cpu_quota_per_sec_usec, c->cpu_quota_period_usec);
                }
        }

        if ((apply_mask & CGROUP_MASK_CPUSET) && !is_local_root) {
                cgroup_apply_unified_cpuset(u, &c->cpuset_cpus, "cpuset.cpus");
                cgroup_apply_unified_cpuset(u, &c->cpuset_mems, "cpuset.mems");
        }

        /* The 'io' controller attributes are not exported on the host's root cgroup (being a pure cgroup v2
         * controller), and in case of containers we want to leave control of these attributes to the container manager
         * (and we couldn't access that stuff anyway, even if we tried if proper delegation is used). */
        if ((apply_mask & CGROUP_MASK_IO) && !is_local_root) {
                char buf[8+DECIMAL_STR_MAX(uint64_t)+1];
                bool has_io, has_blockio;
                uint64_t weight;

                has_io = cgroup_context_has_io_config(c);
                has_blockio = cgroup_context_has_blockio_config(c);

                if (has_io)
                        weight = cgroup_context_io_weight(c, state);
                else if (has_blockio) {
                        uint64_t blkio_weight;

                        blkio_weight = cgroup_context_blkio_weight(c, state);
                        weight = cgroup_weight_blkio_to_io(blkio_weight);

                        log_cgroup_compat(u, "Applying [Startup]BlockIOWeight=%" PRIu64 " as [Startup]IOWeight=%" PRIu64,
                                          blkio_weight, weight);
                } else
                        weight = CGROUP_WEIGHT_DEFAULT;

                xsprintf(buf, "default %" PRIu64 "\n", weight);
                (void) set_attribute_and_warn(u, "io", "io.weight", buf);

                /* FIXME: drop this when distro kernels properly support BFQ through "io.weight"
                 * See also: https://github.com/systemd/systemd/pull/13335 */
                xsprintf(buf, "%" PRIu64 "\n", weight);
                (void) set_attribute_and_warn(u, "io", "io.bfq.weight", buf);

                if (has_io) {
                        CGroupIODeviceLatency *latency;
                        CGroupIODeviceLimit *limit;
                        CGroupIODeviceWeight *w;

                        LIST_FOREACH(device_weights, w, c->io_device_weights)
                                cgroup_apply_io_device_weight(u, w->path, w->weight);

                        LIST_FOREACH(device_limits, limit, c->io_device_limits)
                                cgroup_apply_io_device_limit(u, limit->path, limit->limits);

                        LIST_FOREACH(device_latencies, latency, c->io_device_latencies)
                                cgroup_apply_io_device_latency(u, latency->path, latency->target_usec);

                } else if (has_blockio) {
                        CGroupBlockIODeviceWeight *w;
                        CGroupBlockIODeviceBandwidth *b;

                        LIST_FOREACH(device_weights, w, c->blockio_device_weights) {
                                weight = cgroup_weight_blkio_to_io(w->weight);

                                log_cgroup_compat(u, "Applying BlockIODeviceWeight=%" PRIu64 " as IODeviceWeight=%" PRIu64 " for %s",
                                                  w->weight, weight, w->path);

                                cgroup_apply_io_device_weight(u, w->path, weight);
                        }

                        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                                uint64_t limits[_CGROUP_IO_LIMIT_TYPE_MAX];
                                CGroupIOLimitType type;

                                for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                                        limits[type] = cgroup_io_limit_defaults[type];

                                limits[CGROUP_IO_RBPS_MAX] = b->rbps;
                                limits[CGROUP_IO_WBPS_MAX] = b->wbps;

                                log_cgroup_compat(u, "Applying BlockIO{Read|Write}Bandwidth=%" PRIu64 " %" PRIu64 " as IO{Read|Write}BandwidthMax= for %s",
                                                  b->rbps, b->wbps, b->path);

                                cgroup_apply_io_device_limit(u, b->path, limits);
                        }
                }
        }

        if (apply_mask & CGROUP_MASK_BLKIO) {
                bool has_io, has_blockio;

                has_io = cgroup_context_has_io_config(c);
                has_blockio = cgroup_context_has_blockio_config(c);

                /* Applying a 'weight' never makes sense for the host root cgroup, and for containers this should be
                 * left to our container manager, too. */
                if (!is_local_root) {
                        char buf[DECIMAL_STR_MAX(uint64_t)+1];
                        uint64_t weight;

                        if (has_io) {
                                uint64_t io_weight;

                                io_weight = cgroup_context_io_weight(c, state);
                                weight = cgroup_weight_io_to_blkio(io_weight);

                                log_cgroup_compat(u, "Applying [Startup]IOWeight=%" PRIu64 " as [Startup]BlockIOWeight=%" PRIu64,
                                                  io_weight, weight);
                        } else if (has_blockio)
                                weight = cgroup_context_blkio_weight(c, state);
                        else
                                weight = CGROUP_BLKIO_WEIGHT_DEFAULT;

                        xsprintf(buf, "%" PRIu64 "\n", weight);
                        (void) set_attribute_and_warn(u, "blkio", "blkio.weight", buf);

                        if (has_io) {
                                CGroupIODeviceWeight *w;

                                LIST_FOREACH(device_weights, w, c->io_device_weights) {
                                        weight = cgroup_weight_io_to_blkio(w->weight);

                                        log_cgroup_compat(u, "Applying IODeviceWeight=%" PRIu64 " as BlockIODeviceWeight=%" PRIu64 " for %s",
                                                          w->weight, weight, w->path);

                                        cgroup_apply_blkio_device_weight(u, w->path, weight);
                                }
                        } else if (has_blockio) {
                                CGroupBlockIODeviceWeight *w;

                                LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                                        cgroup_apply_blkio_device_weight(u, w->path, w->weight);
                        }
                }

                /* The bandwidth limits are something that make sense to be applied to the host's root but not container
                 * roots, as there we want the container manager to handle it */
                if (is_host_root || !is_local_root) {
                        if (has_io) {
                                CGroupIODeviceLimit *l;

                                LIST_FOREACH(device_limits, l, c->io_device_limits) {
                                        log_cgroup_compat(u, "Applying IO{Read|Write}Bandwidth=%" PRIu64 " %" PRIu64 " as BlockIO{Read|Write}BandwidthMax= for %s",
                                                          l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX], l->path);

                                        cgroup_apply_blkio_device_limit(u, l->path, l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX]);
                                }
                        } else if (has_blockio) {
                                CGroupBlockIODeviceBandwidth *b;

                                LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths)
                                        cgroup_apply_blkio_device_limit(u, b->path, b->rbps, b->wbps);
                        }
                }
        }

        /* In unified mode 'memory' attributes do not exist on the root cgroup. In legacy mode 'memory.limit_in_bytes'
         * exists on the root cgroup, but any writes to it are refused with EINVAL. And if we run in a container we
         * want to leave control to the container manager (and if proper cgroup v2 delegation is used we couldn't even
         * write to this if we wanted to.) */
        if ((apply_mask & CGROUP_MASK_MEMORY) && !is_local_root) {

                if (cg_all_unified() > 0) {
                        uint64_t max, swap_max = CGROUP_LIMIT_MAX;

                        if (unit_has_unified_memory_config(u)) {
                                max = c->memory_max;
                                swap_max = c->memory_swap_max;
                        } else {
                                max = c->memory_limit;

                                if (max != CGROUP_LIMIT_MAX)
                                        log_cgroup_compat(u, "Applying MemoryLimit=%" PRIu64 " as MemoryMax=", max);
                        }

                        cgroup_apply_unified_memory_limit(u, "memory.min", unit_get_ancestor_memory_min(u));
                        cgroup_apply_unified_memory_limit(u, "memory.low", unit_get_ancestor_memory_low(u));
                        cgroup_apply_unified_memory_limit(u, "memory.high", c->memory_high);
                        cgroup_apply_unified_memory_limit(u, "memory.max", max);
                        cgroup_apply_unified_memory_limit(u, "memory.swap.max", swap_max);

                        (void) set_attribute_and_warn(u, "memory", "memory.oom.group", one_zero(c->memory_oom_group));

                } else {
                        char buf[DECIMAL_STR_MAX(uint64_t) + 1];
                        uint64_t val;

                        if (unit_has_unified_memory_config(u)) {
                                val = c->memory_max;
                                log_cgroup_compat(u, "Applying MemoryMax=%" PRIu64 " as MemoryLimit=", val);
                        } else
                                val = c->memory_limit;

                        if (val == CGROUP_LIMIT_MAX)
                                strncpy(buf, "-1\n", sizeof(buf));
                        else
                                xsprintf(buf, "%" PRIu64 "\n", val);

                        (void) set_attribute_and_warn(u, "memory", "memory.limit_in_bytes", buf);
                }
        }

        /* On cgroup v2 we can apply BPF everywhere. On cgroup v1 we apply it everywhere except for the root of
         * containers, where we leave this to the manager */
        if ((apply_mask & (CGROUP_MASK_DEVICES | CGROUP_MASK_BPF_DEVICES)) &&
            (is_host_root || cg_all_unified() > 0 || !is_local_root))
                (void) cgroup_apply_devices(u);

        if (apply_mask & CGROUP_MASK_PIDS) {

                if (is_host_root) {
                        /* So, the "pids" controller does not expose anything on the root cgroup, in order not to
                         * replicate knobs exposed elsewhere needlessly. We abstract this away here however, and when
                         * the knobs of the root cgroup are modified propagate this to the relevant sysctls. There's a
                         * non-obvious asymmetry however: unlike the cgroup properties we don't really want to take
                         * exclusive ownership of the sysctls, but we still want to honour things if the user sets
                         * limits. Hence we employ sort of a one-way strategy: when the user sets a bounded limit
                         * through us it counts. When the user afterwards unsets it again (i.e. sets it to unbounded)
                         * it also counts. But if the user never set a limit through us (i.e. we are the default of
                         * "unbounded") we leave things unmodified. For this we manage a global boolean that we turn on
                         * the first time we set a limit. Note that this boolean is flushed out on manager reload,
                         * which is desirable so that there's an official way to release control of the sysctl from
                         * systemd: set the limit to unbounded and reload. */

                        if (tasks_max_isset(&c->tasks_max)) {
                                u->manager->sysctl_pid_max_changed = true;
                                r = procfs_tasks_set_limit(tasks_max_resolve(&c->tasks_max));
                        } else if (u->manager->sysctl_pid_max_changed)
                                r = procfs_tasks_set_limit(TASKS_MAX);
                        else
                                r = 0;
                        if (r < 0)
                                log_unit_full(u, LOG_LEVEL_CGROUP_WRITE(r), r,
                                              "Failed to write to tasks limit sysctls: %m");
                }

                /* The attribute itself is not available on the host root cgroup, and in the container case we want to
                 * leave it for the container manager. */
                if (!is_local_root) {
                        if (tasks_max_isset(&c->tasks_max)) {
                                char buf[DECIMAL_STR_MAX(uint64_t) + 1];

                                xsprintf(buf, "%" PRIu64 "\n", tasks_max_resolve(&c->tasks_max));
                                (void) set_attribute_and_warn(u, "pids", "pids.max", buf);
                        } else
                                (void) set_attribute_and_warn(u, "pids", "pids.max", "max\n");
                }
        }

        if (apply_mask & CGROUP_MASK_BPF_FIREWALL)
                cgroup_apply_firewall(u);
}
static bool unit_get_needs_bpf_firewall(Unit *u) {
        CGroupContext *c;
        Unit *p;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        if (c->ip_accounting ||
            c->ip_address_allow ||
            c->ip_address_deny ||
            c->ip_filters_ingress ||
            c->ip_filters_egress)
                return true;

        /* If any parent slice has an IP access list defined, it applies too */
        for (p = UNIT_DEREF(u->slice); p; p = UNIT_DEREF(p->slice)) {
                c = unit_get_cgroup_context(p);
                if (!c)
                        return false;

                if (c->ip_address_allow ||
                    c->ip_address_deny)
                        return true;
        }

        return false;
}
static CGroupMask unit_get_cgroup_mask(Unit *u) {
        CGroupMask mask = 0;
        CGroupContext *c;

        assert(u);

        c = unit_get_cgroup_context(u);
        assert(c);

        /* Figure out which controllers we need, based on the cgroup context object */

        if (c->cpu_accounting)
                mask |= get_cpu_accounting_mask();

        if (cgroup_context_has_cpu_weight(c) ||
            cgroup_context_has_cpu_shares(c) ||
            c->cpu_quota_per_sec_usec != USEC_INFINITY)
                mask |= CGROUP_MASK_CPU;

        if (c->cpuset_cpus.set || c->cpuset_mems.set)
                mask |= CGROUP_MASK_CPUSET;

        if (cgroup_context_has_io_config(c) || cgroup_context_has_blockio_config(c))
                mask |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;

        if (c->memory_accounting ||
            c->memory_limit != CGROUP_LIMIT_MAX ||
            unit_has_unified_memory_config(u))
                mask |= CGROUP_MASK_MEMORY;

        if (c->device_allow ||
            c->device_policy != CGROUP_DEVICE_POLICY_AUTO)
                mask |= CGROUP_MASK_DEVICES | CGROUP_MASK_BPF_DEVICES;

        if (c->tasks_accounting ||
            tasks_max_isset(&c->tasks_max))
                mask |= CGROUP_MASK_PIDS;

        return CGROUP_MASK_EXTEND_JOINED(mask);
}
static CGroupMask unit_get_bpf_mask(Unit *u) {
        CGroupMask mask = 0;

        /* Figure out which controllers we need, based on the cgroup context, possibly taking into account children
         * of the unit. */

        if (unit_get_needs_bpf_firewall(u))
                mask |= CGROUP_MASK_BPF_FIREWALL;

        return mask;
}
CGroupMask unit_get_own_mask(Unit *u) {
        CGroupContext *c;

        /* Returns the mask of controllers the unit needs for itself. If a unit is not properly loaded, return an empty
         * mask, as we shouldn't reflect it in the cgroup hierarchy then. */

        if (u->load_state != UNIT_LOADED)
                return 0;

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        return (unit_get_cgroup_mask(u) | unit_get_bpf_mask(u) | unit_get_delegate_mask(u)) & ~unit_get_ancestor_disable_mask(u);
}
CGroupMask unit_get_delegate_mask(Unit *u) {
        CGroupContext *c;

        /* If delegation is turned on, then turn on selected controllers, unless we are on the legacy hierarchy and the
         * process we fork into is known to drop privileges, and hence shouldn't get access to the controllers.
         *
         * Note that on the unified hierarchy it is safe to delegate controllers to unprivileged services. */

        if (!unit_cgroup_delegate(u))
                return 0;

        if (cg_all_unified() <= 0) {
                ExecContext *e;

                e = unit_get_exec_context(u);
                if (e && !exec_context_maintains_privileges(e))
                        return 0;
        }

        assert_se(c = unit_get_cgroup_context(u));
        return CGROUP_MASK_EXTEND_JOINED(c->delegate_controllers);
}
CGroupMask unit_get_members_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's children require, merged */

        if (u->cgroup_members_mask_valid)
                return u->cgroup_members_mask; /* Use cached value if possible */

        u->cgroup_members_mask = 0;

        if (u->type == UNIT_SLICE) {
                void *v;
                Unit *member;
                Iterator i;

                HASHMAP_FOREACH_KEY(v, member, u->dependencies[UNIT_BEFORE], i) {
                        if (UNIT_DEREF(member->slice) == u)
                                u->cgroup_members_mask |= unit_get_subtree_mask(member); /* note that this calls ourselves again, for the children */
                }
        }

        u->cgroup_members_mask_valid = true;
        return u->cgroup_members_mask;
}
CGroupMask unit_get_siblings_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's siblings
         * require, i.e. the members mask of the unit's parent slice
         * if there is one. */

        if (UNIT_ISSET(u->slice))
                return unit_get_members_mask(UNIT_DEREF(u->slice));

        return unit_get_subtree_mask(u); /* we are the top-level slice */
}
CGroupMask unit_get_disable_mask(Unit *u) {
        CGroupContext *c;

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        return c->disable_controllers;
}
CGroupMask unit_get_ancestor_disable_mask(Unit *u) {
        CGroupMask mask;

        assert(u);
        mask = unit_get_disable_mask(u);

        /* Returns the mask of controllers which are marked as forcibly
         * disabled in any ancestor unit or the unit in question. */

        if (UNIT_ISSET(u->slice))
                mask |= unit_get_ancestor_disable_mask(UNIT_DEREF(u->slice));

        return mask;
}
CGroupMask unit_get_subtree_mask(Unit *u) {

        /* Returns the mask of this subtree, meaning of the group
         * itself and its children. */

        return unit_get_own_mask(u) | unit_get_members_mask(u);
}
CGroupMask unit_get_target_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable
         * for a specific cgroup, i.e. everything it needs itself,
         * plus all that its children need, plus all that its siblings
         * need. This is primarily useful on the legacy cgroup
         * hierarchy, where we need to duplicate each cgroup in each
         * hierarchy that shall be enabled for it. */

        mask = unit_get_own_mask(u) | unit_get_members_mask(u) | unit_get_siblings_mask(u);

        if (mask & CGROUP_MASK_BPF_FIREWALL & ~u->manager->cgroup_supported)
                emit_bpf_firewall_warning(u);

        mask &= u->manager->cgroup_supported;
        mask &= ~unit_get_ancestor_disable_mask(u);

        return mask;
}
CGroupMask unit_get_enable_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable
         * for the children of a specific cgroup. This is primarily
         * useful for the unified cgroup hierarchy, where each cgroup
         * controls which controllers are enabled for its children. */

        mask = unit_get_members_mask(u);
        mask &= u->manager->cgroup_supported;
        mask &= ~unit_get_ancestor_disable_mask(u);

        return mask;
}
void unit_invalidate_cgroup_members_masks(Unit *u) {
        assert(u);

        /* Recurse invalidate the member masks cache all the way up the tree */
        u->cgroup_members_mask_valid = false;

        if (UNIT_ISSET(u->slice))
                unit_invalidate_cgroup_members_masks(UNIT_DEREF(u->slice));
}
const char *unit_get_realized_cgroup_path(Unit *u, CGroupMask mask) {

        /* Returns the realized cgroup path of the specified unit where all specified controllers are available. */

        while (u) {
                if (u->cgroup_path &&
                    u->cgroup_realized &&
                    FLAGS_SET(u->cgroup_realized_mask, mask))
                        return u->cgroup_path;

                u = UNIT_DEREF(u->slice);
        }

        return NULL;
}
static const char *migrate_callback(CGroupMask mask, void *userdata) {
        return unit_get_realized_cgroup_path(userdata, mask);
}
char *unit_default_cgroup_path(const Unit *u) {
        _cleanup_free_ char *escaped = NULL, *slice = NULL;
        int r;

        assert(u);

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return strdup(u->manager->cgroup_root);

        if (UNIT_ISSET(u->slice) && !unit_has_name(UNIT_DEREF(u->slice), SPECIAL_ROOT_SLICE)) {
                r = cg_slice_to_path(UNIT_DEREF(u->slice)->id, &slice);
                if (r < 0)
                        return NULL;
        }

        escaped = cg_escape(u->id);
        if (!escaped)
                return NULL;

        return path_join(empty_to_root(u->manager->cgroup_root), slice, escaped);
}
*u
, const char *path
) {
1663 _cleanup_free_
char *p
= NULL
;
1668 if (streq_ptr(u
->cgroup_path
, path
))
1678 r
= hashmap_put(u
->manager
->cgroup_unit
, p
, u
);
1683 unit_release_cgroup(u
);
1684 u
->cgroup_path
= TAKE_PTR(p
);
int unit_watch_cgroup(Unit *u) {
        _cleanup_free_ char *events = NULL;
        int r;

        assert(u);

        /* Watches the "cgroups.events" attribute of this unit's cgroup for "empty" events, but only if
         * cgroupv2 is available. */

        if (!u->cgroup_path)
                return 0;

        if (u->cgroup_control_inotify_wd >= 0)
                return 0;

        /* Only applies to the unified hierarchy */
        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return log_error_errno(r, "Failed to determine whether the name=systemd hierarchy is unified: %m");
        if (r == 0)
                return 0;

        /* No point in watching the top-level slice, it's never going to run empty. */
        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        r = hashmap_ensure_allocated(&u->manager->cgroup_control_inotify_wd_unit, &trivial_hash_ops);
        if (r < 0)
                return log_oom();

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events", &events);
        if (r < 0)
                return log_oom();

        u->cgroup_control_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, events, IN_MODIFY);
        if (u->cgroup_control_inotify_wd < 0) {

                if (errno == ENOENT) /* If the directory is already gone we don't need to track it, so this
                                      * is not an error */
                        return 0;

                return log_unit_error_errno(u, errno, "Failed to add control inotify watch descriptor for control group %s: %m", u->cgroup_path);
        }

        r = hashmap_put(u->manager->cgroup_control_inotify_wd_unit, INT_TO_PTR(u->cgroup_control_inotify_wd), u);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to add control inotify watch descriptor to hash map: %m");

        return 0;
}
int unit_watch_cgroup_memory(Unit *u) {
        _cleanup_free_ char *events = NULL;
        CGroupContext *c;
        int r;

        assert(u);

        /* Watches the "memory.events" attribute of this unit's cgroup for "oom_kill" events, but only if
         * cgroupv2 is available. */

        if (!u->cgroup_path)
                return 0;

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        /* The "memory.events" attribute is only available if the memory controller is on. Let's hence tie
         * this to memory accounting, in a way watching for OOM kills is a form of memory accounting after
         * all. */
        if (!c->memory_accounting)
                return 0;

        /* Don't watch inner nodes, as the kernel doesn't report oom_kill events recursively currently, and
         * we also don't want to generate a log message for each parent cgroup of a process. */
        if (u->type == UNIT_SLICE)
                return 0;

        if (u->cgroup_memory_inotify_wd >= 0)
                return 0;

        /* Only applies to the unified hierarchy */
        r = cg_all_unified();
        if (r < 0)
                return log_error_errno(r, "Failed to determine whether the memory controller is unified: %m");
        if (r == 0)
                return 0;

        r = hashmap_ensure_allocated(&u->manager->cgroup_memory_inotify_wd_unit, &trivial_hash_ops);
        if (r < 0)
                return log_oom();

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "memory.events", &events);
        if (r < 0)
                return log_oom();

        u->cgroup_memory_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, events, IN_MODIFY);
        if (u->cgroup_memory_inotify_wd < 0) {

                if (errno == ENOENT) /* If the directory is already gone we don't need to track it, so this
                                      * is not an error */
                        return 0;

                return log_unit_error_errno(u, errno, "Failed to add memory inotify watch descriptor for control group %s: %m", u->cgroup_path);
        }

        r = hashmap_put(u->manager->cgroup_memory_inotify_wd_unit, INT_TO_PTR(u->cgroup_memory_inotify_wd), u);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to add memory inotify watch descriptor to hash map: %m");

        return 0;
}
int unit_pick_cgroup_path(Unit *u) {
        _cleanup_free_ char *path = NULL;
        int r;

        assert(u);

        if (u->cgroup_path)
                return 0;

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EINVAL;

        path = unit_default_cgroup_path(u);
        if (!path)
                return log_oom();

        r = unit_set_cgroup_path(u, path);
        if (r == -EEXIST)
                return log_unit_error_errno(u, r, "Control group %s exists already.", path);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to set unit's control group path to %s: %m", path);

        return 0;
}
static int unit_create_cgroup(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask,
                ManagerState state) {

        bool created;
        int r;

        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* Figure out our cgroup path */
        r = unit_pick_cgroup_path(u);
        if (r < 0)
                return r;

        /* First, create our own group */
        r = cg_create_everywhere(u->manager->cgroup_supported, target_mask, u->cgroup_path);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to create cgroup %s: %m", u->cgroup_path);
        created = r;

        /* Start watching it */
        (void) unit_watch_cgroup(u);
        (void) unit_watch_cgroup_memory(u);

        /* Preserve enabled controllers in delegated units, adjust others. */
        if (created || !u->cgroup_realized || !unit_cgroup_delegate(u)) {
                CGroupMask result_mask = 0;

                /* Enable all controllers we need */
                r = cg_enable_everywhere(u->manager->cgroup_supported, enable_mask, u->cgroup_path, &result_mask);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to enable/disable controllers on cgroup %s, ignoring: %m", u->cgroup_path);

                /* If we just turned off a controller, this might release the controller for our parent too, let's
                 * enqueue the parent for re-realization in that case again. */
                if (UNIT_ISSET(u->slice)) {
                        CGroupMask turned_off;

                        turned_off = (u->cgroup_realized ? u->cgroup_enabled_mask & ~result_mask : 0);
                        if (turned_off != 0) {
                                Unit *parent;

                                /* Force the parent to propagate the enable mask to the kernel again, by invalidating
                                 * the controller we just turned off. */

                                for (parent = UNIT_DEREF(u->slice); parent; parent = UNIT_DEREF(parent->slice))
                                        unit_invalidate_cgroup(parent, turned_off);
                        }
                }

                /* Remember what's actually enabled now */
                u->cgroup_enabled_mask = result_mask;
        }

        /* Keep track that this is now realized */
        u->cgroup_realized = true;
        u->cgroup_realized_mask = target_mask;

        if (u->type != UNIT_SLICE && !unit_cgroup_delegate(u)) {

                /* Then, possibly move things over, but not if
                 * subgroups may contain processes, which is the case
                 * for slice and delegation units. */
                r = cg_migrate_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->cgroup_path, migrate_callback, u);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to migrate cgroup from to %s, ignoring: %m", u->cgroup_path);
        }

        /* Set attributes */
        cgroup_context_apply(u, target_mask, state);
        cgroup_xattr_apply(u);

        return 0;
}
static int unit_attach_pid_to_cgroup_via_bus(Unit *u, pid_t pid, const char *suffix_path) {
        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
        const char *pp;
        int r;

        assert(u);

        if (MANAGER_IS_SYSTEM(u->manager))
                return -EINVAL;

        if (!u->manager->system_bus)
                return -EIO;

        if (!u->cgroup_path)
                return -EINVAL;

        /* Determine this unit's cgroup path relative to our cgroup root */
        pp = path_startswith(u->cgroup_path, u->manager->cgroup_root);
        if (!pp)
                return -EINVAL;

        pp = strjoina("/", pp, suffix_path);
        path_simplify(pp, false);

        r = sd_bus_call_method(u->manager->system_bus,
                               "org.freedesktop.systemd1",
                               "/org/freedesktop/systemd1",
                               "org.freedesktop.systemd1.Manager",
                               "AttachProcessesToUnit",
                               &error, NULL,
                               "ssau",
                               NULL /* empty unit name means client's unit, i.e. us */, pp, 1, (uint32_t) pid);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to attach unit process " PID_FMT " via the bus: %s", pid, bus_error_message(&error, r));

        return 0;
}
int unit_attach_pids_to_cgroup(Unit *u, Set *pids, const char *suffix_path) {
        CGroupMask delegated_mask;
        const char *p;
        Iterator i;
        void *pidp;
        int r, q;

        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EINVAL;

        if (set_isempty(pids))
                return 0;

        /* Load any custom firewall BPF programs here once, to test whether they exist and are actually loadable.
         * Fail early here, since later errors in the call chain from unit_realize_cgroup to cgroup_context_apply are ignored. */
        r = bpf_firewall_load_custom(u);
        if (r < 0)
                return r;

        r = unit_realize_cgroup(u);
        if (r < 0)
                return r;

        if (isempty(suffix_path))
                p = u->cgroup_path;
        else
                p = prefix_roota(u->cgroup_path, suffix_path);

        delegated_mask = unit_get_delegate_mask(u);

        r = 0;
        SET_FOREACH(pidp, pids, i) {
                pid_t pid = PTR_TO_PID(pidp);
                CGroupController c;

                /* First, attach the PID to the main cgroup hierarchy */
                q = cg_attach(SYSTEMD_CGROUP_CONTROLLER, p, pid);
                if (q < 0) {
                        log_unit_debug_errno(u, q, "Couldn't move process " PID_FMT " to requested cgroup '%s': %m", pid, p);

                        if (MANAGER_IS_USER(u->manager) && IN_SET(q, -EPERM, -EACCES)) {
                                int z;

                                /* If we are in a user instance, and we can't move the process ourselves due to
                                 * permission problems, let's ask the system instance about it instead. Since it's more
                                 * privileged it might be able to move the process across the leaves of a subtree whose
                                 * top node is not owned by us. */

                                z = unit_attach_pid_to_cgroup_via_bus(u, pid, suffix_path);
                                if (z < 0)
                                        log_unit_debug_errno(u, z, "Couldn't move process " PID_FMT " to requested cgroup '%s' via the system bus either: %m", pid, p);
                                else
                                        continue; /* When the bus call worked we are fully done for this PID. */
                        }

                        if (r >= 0)
                                r = q; /* Remember first error */

                        continue;
                }

                q = cg_all_unified();
                if (q < 0)
                        return q;
                if (q > 0)
                        continue;

                /* In the legacy hierarchy, attach the process to the requested cgroup if possible, and if not to the
                 * innermost realized one */

                for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++) {
                        CGroupMask bit = CGROUP_CONTROLLER_TO_MASK(c);
                        const char *realized;

                        if (!(u->manager->cgroup_supported & bit))
                                continue;

                        /* If this controller is delegated and realized, honour the caller's request for the cgroup suffix. */
                        if (delegated_mask & u->cgroup_realized_mask & bit) {
                                q = cg_attach(cgroup_controller_to_string(c), p, pid);
                                if (q >= 0)
                                        continue; /* Success! */

                                log_unit_debug_errno(u, q, "Failed to attach PID " PID_FMT " to requested cgroup %s in controller %s, falling back to unit's cgroup: %m",
                                                     pid, p, cgroup_controller_to_string(c));
                        }

                        /* So this controller is either not delegated or not realized, or something else weird happened. In
                         * that case let's attach the PID at least to the closest cgroup up the tree that is
                         * realized. */
                        realized = unit_get_realized_cgroup_path(u, bit);
                        if (!realized)
                                continue; /* Not even realized in the root slice? Then let's not bother */

                        q = cg_attach(cgroup_controller_to_string(c), realized, pid);
                        if (q < 0)
                                log_unit_debug_errno(u, q, "Failed to attach PID " PID_FMT " to realized cgroup %s in controller %s, ignoring: %m",
                                                     pid, realized, cgroup_controller_to_string(c));
                }
        }

        return r;
}
static bool unit_has_mask_realized(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask) {

        assert(u);

        /* Returns true if this unit is fully realized. We check four things:
         *
         * 1. Whether the cgroup was created at all
         * 2. Whether the cgroup was created in all the hierarchies we need it to be created in (in case of cgroup v1)
         * 3. Whether the cgroup has all the right controllers enabled (in case of cgroup v2)
         * 4. Whether the invalidation mask is currently zero
         *
         * If you wonder why we mask the target realization and enable mask with CGROUP_MASK_V1/CGROUP_MASK_V2: note
         * that there are three sets of bitmasks: CGROUP_MASK_V1 (for real cgroup v1 controllers), CGROUP_MASK_V2 (for
         * real cgroup v2 controllers) and CGROUP_MASK_BPF (for BPF-based pseudo-controllers). Now, cgroup_realized_mask
         * only matters for cgroup v1 controllers, and cgroup_enabled_mask is only used for cgroup v2, and if they
         * differ in the others, we don't really care. (After all, the cgroup_enabled_mask tracks which controllers are
         * enabled through cgroup.subtree_control, and since the BPF pseudo-controllers don't show up there, they
         * simply don't matter.) */

        return u->cgroup_realized &&
                ((u->cgroup_realized_mask ^ target_mask) & CGROUP_MASK_V1) == 0 &&
                ((u->cgroup_enabled_mask ^ enable_mask) & CGROUP_MASK_V2) == 0 &&
                u->cgroup_invalidated_mask == 0;
}
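
/* A worked example of the masking above (illustrative values, not from the original source):
 * suppose target_mask is CGROUP_MASK_CPU|CGROUP_MASK_BPF_FIREWALL while cgroup_realized_mask
 * contains only CGROUP_MASK_CPU. Then (realized ^ target) leaves just the BPF bit set, and
 * "& CGROUP_MASK_V1" clears it again, so the v1 comparison still passes: a difference in a
 * BPF pseudo-controller alone never forces re-realization here. Only bits that differ within
 * CGROUP_MASK_V1 (resp. CGROUP_MASK_V2 for the enable mask) can make this return false. */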
static bool unit_has_mask_disables_realized(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask) {

        assert(u);

        /* Returns true if all controllers which should be disabled are indeed disabled.
         *
         * Unlike unit_has_mask_realized, we don't care what was enabled, only that anything we want to remove is
         * already removed. */

        return !u->cgroup_realized ||
                (FLAGS_SET(u->cgroup_realized_mask, target_mask & CGROUP_MASK_V1) &&
                 FLAGS_SET(u->cgroup_enabled_mask, enable_mask & CGROUP_MASK_V2));
}
static bool unit_has_mask_enables_realized(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask) {

        assert(u);

        /* Returns true if all controllers which should be enabled are indeed enabled.
         *
         * Unlike unit_has_mask_realized, we don't care about the controllers that are not present, only that anything
         * we want to add is already added. */

        return u->cgroup_realized &&
                ((u->cgroup_realized_mask | target_mask) & CGROUP_MASK_V1) == (u->cgroup_realized_mask & CGROUP_MASK_V1) &&
                ((u->cgroup_enabled_mask | enable_mask) & CGROUP_MASK_V2) == (u->cgroup_enabled_mask & CGROUP_MASK_V2);
}
void unit_add_to_cgroup_realize_queue(Unit *u) {
        assert(u);

        if (u->in_cgroup_realize_queue)
                return;

        LIST_PREPEND(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
        u->in_cgroup_realize_queue = true;
}
static void unit_remove_from_cgroup_realize_queue(Unit *u) {
        assert(u);

        if (!u->in_cgroup_realize_queue)
                return;

        LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
        u->in_cgroup_realize_queue = false;
}
/* Controllers can only be enabled breadth-first, from the root of the
 * hierarchy downwards to the unit in question. */
static int unit_realize_cgroup_now_enable(Unit *u, ManagerState state) {
        CGroupMask target_mask, enable_mask, new_target_mask, new_enable_mask;
        int r;

        assert(u);

        /* First go deal with this unit's parent, or we won't be able to enable
         * any new controllers at this layer. */
        if (UNIT_ISSET(u->slice)) {
                r = unit_realize_cgroup_now_enable(UNIT_DEREF(u->slice), state);
                if (r < 0)
                        return r;
        }

        target_mask = unit_get_target_mask(u);
        enable_mask = unit_get_enable_mask(u);

        /* We can only enable in this direction, don't try to disable anything. */
        if (unit_has_mask_enables_realized(u, target_mask, enable_mask))
                return 0;

        new_target_mask = u->cgroup_realized_mask | target_mask;
        new_enable_mask = u->cgroup_enabled_mask | enable_mask;

        return unit_create_cgroup(u, new_target_mask, new_enable_mask, state);
}
/* Controllers can only be disabled depth-first, from the leaves of the
 * hierarchy upwards to the unit in question. */
static int unit_realize_cgroup_now_disable(Unit *u, ManagerState state) {
        Iterator i;
        Unit *m;
        void *v;

        assert(u);

        if (u->type != UNIT_SLICE)
                return 0;

        HASHMAP_FOREACH_KEY(v, m, u->dependencies[UNIT_BEFORE], i) {
                CGroupMask target_mask, enable_mask, new_target_mask, new_enable_mask;
                int r;

                if (UNIT_DEREF(m->slice) != u)
                        continue;

                /* The cgroup for this unit might not actually be fully
                 * realised yet, in which case it isn't holding any controllers
                 * open anyway. */
                if (!m->cgroup_path)
                        continue;

                /* We must disable those below us first in order to release the
                 * controller. */
                if (m->type == UNIT_SLICE)
                        (void) unit_realize_cgroup_now_disable(m, state);

                target_mask = unit_get_target_mask(m);
                enable_mask = unit_get_enable_mask(m);

                /* We can only disable in this direction, don't try to enable
                 * anything. */
                if (unit_has_mask_disables_realized(m, target_mask, enable_mask))
                        continue;

                new_target_mask = m->cgroup_realized_mask & target_mask;
                new_enable_mask = m->cgroup_enabled_mask & enable_mask;

                r = unit_create_cgroup(m, new_target_mask, new_enable_mask, state);
                if (r < 0)
                        return r;
        }

        return 0;
}
/* Check if necessary controllers and attributes for a unit are in place.
 *
 * - If so, do nothing.
 * - If not, create paths, move processes over, and set attributes.
 *
 * Controllers can only be *enabled* in a breadth-first way, and *disabled* in
 * a depth-first way. As such the process looks like this:
 *
 * Suppose we have a cgroup hierarchy which looks like this
 * (a minimal tree reconstructed from the steps below):
 *
 *            root
 *             |
 *             a
 *             |
 *             d
 *            / \
 *           j   k
 *
 * 1. We want to realise cgroup "d" now.
 * 2. cgroup "a" has DisableControllers=cpu in the associated unit.
 * 3. cgroup "k" just started requesting the memory controller.
 *
 * To make this work we must do the following in order:
 *
 * 1. Disable CPU controller in k, j
 * 2. Disable CPU controller in d
 * 3. Enable memory controller in root
 * 4. Enable memory controller in a
 * 5. Enable memory controller in d
 * 6. Enable memory controller in k
 *
 * Notice that we need to touch j in one direction, but not the other. We also
 * don't go beyond d when disabling -- it's up to "a" to get realized if it
 * wants to disable further. The basic rules are therefore:
 *
 * - If you're disabling something, you need to realise all of the cgroups from
 *   your recursive descendants to the root. This starts from the leaves.
 * - If you're enabling something, you need to realise from the root cgroup
 *   downwards, but you don't need to iterate your recursive descendants.
 *
 * Returns 0 on success and < 0 on failure. */
static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
        CGroupMask target_mask, enable_mask;
        int r;

        assert(u);

        unit_remove_from_cgroup_realize_queue(u);

        target_mask = unit_get_target_mask(u);
        enable_mask = unit_get_enable_mask(u);

        if (unit_has_mask_realized(u, target_mask, enable_mask))
                return 0;

        /* Disable controllers below us, if there are any */
        r = unit_realize_cgroup_now_disable(u, state);
        if (r < 0)
                return r;

        /* Enable controllers above us, if there are any */
        if (UNIT_ISSET(u->slice)) {
                r = unit_realize_cgroup_now_enable(UNIT_DEREF(u->slice), state);
                if (r < 0)
                        return r;
        }

        /* Now actually deal with the cgroup we were trying to realise and set attributes */
        r = unit_create_cgroup(u, target_mask, enable_mask, state);
        if (r < 0)
                return r;

        /* Now, reset the invalidation mask */
        u->cgroup_invalidated_mask = 0;
        return 0;
}
unsigned manager_dispatch_cgroup_realize_queue(Manager *m) {
        ManagerState state;
        unsigned n = 0;
        Unit *i;
        int r;

        assert(m);

        state = manager_state(m);

        while ((i = m->cgroup_realize_queue)) {
                assert(i->in_cgroup_realize_queue);

                if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(i))) {
                        /* Maybe things changed, and the unit is not actually active anymore? */
                        unit_remove_from_cgroup_realize_queue(i);
                        continue;
                }

                r = unit_realize_cgroup_now(i, state);
                if (r < 0)
                        log_warning_errno(r, "Failed to realize cgroups for queued unit %s, ignoring: %m", i->id);

                n++;
        }

        return n;
}
static void unit_add_siblings_to_cgroup_realize_queue(Unit *u) {
        Unit *slice;

        /* This adds the siblings of the specified unit and the
         * siblings of all parent units to the cgroup queue. (But
         * neither the specified unit itself nor the parents.) */

        while ((slice = UNIT_DEREF(u->slice))) {
                Iterator i;
                Unit *m;
                void *v;

                HASHMAP_FOREACH_KEY(v, m, slice->dependencies[UNIT_BEFORE], i) {
                        /* Skip units that have a dependency on the slice
                         * but aren't actually in it. */
                        if (UNIT_DEREF(m->slice) != slice)
                                continue;

                        /* No point in doing cgroup application for units
                         * without active processes. */
                        if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(m)))
                                continue;

                        /* If the unit doesn't need any new controllers
                         * and has current ones realized, it doesn't need
                         * any changes. */
                        if (unit_has_mask_realized(m,
                                                   unit_get_target_mask(m),
                                                   unit_get_enable_mask(m)))
                                continue;

                        unit_add_to_cgroup_realize_queue(m);
                }

                u = slice;
        }
}
int unit_realize_cgroup(Unit *u) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* So, here's the deal: when realizing the cgroups for this
         * unit, we need to first create all parents, but there's more
         * actually: for the weight-based controllers we also need to
         * make sure that all our siblings (i.e. units that are in the
         * same slice as we are) have cgroups, too. Otherwise, things
         * would become very uneven as each of their processes would
         * get as much resources as all our group together. This call
         * will synchronously create the parent cgroups, but will
         * defer work on the siblings to the next event loop
         * iteration. */

        /* Add all sibling slices to the cgroup queue. */
        unit_add_siblings_to_cgroup_realize_queue(u);

        /* And realize this one now (and apply the values) */
        return unit_realize_cgroup_now(u, manager_state(u->manager));
}
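
/* To see why sibling realization matters for the weight-based controllers (made-up numbers):
 * if slice S contains units A and B, both at the default CPU weight of 100, but only A's
 * cgroup exists, then B's processes sit directly in S's cgroup, where each of them competes
 * as a peer of A's whole subtree — a single busy process of B can get as much CPU as all of
 * A's processes together, instead of A and B splitting time roughly 50:50. Queuing the
 * siblings restores the intended split on the next event loop iteration. */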
void unit_release_cgroup(Unit *u) {
        assert(u);

        /* Forgets all cgroup details for this cgroup — but does *not* destroy the cgroup. This is hence OK to call
         * when we close down everything for reexecution, where we really want to leave the cgroup in place. */

        if (u->cgroup_path) {
                (void) hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
                u->cgroup_path = mfree(u->cgroup_path);
        }

        if (u->cgroup_control_inotify_wd >= 0) {
                if (inotify_rm_watch(u->manager->cgroup_inotify_fd, u->cgroup_control_inotify_wd) < 0)
                        log_unit_debug_errno(u, errno, "Failed to remove cgroup control inotify watch %i for %s, ignoring: %m", u->cgroup_control_inotify_wd, u->id);

                (void) hashmap_remove(u->manager->cgroup_control_inotify_wd_unit, INT_TO_PTR(u->cgroup_control_inotify_wd));
                u->cgroup_control_inotify_wd = -1;
        }

        if (u->cgroup_memory_inotify_wd >= 0) {
                if (inotify_rm_watch(u->manager->cgroup_inotify_fd, u->cgroup_memory_inotify_wd) < 0)
                        log_unit_debug_errno(u, errno, "Failed to remove cgroup memory inotify watch %i for %s, ignoring: %m", u->cgroup_memory_inotify_wd, u->id);

                (void) hashmap_remove(u->manager->cgroup_memory_inotify_wd_unit, INT_TO_PTR(u->cgroup_memory_inotify_wd));
                u->cgroup_memory_inotify_wd = -1;
        }
}
void unit_prune_cgroup(Unit *u) {
        bool is_root_slice;
        int r;

        assert(u);

        /* Removes the cgroup, if empty and possible, and stops watching it. */

        if (!u->cgroup_path)
                return;

        (void) unit_get_cpu_usage(u, NULL); /* Cache the last CPU usage value before we destroy the cgroup */

        is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);

        r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !is_root_slice);
        if (r < 0)
                /* One reason we could have failed here is that the cgroup still contains a process.
                 * However, if the cgroup becomes removable at a later time, it might be removed when
                 * the containing slice is stopped. So even if we failed now, this unit shouldn't assume
                 * that the cgroup is still realized the next time it is started. Do not return early
                 * on error, continue cleanup. */
                log_unit_full(u, r == -EBUSY ? LOG_DEBUG : LOG_WARNING, r, "Failed to destroy cgroup %s, ignoring: %m", u->cgroup_path);

        if (is_root_slice)
                return;

        unit_release_cgroup(u);

        u->cgroup_realized = false;
        u->cgroup_realized_mask = 0;
        u->cgroup_enabled_mask = 0;

        u->bpf_device_control_installed = bpf_program_unref(u->bpf_device_control_installed);
}
int unit_search_main_pid(Unit *u, pid_t *ret) {
        _cleanup_fclose_ FILE *f = NULL;
        pid_t pid = 0, npid;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENXIO;

        r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, &f);
        if (r < 0)
                return r;

        while (cg_read_pid(f, &npid) > 0) {
                if (npid == pid)
                        continue;

                if (pid_is_my_child(npid) == 0)
                        continue;

                if (pid != 0)
                        /* Dang, there's more than one daemonized PID
                         * in this group, so we don't know what process
                         * is the main process. */
                        return -ENODATA;

                pid = npid;
        }

        *ret = pid;
        return 0;
}
static int unit_watch_pids_in_path(Unit *u, const char *path) {
        _cleanup_closedir_ DIR *d = NULL;
        _cleanup_fclose_ FILE *f = NULL;
        int ret = 0, r;

        assert(u);
        assert(path);

        r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f);
        if (r < 0)
                ret = r;
        else {
                pid_t pid;

                while ((r = cg_read_pid(f, &pid)) > 0) {
                        r = unit_watch_pid(u, pid, false);
                        if (r < 0 && ret >= 0)
                                ret = r;
                }

                if (r < 0 && ret >= 0)
                        ret = r;
        }

        r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d);
        if (r < 0) {
                if (ret >= 0)
                        ret = r;
        } else {
                char *fn;

                while ((r = cg_read_subgroup(d, &fn)) > 0) {
                        _cleanup_free_ char *p = NULL;

                        p = path_join(empty_to_root(path), fn);
                        free(fn);

                        if (!p)
                                return -ENOMEM;

                        r = unit_watch_pids_in_path(u, p);
                        if (r < 0 && ret >= 0)
                                ret = r;
                }

                if (r < 0 && ret >= 0)
                        ret = r;
        }

        return ret;
}
int unit_synthesize_cgroup_empty_event(Unit *u) {
        int r;

        assert(u);

        /* Enqueue a synthetic cgroup empty event if this unit doesn't watch any PIDs anymore. This is compatibility
         * support for non-unified systems where notifications aren't reliable, and hence need to take whatever we can
         * get as notification source as soon as we stopped having any useful PIDs to watch for. */

        if (!u->cgroup_path)
                return -ENOENT;

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return r;
        if (r > 0) /* On unified we have reliable notifications, and don't need this */
                return 0;

        if (!set_isempty(u->pids))
                return 0;

        unit_add_to_cgroup_empty_queue(u);
        return 0;
}
int unit_watch_all_pids(Unit *u) {
        int r;

        assert(u);

        /* Adds all PIDs from our cgroup to the set of PIDs we
         * watch. This is a fallback logic for cases where we do not
         * get reliable cgroup empty notifications: we try to use
         * SIGCHLD as replacement. */

        if (!u->cgroup_path)
                return -ENOENT;

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return r;
        if (r > 0) /* On unified we can use proper notifications */
                return 0;

        return unit_watch_pids_in_path(u, u->cgroup_path);
}
static int on_cgroup_empty_event(sd_event_source *s, void *userdata) {
        Manager *m = userdata;
        Unit *u;
        int r;

        assert(s);
        assert(m);

        u = m->cgroup_empty_queue;
        if (!u)
                return 0;

        assert(u->in_cgroup_empty_queue);
        u->in_cgroup_empty_queue = false;
        LIST_REMOVE(cgroup_empty_queue, m->cgroup_empty_queue, u);

        if (m->cgroup_empty_queue) {
                /* More stuff queued, let's make sure we remain enabled */
                r = sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);
                if (r < 0)
                        log_debug_errno(r, "Failed to reenable cgroup empty event source, ignoring: %m");
        }

        unit_add_to_gc_queue(u);

        if (UNIT_VTABLE(u)->notify_cgroup_empty)
                UNIT_VTABLE(u)->notify_cgroup_empty(u);

        return 0;
}
void unit_add_to_cgroup_empty_queue(Unit *u) {
        int r;

        assert(u);

        /* Note that there are four different ways cgroup empty events can reach us:
         *
         * 1. On the unified hierarchy we get an inotify event on the cgroup
         *
         * 2. On the legacy hierarchy, when running in system mode, we get a datagram on the cgroup agent socket
         *
         * 3. On the legacy hierarchy, when running in user mode, we get a D-Bus signal on the system bus
         *
         * 4. On the legacy hierarchy, in service units we start watching all processes of the cgroup for SIGCHLD as
         *    soon as we get one SIGCHLD, to deal with unreliable cgroup notifications.
         *
         * Regardless which way we got the notification, we'll verify it here, and then add it to a separate
         * queue. This queue will be dispatched at a lower priority than the SIGCHLD handler, so that we always use
         * SIGCHLD if we can get it first, and only use the cgroup empty notifications if there's no SIGCHLD pending
         * (which might happen if the cgroup doesn't contain processes that are our own child, which is typically the
         * case for scope units). */

        if (u->in_cgroup_empty_queue)
                return;

        /* Let's verify that the cgroup is really empty */
        if (!u->cgroup_path)
                return;

        r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
        if (r < 0) {
                log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
                return;
        }
        if (r == 0)
                return;

        LIST_PREPEND(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
        u->in_cgroup_empty_queue = true;

        /* Trigger the defer event */
        r = sd_event_source_set_enabled(u->manager->cgroup_empty_event_source, SD_EVENT_ONESHOT);
        if (r < 0)
                log_debug_errno(r, "Failed to enable cgroup empty event source: %m");
}
int unit_check_oom(Unit *u) {
        _cleanup_free_ char *oom_kill = NULL;
        bool increased;
        uint64_t c;
        int r;

        if (!u->cgroup_path)
                return 0;

        r = cg_get_keyed_attribute("memory", u->cgroup_path, "memory.events", STRV_MAKE("oom_kill"), &oom_kill);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to read oom_kill field of memory.events cgroup attribute: %m");

        r = safe_atou64(oom_kill, &c);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to parse oom_kill field: %m");

        increased = c > u->oom_kill_last;
        u->oom_kill_last = c;

        if (!increased)
                return 0;

        log_struct(LOG_NOTICE,
                   "MESSAGE_ID=" SD_MESSAGE_UNIT_OUT_OF_MEMORY_STR,
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u),
                   LOG_UNIT_MESSAGE(u, "A process of this unit has been killed by the OOM killer."));

        if (UNIT_VTABLE(u)->notify_cgroup_oom)
                UNIT_VTABLE(u)->notify_cgroup_oom(u);

        return 1;
}
static int on_cgroup_oom_event(sd_event_source *s, void *userdata) {
        Manager *m = userdata;
        Unit *u;
        int r;

        assert(s);
        assert(m);

        u = m->cgroup_oom_queue;
        if (!u)
                return 0;

        assert(u->in_cgroup_oom_queue);
        u->in_cgroup_oom_queue = false;
        LIST_REMOVE(cgroup_oom_queue, m->cgroup_oom_queue, u);

        if (m->cgroup_oom_queue) {
                /* More stuff queued, let's make sure we remain enabled */
                r = sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);
                if (r < 0)
                        log_debug_errno(r, "Failed to reenable cgroup oom event source, ignoring: %m");
        }

        (void) unit_check_oom(u);
        return 0;
}
static void unit_add_to_cgroup_oom_queue(Unit *u) {
        int r;

        assert(u);

        if (u->in_cgroup_oom_queue)
                return;
        if (!u->cgroup_path)
                return;

        LIST_PREPEND(cgroup_oom_queue, u->manager->cgroup_oom_queue, u);
        u->in_cgroup_oom_queue = true;

        /* Trigger the defer event */
        if (!u->manager->cgroup_oom_event_source) {
                _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;

                r = sd_event_add_defer(u->manager->event, &s, on_cgroup_oom_event, u->manager);
                if (r < 0) {
                        log_error_errno(r, "Failed to create cgroup oom event source: %m");
                        return;
                }

                r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_NORMAL-8);
                if (r < 0) {
                        log_error_errno(r, "Failed to set priority of cgroup oom event source: %m");
                        return;
                }

                (void) sd_event_source_set_description(s, "cgroup-oom");
                u->manager->cgroup_oom_event_source = TAKE_PTR(s);
        }

        r = sd_event_source_set_enabled(u->manager->cgroup_oom_event_source, SD_EVENT_ONESHOT);
        if (r < 0)
                log_error_errno(r, "Failed to enable cgroup oom event source: %m");
}
static int on_cgroup_inotify_event(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *m = userdata;

        assert(s);
        assert(fd >= 0);
        assert(m);

        for (;;) {
                union inotify_event_buffer buffer;
                struct inotify_event *e;
                ssize_t l;

                l = read(fd, &buffer, sizeof(buffer));
                if (l < 0) {
                        if (IN_SET(errno, EINTR, EAGAIN))
                                return 0;

                        return log_error_errno(errno, "Failed to read control group inotify events: %m");
                }

                FOREACH_INOTIFY_EVENT(e, buffer, l) {
                        Unit *u;

                        if (e->wd < 0)
                                /* Queue overflow has no watch descriptor */
                                continue;

                        if (e->mask & IN_IGNORED)
                                /* The watch was just removed */
                                continue;

                        /* Note that inotify might deliver events for a watch even after it was removed,
                         * because it was queued before the removal. Let's ignore this here safely. */

                        u = hashmap_get(m->cgroup_control_inotify_wd_unit, INT_TO_PTR(e->wd));
                        if (u)
                                unit_add_to_cgroup_empty_queue(u);

                        u = hashmap_get(m->cgroup_memory_inotify_wd_unit, INT_TO_PTR(e->wd));
                        if (u)
                                unit_add_to_cgroup_oom_queue(u);
                }
        }
}
static int cg_bpf_mask_supported(CGroupMask *ret) {
        CGroupMask mask = 0;
        int r;

        /* BPF-based firewall */
        r = bpf_firewall_supported();
        if (r > 0)
                mask |= CGROUP_MASK_BPF_FIREWALL;

        /* BPF-based device access control */
        r = bpf_devices_supported();
        if (r > 0)
                mask |= CGROUP_MASK_BPF_DEVICES;

        *ret = mask;
        return 0;
}
int manager_setup_cgroup(Manager *m) {
        _cleanup_free_ char *path = NULL;
        const char *scope_path;
        CGroupController c;
        int r, all_unified;
        CGroupMask mask;
        char *e;

        assert(m);

        /* 1. Determine hierarchy */
        m->cgroup_root = mfree(m->cgroup_root);
        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root);
        if (r < 0)
                return log_error_errno(r, "Cannot determine cgroup we are running in: %m");

        /* Chop off the init scope, if we are already located in it */
        e = endswith(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);

        /* LEGACY: Also chop off the system slice if we are in
         * it. This is to support live upgrades from older systemd
         * versions where PID 1 was moved there. Also see
         * cg_get_root_path(). */
        if (!e && MANAGER_IS_SYSTEM(m)) {
                e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE);
                if (!e)
                        e = endswith(m->cgroup_root, "/system"); /* even more legacy */
        }
        if (e)
                *e = 0;

        /* And make sure to store away the root value without trailing slash, even for the root dir, so that we can
         * easily prepend it everywhere. */
        delete_trailing_chars(m->cgroup_root, "/");

        /* 2. Show data. */
        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
        if (r < 0)
                return log_error_errno(r, "Cannot find cgroup mount point: %m");

        r = cg_unified_flush();
        if (r < 0)
                return log_error_errno(r, "Couldn't determine if we are running in the unified hierarchy: %m");

        all_unified = cg_all_unified();
        if (all_unified < 0)
                return log_error_errno(all_unified, "Couldn't determine whether we are in all unified mode: %m");
        if (all_unified > 0)
                log_debug("Unified cgroup hierarchy is located at %s.", path);
        else {
                r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
                if (r < 0)
                        return log_error_errno(r, "Failed to determine whether systemd's own controller is in unified mode: %m");
                if (r > 0)
                        log_debug("Unified cgroup hierarchy is located at %s. Controllers are on legacy hierarchies.", path);
                else
                        log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER_LEGACY ". File system hierarchy is at %s.", path);
        }

        /* 3. Allocate cgroup empty defer event source */
        m->cgroup_empty_event_source = sd_event_source_unref(m->cgroup_empty_event_source);
        r = sd_event_add_defer(m->event, &m->cgroup_empty_event_source, on_cgroup_empty_event, m);
        if (r < 0)
                return log_error_errno(r, "Failed to create cgroup empty event source: %m");

        /* Schedule cgroup empty checks early, but after having processed service notification messages or
         * SIGCHLD signals, so that a cgroup running empty is always just the last safety net of
         * notification, and we collected the metadata the notification and SIGCHLD stuff offers first. */
        r = sd_event_source_set_priority(m->cgroup_empty_event_source, SD_EVENT_PRIORITY_NORMAL-5);
        if (r < 0)
                return log_error_errno(r, "Failed to set priority of cgroup empty event source: %m");

        r = sd_event_source_set_enabled(m->cgroup_empty_event_source, SD_EVENT_OFF);
        if (r < 0)
                return log_error_errno(r, "Failed to disable cgroup empty event source: %m");

        (void) sd_event_source_set_description(m->cgroup_empty_event_source, "cgroup-empty");

        /* 4. Install notifier inotify object, or agent */
        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0) {

                /* In the unified hierarchy we can get cgroup empty notifications via inotify. */

                m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
                safe_close(m->cgroup_inotify_fd);

                m->cgroup_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
                if (m->cgroup_inotify_fd < 0)
                        return log_error_errno(errno, "Failed to create control group inotify object: %m");

                r = sd_event_add_io(m->event, &m->cgroup_inotify_event_source, m->cgroup_inotify_fd, EPOLLIN, on_cgroup_inotify_event, m);
                if (r < 0)
                        return log_error_errno(r, "Failed to watch control group inotify object: %m");

                /* Process cgroup empty notifications early. Note that when this event is dispatched it'll
                 * just add the unit to a cgroup empty queue, hence let's run earlier than that. Also see
                 * handling of cgroup agent notifications, for the classic cgroup hierarchy support. */
                r = sd_event_source_set_priority(m->cgroup_inotify_event_source, SD_EVENT_PRIORITY_NORMAL-9);
                if (r < 0)
                        return log_error_errno(r, "Failed to set priority of inotify event source: %m");

                (void) sd_event_source_set_description(m->cgroup_inotify_event_source, "cgroup-inotify");

        } else if (MANAGER_IS_SYSTEM(m) && manager_owns_host_root_cgroup(m) && !MANAGER_IS_TEST_RUN(m)) {

                /* On the legacy hierarchy we only get notifications via cgroup agents. (Which isn't really reliable,
                 * since it does not generate events when control groups with children run empty.) */

                r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, SYSTEMD_CGROUP_AGENT_PATH);
                if (r < 0)
                        log_warning_errno(r, "Failed to install release agent, ignoring: %m");
                else if (r > 0)
                        log_debug("Installed release agent.");
                else if (r == 0)
                        log_debug("Release agent already installed.");
        }

        /* 5. Make sure we are in the special "init.scope" unit in the root slice. */
        scope_path = strjoina(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
        r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
        if (r >= 0) {
                /* Also, move all other userspace processes remaining in the root cgroup into that scope. */
                r = cg_migrate(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
                if (r < 0)
                        log_warning_errno(r, "Couldn't move remaining userspace processes, ignoring: %m");

                /* 6. And pin it, so that it cannot be unmounted */
                safe_close(m->pin_cgroupfs_fd);
                m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
                if (m->pin_cgroupfs_fd < 0)
                        return log_error_errno(errno, "Failed to open pin file: %m");

        } else if (!MANAGER_IS_TEST_RUN(m))
                return log_error_errno(r, "Failed to create %s control group: %m", scope_path);

        /* 7. Always enable hierarchical support if it exists... */
        if (!all_unified && !MANAGER_IS_TEST_RUN(m))
                (void) cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");

        /* 8. Figure out which controllers are supported */
        r = cg_mask_supported(&m->cgroup_supported);
        if (r < 0)
                return log_error_errno(r, "Failed to determine supported controllers: %m");

        /* 9. Figure out which bpf-based pseudo-controllers are supported */
        r = cg_bpf_mask_supported(&mask);
        if (r < 0)
                return log_error_errno(r, "Failed to determine supported bpf-based pseudo-controllers: %m");
        m->cgroup_supported |= mask;

        /* 10. Log which controllers are supported */
        for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++)
                log_debug("Controller '%s' supported: %s", cgroup_controller_to_string(c), yes_no(m->cgroup_supported & CGROUP_CONTROLLER_TO_MASK(c)));

        return 0;
}
void manager_shutdown_cgroup(Manager *m, bool delete) {
        assert(m);

        /* We can't really delete the group, since we are in it. But
         * let's trim it. */
        if (delete && m->cgroup_root && m->test_run_flags != MANAGER_TEST_RUN_MINIMAL)
                (void) cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);

        m->cgroup_empty_event_source = sd_event_source_unref(m->cgroup_empty_event_source);

        m->cgroup_control_inotify_wd_unit = hashmap_free(m->cgroup_control_inotify_wd_unit);
        m->cgroup_memory_inotify_wd_unit = hashmap_free(m->cgroup_memory_inotify_wd_unit);

        m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
        m->cgroup_inotify_fd = safe_close(m->cgroup_inotify_fd);

        m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd);

        m->cgroup_root = mfree(m->cgroup_root);
}
Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) {
        Unit *u;
        char *p;

        assert(m);
        assert(cgroup);

        u = hashmap_get(m->cgroup_unit, cgroup);
        if (u)
                return u;

        p = strdupa(cgroup);
        for (;;) {
                char *e;

                e = strrchr(p, '/');
                if (!e || e == p)
                        return hashmap_get(m->cgroup_unit, SPECIAL_ROOT_SLICE);

                *e = 0;

                u = hashmap_get(m->cgroup_unit, p);
                if (u)
                        return u;
        }
}
Unit *manager_get_unit_by_pid_cgroup(Manager *m, pid_t pid) {
        _cleanup_free_ char *cgroup = NULL;

        assert(m);

        if (!pid_is_valid(pid))
                return NULL;

        if (cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup) < 0)
                return NULL;

        return manager_get_unit_by_cgroup(m, cgroup);
}
Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {
        Unit *u, **array;

        assert(m);

        /* Note that a process might be owned by multiple units, we return only one here, which is good enough for most
         * cases, though not strictly correct. We prefer the one reported by cgroup membership, as that's the most
         * relevant one as children of the process will be assigned to that one, too, before all else. */

        if (!pid_is_valid(pid))
                return NULL;

        if (pid == getpid_cached())
                return hashmap_get(m->units, SPECIAL_INIT_SCOPE);

        u = manager_get_unit_by_pid_cgroup(m, pid);
        if (u)
                return u;

        u = hashmap_get(m->watch_pids, PID_TO_PTR(pid));
        if (u)
                return u;

        array = hashmap_get(m->watch_pids, PID_TO_PTR(-pid));
        if (array)
                return array[0];

        return NULL;
}
int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
        Unit *u;

        assert(m);
        assert(cgroup);

        /* Called on the legacy hierarchy whenever we get an explicit cgroup notification from the cgroup agent process
         * or from the --system instance */

        log_debug("Got cgroup empty notification for: %s", cgroup);

        u = manager_get_unit_by_cgroup(m, cgroup);
        if (!u)
                return 0;

        unit_add_to_cgroup_empty_queue(u);
        return 1;
}
int unit_get_memory_current(Unit *u, uint64_t *ret) {
        _cleanup_free_ char *v = NULL;
        int r;

        assert(u);
        assert(ret);

        if (!UNIT_CGROUP_BOOL(u, memory_accounting))
                return -ENODATA;

        if (!u->cgroup_path)
                return -ENODATA;

        /* The root cgroup doesn't expose this information, let's get it from /proc instead */
        if (unit_has_host_root_cgroup(u))
                return procfs_memory_get_used(ret);

        if ((u->cgroup_realized_mask & CGROUP_MASK_MEMORY) == 0)
                return -ENODATA;

        r = cg_all_unified();
        if (r < 0)
                return r;
        if (r > 0)
                r = cg_get_attribute("memory", u->cgroup_path, "memory.current", &v);
        else
                r = cg_get_attribute("memory", u->cgroup_path, "memory.usage_in_bytes", &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        return safe_atou64(v, ret);
}
int unit_get_tasks_current(Unit *u, uint64_t *ret) {
        _cleanup_free_ char *v = NULL;
        int r;

        assert(u);
        assert(ret);

        if (!UNIT_CGROUP_BOOL(u, tasks_accounting))
                return -ENODATA;

        if (!u->cgroup_path)
                return -ENODATA;

        /* The root cgroup doesn't expose this information, let's get it from /proc instead */
        if (unit_has_host_root_cgroup(u))
                return procfs_tasks_get_current(ret);

        if ((u->cgroup_realized_mask & CGROUP_MASK_PIDS) == 0)
                return -ENODATA;

        r = cg_get_attribute("pids", u->cgroup_path, "pids.current", &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        return safe_atou64(v, ret);
}
static int unit_get_cpu_usage_raw(Unit *u, nsec_t *ret) {
        _cleanup_free_ char *v = NULL;
        uint64_t ns;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENODATA;

        /* The root cgroup doesn't expose this information, let's get it from /proc instead */
        if (unit_has_host_root_cgroup(u))
                return procfs_cpu_get_usage(ret);

        /* Requisite controllers for CPU accounting are not enabled */
        if ((get_cpu_accounting_mask() & ~u->cgroup_realized_mask) != 0)
                return -ENODATA;

        r = cg_all_unified();
        if (r < 0)
                return r;
        if (r > 0) {
                _cleanup_free_ char *val = NULL;
                uint64_t us;

                r = cg_get_keyed_attribute("cpu", u->cgroup_path, "cpu.stat", STRV_MAKE("usage_usec"), &val);
                if (IN_SET(r, -ENOENT, -ENXIO))
                        return -ENODATA;
                if (r < 0)
                        return r;

                r = safe_atou64(val, &us);
                if (r < 0)
                        return r;

                ns = us * NSEC_PER_USEC;
        } else {
                r = cg_get_attribute("cpuacct", u->cgroup_path, "cpuacct.usage", &v);
                if (r == -ENOENT)
                        return -ENODATA;
                if (r < 0)
                        return r;

                r = safe_atou64(v, &ns);
                if (r < 0)
                        return r;
        }

        *ret = ns;
        return 0;
}
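
/* Unit conversion example (illustrative values): on the unified hierarchy cpu.stat reports
 * microseconds, so "usage_usec=1500" yields ns = 1500 * NSEC_PER_USEC = 1500000 here, while
 * the legacy cpuacct.usage attribute already counts nanoseconds and is used as-is. Normalizing
 * both to nsec_t is what lets the callers below treat the two hierarchies identically. */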
int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
        nsec_t ns;
        int r;

        assert(u);

        /* Retrieve the current CPU usage counter. This will subtract the CPU counter taken when the unit was
         * started. If the cgroup has been removed already, returns the last cached value. To cache the value, simply
         * call this function with a NULL return value. */

        if (!UNIT_CGROUP_BOOL(u, cpu_accounting))
                return -ENODATA;

        r = unit_get_cpu_usage_raw(u, &ns);
        if (r == -ENODATA && u->cpu_usage_last != NSEC_INFINITY) {
                /* If we can't get the CPU usage anymore (because the cgroup was already removed, for example), use our
                 * cached value. */

                if (ret)
                        *ret = u->cpu_usage_last;
                return 0;
        }
        if (r < 0)
                return r;

        if (ns > u->cpu_usage_base)
                ns -= u->cpu_usage_base;
        else
                ns = 0;

        u->cpu_usage_last = ns;
        if (ret)
                *ret = ns;

        return 0;
}
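
/* Usage note: the NULL-return caching idiom is what unit_prune_cgroup() above relies on — it
 * calls (void) unit_get_cpu_usage(u, NULL) just before destroying the cgroup, so the final
 * counter is preserved in u->cpu_usage_last and later queries still get a meaningful value
 * via the -ENODATA fallback. */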
int unit_get_ip_accounting(
                Unit *u,
                CGroupIPAccountingMetric metric,
                uint64_t *ret) {

        uint64_t value;
        int fd, r;

        assert(u);
        assert(metric >= 0);
        assert(metric < _CGROUP_IP_ACCOUNTING_METRIC_MAX);
        assert(ret);

        if (!UNIT_CGROUP_BOOL(u, ip_accounting))
                return -ENODATA;

        fd = IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_INGRESS_PACKETS) ?
                u->ip_accounting_ingress_map_fd :
                u->ip_accounting_egress_map_fd;
        if (fd < 0)
                return -ENODATA;

        if (IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
                r = bpf_firewall_read_accounting(fd, &value, NULL);
        else
                r = bpf_firewall_read_accounting(fd, NULL, &value);
        if (r < 0)
                return r;

        /* Add in additional metrics from a previous runtime. Note that when reexecing/reloading the daemon we compile
         * all BPF programs and maps anew, but serialize the old counters. When deserializing we store them in the
         * ip_accounting_extra[] field, and add them in here transparently. */

        *ret = value + u->ip_accounting_extra[metric];

        return r;
}
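
/* Illustrative walkthrough with made-up numbers: say a unit had sent 4000 egress bytes when
 * the daemon reexecuted. The BPF map is compiled anew and restarts at 0, but the 4000 is
 * serialized and lands in ip_accounting_extra[CGROUP_IP_EGRESS_BYTES]. If the unit then sends
 * another 500 bytes, this function reports 500 + 4000 = 4500, so the counter appears
 * continuous across the reexec. */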
static int unit_get_io_accounting_raw(Unit *u, uint64_t ret[static _CGROUP_IO_ACCOUNTING_METRIC_MAX]) {
        static const char *const field_names[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
                [CGROUP_IO_READ_BYTES]       = "rbytes=",
                [CGROUP_IO_WRITE_BYTES]      = "wbytes=",
                [CGROUP_IO_READ_OPERATIONS]  = "rios=",
                [CGROUP_IO_WRITE_OPERATIONS] = "wios=",
        };
        uint64_t acc[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {};
        _cleanup_free_ char *path = NULL;
        _cleanup_fclose_ FILE *f = NULL;
        int r;

        assert(u);

        if (!u->cgroup_path)
                return -ENODATA;

        if (unit_has_host_root_cgroup(u))
                return -ENODATA; /* TODO: return useful data for the top-level cgroup */

        r = cg_all_unified();
        if (r < 0)
                return r;
        if (r == 0) /* TODO: support cgroupv1 */
                return -ENODATA;

        if (!FLAGS_SET(u->cgroup_realized_mask, CGROUP_MASK_IO))
                return -ENODATA;

        r = cg_get_path("io", u->cgroup_path, "io.stat", &path);
        if (r < 0)
                return r;

        f = fopen(path, "re");
        if (!f)
                return -errno;

        for (;;) {
                _cleanup_free_ char *line = NULL;
                const char *p;

                r = read_line(f, LONG_LINE_MAX, &line);
                if (r < 0)
                        return r;
                if (r == 0)
                        break;

                p = line;
                p += strcspn(p, WHITESPACE); /* Skip over device major/minor */
                p += strspn(p, WHITESPACE);  /* Skip over following whitespace */

                for (;;) {
                        _cleanup_free_ char *word = NULL;

                        r = extract_first_word(&p, &word, NULL, EXTRACT_RETAIN_ESCAPE);
                        if (r < 0)
                                return r;
                        if (r == 0)
                                break;

                        for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++) {
                                const char *x;

                                x = startswith(word, field_names[i]);
                                if (x) {
                                        uint64_t w;

                                        r = safe_atou64(x, &w);
                                        if (r < 0)
                                                return r;

                                        /* Sum up the stats of all devices */
                                        acc[i] += w;
                                        break;
                                }
                        }
                }
        }

        memcpy(ret, acc, sizeof(acc));
        return 0;
}
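
/* Example io.stat content (two hypothetical devices) and what the loop above accumulates:
 *
 *     8:0  rbytes=4096 wbytes=8192 rios=2 wios=4
 *     8:16 rbytes=1024 wbytes=0    rios=1 wios=0
 *
 * The major:minor column is skipped, any key that matches no field_names[] entry is ignored,
 * and the per-device values are summed: acc[CGROUP_IO_READ_BYTES] = 5120,
 * acc[CGROUP_IO_WRITE_BYTES] = 8192, acc[CGROUP_IO_READ_OPERATIONS] = 3,
 * acc[CGROUP_IO_WRITE_OPERATIONS] = 4. */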
int unit_get_io_accounting(
                Unit *u,
                CGroupIOAccountingMetric metric,
                bool allow_cache,
                uint64_t *ret) {

        uint64_t raw[_CGROUP_IO_ACCOUNTING_METRIC_MAX];
        int r;

        /* Retrieve an IO account parameter. This will subtract the counter when the unit was started. */

        if (!UNIT_CGROUP_BOOL(u, io_accounting))
                return -ENODATA;

        if (allow_cache && u->io_accounting_last[metric] != UINT64_MAX)
                goto done;

        r = unit_get_io_accounting_raw(u, raw);
        if (r == -ENODATA && u->io_accounting_last[metric] != UINT64_MAX)
                goto done;
        if (r < 0)
                return r;

        for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++) {
                /* Saturated subtraction */
                if (raw[i] > u->io_accounting_base[i])
                        u->io_accounting_last[i] = raw[i] - u->io_accounting_base[i];
                else
                        u->io_accounting_last[i] = 0;
        }

done:
        if (ret)
                *ret = u->io_accounting_last[metric];

        return 0;
}
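
/* The saturated subtraction in numbers (illustrative): with io_accounting_base[i] snapshotted
 * at 10000 on unit start and a raw counter of 10500, the reported value is 500. If the raw
 * counter ever reads below the base (e.g. after the controller was cycled), the result is
 * clamped to 0 instead of wrapping around to a huge uint64_t value. */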
int unit_reset_cpu_accounting(Unit *u) {
        int r;

        assert(u);

        u->cpu_usage_last = NSEC_INFINITY;

        r = unit_get_cpu_usage_raw(u, &u->cpu_usage_base);
        if (r < 0) {
                u->cpu_usage_base = 0;
                return r;
        }

        return 0;
}
int unit_reset_ip_accounting(Unit *u) {
        int r = 0, q = 0;

        assert(u);

        if (u->ip_accounting_ingress_map_fd >= 0)
                r = bpf_firewall_reset_accounting(u->ip_accounting_ingress_map_fd);

        if (u->ip_accounting_egress_map_fd >= 0)
                q = bpf_firewall_reset_accounting(u->ip_accounting_egress_map_fd);

        zero(u->ip_accounting_extra);

        return r < 0 ? r : q;
}
int unit_reset_io_accounting(Unit *u) {
        int r;

        assert(u);

        for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++)
                u->io_accounting_last[i] = UINT64_MAX;

        r = unit_get_io_accounting_raw(u, u->io_accounting_base);
        if (r < 0) {
                zero(u->io_accounting_base);
                return r;
        }

        return 0;
}
int unit_reset_accounting(Unit *u) {
        int r, q, v;

        assert(u);

        r = unit_reset_cpu_accounting(u);
        q = unit_reset_io_accounting(u);
        v = unit_reset_ip_accounting(u);

        return r < 0 ? r : q < 0 ? q : v;
}
void unit_invalidate_cgroup(Unit *u, CGroupMask m) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return;

        /* always invalidate compat pairs together */
        if (m & (CGROUP_MASK_IO | CGROUP_MASK_BLKIO))
                m |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;

        if (m & (CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT))
                m |= CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT;

        if (FLAGS_SET(u->cgroup_invalidated_mask, m)) /* NOP? */
                return;

        u->cgroup_invalidated_mask |= m;
        unit_add_to_cgroup_realize_queue(u);
}
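
/* For instance, invalidating only CGROUP_MASK_IO also marks CGROUP_MASK_BLKIO, because IO
 * settings are translated between the unified "io" controller and the legacy "blkio"
 * controller; refreshing one representation but not the other could leave the two hierarchies
 * out of sync. The same reasoning pairs CGROUP_MASK_CPU with CGROUP_MASK_CPUACCT. */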
void unit_invalidate_cgroup_bpf(Unit *u) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return;

        if (u->cgroup_invalidated_mask & CGROUP_MASK_BPF_FIREWALL) /* NOP? */
                return;

        u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
        unit_add_to_cgroup_realize_queue(u);

        /* If we are a slice unit, we also need to compile a new BPF program for all our children, as the IP access
         * list of our children includes our own. */
        if (u->type == UNIT_SLICE) {
                Unit *member;
                Iterator i;
                void *v;

                HASHMAP_FOREACH_KEY(v, member, u->dependencies[UNIT_BEFORE], i) {
                        if (UNIT_DEREF(member->slice) == u)
                                unit_invalidate_cgroup_bpf(member);
                }
        }
}
bool unit_cgroup_delegate(Unit *u) {
        CGroupContext *c;

        assert(u);

        if (!UNIT_VTABLE(u)->can_delegate)
                return false;

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        return c->delegate;
}
void manager_invalidate_startup_units(Manager *m) {
        Iterator i;
        Unit *u;

        assert(m);

        SET_FOREACH(u, m->startup_units, i)
                unit_invalidate_cgroup(u, CGROUP_MASK_CPU|CGROUP_MASK_IO|CGROUP_MASK_BLKIO);
}
static int unit_get_nice(Unit *u) {
        ExecContext *ec;

        ec = unit_get_exec_context(u);
        return ec ? ec->nice : 0;
}
static uint64_t unit_get_cpu_weight(Unit *u) {
        ManagerState state = manager_state(u->manager);
        CGroupContext *cc;

        cc = unit_get_cgroup_context(u);
        return cc ? cgroup_context_cpu_weight(cc, state) : CGROUP_WEIGHT_DEFAULT;
}
int compare_job_priority(const void *a, const void *b) {
        const Job *x = a, *y = b;
        int nice_x, nice_y;
        uint64_t weight_x, weight_y;
        int ret;

        if ((ret = CMP(x->unit->type, y->unit->type)) != 0)
                return -ret;

        weight_x = unit_get_cpu_weight(x->unit);
        weight_y = unit_get_cpu_weight(y->unit);

        if ((ret = CMP(weight_x, weight_y)) != 0)
                return -ret;

        nice_x = unit_get_nice(x->unit);
        nice_y = unit_get_nice(y->unit);

        if ((ret = CMP(nice_x, nice_y)) != 0)
                return ret;

        return strcmp(x->unit->id, y->unit->id);
}
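
/* This is a qsort()-style comparator, so it can be used e.g. as (hypothetical call, assuming
 * a contiguous array of Job structures):
 *
 *     qsort(jobs, n_jobs, sizeof(Job), compare_job_priority);
 *
 * Jobs then sort by unit type, CPU weight and nice level, with the unit id as a stable
 * tie-breaker. */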
static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
        [CGROUP_DEVICE_POLICY_AUTO]   = "auto",
        [CGROUP_DEVICE_POLICY_CLOSED] = "closed",
        [CGROUP_DEVICE_POLICY_STRICT] = "strict",
};
int unit_get_cpuset(Unit *u, CPUSet *cpus, const char *name) {
        _cleanup_free_ char *v = NULL;
        int r;

        assert(u);
        assert(cpus);

        if (!u->cgroup_path)
                return -ENODATA;

        if ((u->cgroup_realized_mask & CGROUP_MASK_CPUSET) == 0)
                return -ENODATA;

        r = cg_all_unified();
        if (r < 0)
                return r;
        if (r == 0)
                return -ENODATA;

        r = cg_get_attribute("cpuset", u->cgroup_path, name, &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        return parse_cpu_set_full(v, cpus, false, NULL, NULL, 0, NULL);
}
DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);