/* SPDX-License-Identifier: LGPL-2.1-or-later */

#include "sd-messages.h"

#include "alloc-util.h"
#include "blockdev-util.h"
#include "bpf-devices.h"
#include "bpf-firewall.h"
#include "bpf-foreign.h"
#include "bpf-restrict-ifaces.h"
#include "bpf-socket-bind.h"
#include "btrfs-util.h"
#include "bus-error.h"
#include "bus-locator.h"
#include "cgroup-setup.h"
#include "cgroup-util.h"
#include "devnum-util.h"
#include "firewall-util.h"
#include "in-addr-prefix-util.h"
#include "inotify-util.h"
#include "ip-protocol-list.h"
#include "limits-util.h"
#include "nulstr-util.h"
#include "parse-util.h"
#include "path-util.h"
#include "percent-util.h"
#include "process-util.h"
#include "procfs-util.h"
#include "serialize.h"
#include "stdio-util.h"
#include "string-table.h"
#include "string-util.h"

#include "bpf-dlopen.h"
#include "bpf/restrict_fs/restrict-fs-skel.h"

#define CGROUP_CPU_QUOTA_DEFAULT_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)
/* Returns the log level to use when cgroup attribute writes fail. When an attribute is missing or we have access
 * problems we downgrade to LOG_DEBUG. This is supposed to be nice to container managers and kernels which want to mask
 * out specific attributes from us. */
#define LOG_LEVEL_CGROUP_WRITE(r) (IN_SET(abs(r), ENOENT, EROFS, EACCES, EPERM) ? LOG_DEBUG : LOG_WARNING)
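
/* Illustrative note (not part of the original source): with the macro above, a write failing with
 * -EACCES or -ENOENT (e.g. an attribute masked away by a container manager) is logged at LOG_DEBUG
 * only, while an unexpected failure such as -EINVAL is logged at LOG_WARNING. */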
uint64_t cgroup_tasks_max_resolve(const CGroupTasksMax *tasks_max) {
        if (tasks_max->scale == 0)
                return tasks_max->value;

        return system_tasks_max_scale(tasks_max->value, tasks_max->scale);
}
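
/* Illustrative note (not part of the original source): a TasksMax= percentage such as "40%" is
 * stored as value=40, scale=100, and resolved against the system-wide task maximum. With a
 * hypothetical system maximum of 32768 tasks, system_tasks_max_scale(40, 100) yields
 * 32768 * 40 / 100 = 13107. A plain TasksMax=512 has scale==0 and is returned verbatim by the
 * early exit above. */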
bool manager_owns_host_root_cgroup(Manager *m) {
        assert(m);

        /* Returns true if we are managing the root cgroup. Note that it isn't sufficient to just check whether the
         * group root path equals "/" since that will also be the case if CLONE_NEWCGROUP is in the mix. Since there
         * appears to be no nice way to detect whether we are in a CLONE_NEWCGROUP namespace we instead just check if
         * we run in any kind of container virtualization. */

        if (MANAGER_IS_USER(m))
                return false;

        if (detect_container() > 0)
                return false;

        return empty_or_root(m->cgroup_root);
}
bool unit_has_startup_cgroup_constraints(Unit *u) {
        assert(u);

        /* Returns true if this unit has any directives which apply during
         * startup/shutdown phases. */

        CGroupContext *c;

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        return c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID ||
               c->startup_io_weight != CGROUP_WEIGHT_INVALID ||
               c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
               c->startup_cpuset_cpus.set ||
               c->startup_cpuset_mems.set ||
               c->startup_memory_high_set ||
               c->startup_memory_max_set ||
               c->startup_memory_swap_max_set ||
               c->startup_memory_zswap_max_set ||
               c->startup_memory_low_set;
}
bool unit_has_host_root_cgroup(Unit *u) {
        assert(u);

        /* Returns whether this unit manages the root cgroup. This will return true if this unit is the root slice and
         * the manager manages the root cgroup. */

        if (!manager_owns_host_root_cgroup(u->manager))
                return false;

        return unit_has_name(u, SPECIAL_ROOT_SLICE);
}
static int set_attribute_and_warn(Unit *u, const char *controller, const char *attribute, const char *value) {
        int r;

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return -EOWNERDEAD;

        r = cg_set_attribute(controller, crt->cgroup_path, attribute, value);
        if (r < 0)
                log_unit_full_errno(u, LOG_LEVEL_CGROUP_WRITE(r), r, "Failed to set '%s' attribute on '%s' to '%.*s': %m",
                                    strna(attribute), empty_to_root(crt->cgroup_path), (int) strcspn(value, NEWLINE), value);

        return r;
}
static void cgroup_compat_warn(void) {
        static bool cgroup_compat_warned = false;

        if (cgroup_compat_warned)
                return;

        log_warning("cgroup compatibility translation between legacy and unified hierarchy settings activated. "
                    "See cgroup-compat debug messages for details.");

        cgroup_compat_warned = true;
}

#define log_cgroup_compat(unit, fmt, ...) do {                                  \
                cgroup_compat_warn();                                           \
                log_unit_debug(unit, "cgroup-compat: " fmt, ##__VA_ARGS__);     \
        } while (false)
void cgroup_context_init(CGroupContext *c) {
        assert(c);

        /* Initialize everything to the kernel defaults. When initializing a bool member to 'true', make
         * sure to serialize in execute-serialize.c using serialize_bool() instead of
         * serialize_bool_elide(), as sd-executor will initialize here to 'true', but serialize_bool_elide()
         * skips serialization if the value is 'false' (as that's the common default), so if the value at
         * runtime is zero it would be lost after deserialization. Same when initializing uint64_t and other
         * values, update/add a conditional serialization check. This is to minimize the amount of
         * serialized data that is sent to the sd-executor, so that there is less work to do on the default
         * cases. */

        *c = (CGroupContext) {
                .cpu_weight = CGROUP_WEIGHT_INVALID,
                .startup_cpu_weight = CGROUP_WEIGHT_INVALID,
                .cpu_quota_per_sec_usec = USEC_INFINITY,
                .cpu_quota_period_usec = USEC_INFINITY,

                .cpu_shares = CGROUP_CPU_SHARES_INVALID,
                .startup_cpu_shares = CGROUP_CPU_SHARES_INVALID,

                .memory_high = CGROUP_LIMIT_MAX,
                .startup_memory_high = CGROUP_LIMIT_MAX,
                .memory_max = CGROUP_LIMIT_MAX,
                .startup_memory_max = CGROUP_LIMIT_MAX,
                .memory_swap_max = CGROUP_LIMIT_MAX,
                .startup_memory_swap_max = CGROUP_LIMIT_MAX,
                .memory_zswap_max = CGROUP_LIMIT_MAX,
                .startup_memory_zswap_max = CGROUP_LIMIT_MAX,

                .memory_limit = CGROUP_LIMIT_MAX,

                .memory_zswap_writeback = true,

                .io_weight = CGROUP_WEIGHT_INVALID,
                .startup_io_weight = CGROUP_WEIGHT_INVALID,

                .blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID,
                .startup_blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID,

                .tasks_max = CGROUP_TASKS_MAX_UNSET,

                .moom_swap = MANAGED_OOM_AUTO,
                .moom_mem_pressure = MANAGED_OOM_AUTO,
                .moom_preference = MANAGED_OOM_PREFERENCE_NONE,

                .memory_pressure_watch = _CGROUP_PRESSURE_WATCH_INVALID,
                .memory_pressure_threshold_usec = USEC_INFINITY,
        };
}
int cgroup_context_add_io_device_weight_dup(CGroupContext *c, const CGroupIODeviceWeight *w) {
        _cleanup_free_ CGroupIODeviceWeight *n = NULL;

        assert(c);
        assert(w);

        n = new(CGroupIODeviceWeight, 1);
        if (!n)
                return -ENOMEM;

        *n = (CGroupIODeviceWeight) {
                .path = strdup(w->path),
                .weight = w->weight,
        };
        if (!n->path)
                return -ENOMEM;

        LIST_PREPEND(device_weights, c->io_device_weights, TAKE_PTR(n));
        return 0;
}
int cgroup_context_add_io_device_limit_dup(CGroupContext *c, const CGroupIODeviceLimit *l) {
        _cleanup_free_ CGroupIODeviceLimit *n = NULL;

        assert(c);
        assert(l);

        n = new0(CGroupIODeviceLimit, 1);
        if (!n)
                return -ENOMEM;

        n->path = strdup(l->path);
        if (!n->path)
                return -ENOMEM;

        for (CGroupIOLimitType type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                n->limits[type] = l->limits[type];

        LIST_PREPEND(device_limits, c->io_device_limits, TAKE_PTR(n));
        return 0;
}
int cgroup_context_add_io_device_latency_dup(CGroupContext *c, const CGroupIODeviceLatency *l) {
        _cleanup_free_ CGroupIODeviceLatency *n = NULL;

        assert(c);
        assert(l);

        n = new(CGroupIODeviceLatency, 1);
        if (!n)
                return -ENOMEM;

        *n = (CGroupIODeviceLatency) {
                .path = strdup(l->path),
                .target_usec = l->target_usec,
        };
        if (!n->path)
                return -ENOMEM;

        LIST_PREPEND(device_latencies, c->io_device_latencies, TAKE_PTR(n));
        return 0;
}
int cgroup_context_add_block_io_device_weight_dup(CGroupContext *c, const CGroupBlockIODeviceWeight *w) {
        _cleanup_free_ CGroupBlockIODeviceWeight *n = NULL;

        assert(c);
        assert(w);

        n = new(CGroupBlockIODeviceWeight, 1);
        if (!n)
                return -ENOMEM;

        *n = (CGroupBlockIODeviceWeight) {
                .path = strdup(w->path),
                .weight = w->weight,
        };
        if (!n->path)
                return -ENOMEM;

        LIST_PREPEND(device_weights, c->blockio_device_weights, TAKE_PTR(n));
        return 0;
}
int cgroup_context_add_block_io_device_bandwidth_dup(CGroupContext *c, const CGroupBlockIODeviceBandwidth *b) {
        _cleanup_free_ CGroupBlockIODeviceBandwidth *n = NULL;

        assert(c);
        assert(b);

        n = new(CGroupBlockIODeviceBandwidth, 1);
        if (!n)
                return -ENOMEM;

        *n = (CGroupBlockIODeviceBandwidth) {
                .path = strdup(b->path),
                .rbps = b->rbps,
                .wbps = b->wbps,
        };
        if (!n->path)
                return -ENOMEM;

        LIST_PREPEND(device_bandwidths, c->blockio_device_bandwidths, TAKE_PTR(n));
        return 0;
}
int cgroup_context_add_device_allow_dup(CGroupContext *c, const CGroupDeviceAllow *a) {
        _cleanup_free_ CGroupDeviceAllow *n = NULL;

        assert(c);
        assert(a);

        n = new(CGroupDeviceAllow, 1);
        if (!n)
                return -ENOMEM;

        *n = (CGroupDeviceAllow) {
                .path = strdup(a->path),
                .permissions = a->permissions,
        };
        if (!n->path)
                return -ENOMEM;

        LIST_PREPEND(device_allow, c->device_allow, TAKE_PTR(n));
        return 0;
}
static int cgroup_context_add_socket_bind_item_dup(CGroupContext *c, const CGroupSocketBindItem *i, CGroupSocketBindItem *h) {
        _cleanup_free_ CGroupSocketBindItem *n = NULL;

        assert(c);
        assert(i);

        n = new(CGroupSocketBindItem, 1);
        if (!n)
                return -ENOMEM;

        *n = (CGroupSocketBindItem) {
                .address_family = i->address_family,
                .ip_protocol = i->ip_protocol,
                .nr_ports = i->nr_ports,
                .port_min = i->port_min,
        };

        LIST_PREPEND(socket_bind_items, h, TAKE_PTR(n));
        return 0;
}
int cgroup_context_add_socket_bind_item_allow_dup(CGroupContext *c, const CGroupSocketBindItem *i) {
        return cgroup_context_add_socket_bind_item_dup(c, i, c->socket_bind_allow);
}

int cgroup_context_add_socket_bind_item_deny_dup(CGroupContext *c, const CGroupSocketBindItem *i) {
        return cgroup_context_add_socket_bind_item_dup(c, i, c->socket_bind_deny);
}
int cgroup_context_copy(CGroupContext *dst, const CGroupContext *src) {
        struct in_addr_prefix *i;
        const char *iface;
        int r;

        assert(src);
        assert(dst);

        dst->cpu_accounting = src->cpu_accounting;
        dst->io_accounting = src->io_accounting;
        dst->blockio_accounting = src->blockio_accounting;
        dst->memory_accounting = src->memory_accounting;
        dst->tasks_accounting = src->tasks_accounting;
        dst->ip_accounting = src->ip_accounting;

        dst->memory_oom_group = src->memory_oom_group;

        dst->cpu_weight = src->cpu_weight;
        dst->startup_cpu_weight = src->startup_cpu_weight;
        dst->cpu_quota_per_sec_usec = src->cpu_quota_per_sec_usec;
        dst->cpu_quota_period_usec = src->cpu_quota_period_usec;

        dst->cpuset_cpus = src->cpuset_cpus;
        dst->startup_cpuset_cpus = src->startup_cpuset_cpus;
        dst->cpuset_mems = src->cpuset_mems;
        dst->startup_cpuset_mems = src->startup_cpuset_mems;

        dst->io_weight = src->io_weight;
        dst->startup_io_weight = src->startup_io_weight;

        LIST_FOREACH_BACKWARDS(device_weights, w, LIST_FIND_TAIL(device_weights, src->io_device_weights)) {
                r = cgroup_context_add_io_device_weight_dup(dst, w);
                if (r < 0)
                        return r;
        }

        LIST_FOREACH_BACKWARDS(device_limits, l, LIST_FIND_TAIL(device_limits, src->io_device_limits)) {
                r = cgroup_context_add_io_device_limit_dup(dst, l);
                if (r < 0)
                        return r;
        }

        LIST_FOREACH_BACKWARDS(device_latencies, l, LIST_FIND_TAIL(device_latencies, src->io_device_latencies)) {
                r = cgroup_context_add_io_device_latency_dup(dst, l);
                if (r < 0)
                        return r;
        }

        dst->default_memory_min = src->default_memory_min;
        dst->default_memory_low = src->default_memory_low;
        dst->default_startup_memory_low = src->default_startup_memory_low;
        dst->memory_min = src->memory_min;
        dst->memory_low = src->memory_low;
        dst->startup_memory_low = src->startup_memory_low;
        dst->memory_high = src->memory_high;
        dst->startup_memory_high = src->startup_memory_high;
        dst->memory_max = src->memory_max;
        dst->startup_memory_max = src->startup_memory_max;
        dst->memory_swap_max = src->memory_swap_max;
        dst->startup_memory_swap_max = src->startup_memory_swap_max;
        dst->memory_zswap_max = src->memory_zswap_max;
        dst->startup_memory_zswap_max = src->startup_memory_zswap_max;

        dst->default_memory_min_set = src->default_memory_min_set;
        dst->default_memory_low_set = src->default_memory_low_set;
        dst->default_startup_memory_low_set = src->default_startup_memory_low_set;
        dst->memory_min_set = src->memory_min_set;
        dst->memory_low_set = src->memory_low_set;
        dst->startup_memory_low_set = src->startup_memory_low_set;
        dst->startup_memory_high_set = src->startup_memory_high_set;
        dst->startup_memory_max_set = src->startup_memory_max_set;
        dst->startup_memory_swap_max_set = src->startup_memory_swap_max_set;
        dst->startup_memory_zswap_max_set = src->startup_memory_zswap_max_set;
        dst->memory_zswap_writeback = src->memory_zswap_writeback;

        SET_FOREACH(i, src->ip_address_allow) {
                r = in_addr_prefix_add(&dst->ip_address_allow, i);
                if (r < 0)
                        return r;
        }

        SET_FOREACH(i, src->ip_address_deny) {
                r = in_addr_prefix_add(&dst->ip_address_deny, i);
                if (r < 0)
                        return r;
        }

        dst->ip_address_allow_reduced = src->ip_address_allow_reduced;
        dst->ip_address_deny_reduced = src->ip_address_deny_reduced;

        if (!strv_isempty(src->ip_filters_ingress)) {
                dst->ip_filters_ingress = strv_copy(src->ip_filters_ingress);
                if (!dst->ip_filters_ingress)
                        return -ENOMEM;
        }

        if (!strv_isempty(src->ip_filters_egress)) {
                dst->ip_filters_egress = strv_copy(src->ip_filters_egress);
                if (!dst->ip_filters_egress)
                        return -ENOMEM;
        }

        LIST_FOREACH_BACKWARDS(programs, l, LIST_FIND_TAIL(programs, src->bpf_foreign_programs)) {
                r = cgroup_context_add_bpf_foreign_program_dup(dst, l);
                if (r < 0)
                        return r;
        }

        SET_FOREACH(iface, src->restrict_network_interfaces) {
                r = set_put_strdup(&dst->restrict_network_interfaces, iface);
                if (r < 0)
                        return r;
        }
        dst->restrict_network_interfaces_is_allow_list = src->restrict_network_interfaces_is_allow_list;

        dst->cpu_shares = src->cpu_shares;
        dst->startup_cpu_shares = src->startup_cpu_shares;

        dst->blockio_weight = src->blockio_weight;
        dst->startup_blockio_weight = src->startup_blockio_weight;

        LIST_FOREACH_BACKWARDS(device_weights, l, LIST_FIND_TAIL(device_weights, src->blockio_device_weights)) {
                r = cgroup_context_add_block_io_device_weight_dup(dst, l);
                if (r < 0)
                        return r;
        }

        LIST_FOREACH_BACKWARDS(device_bandwidths, l, LIST_FIND_TAIL(device_bandwidths, src->blockio_device_bandwidths)) {
                r = cgroup_context_add_block_io_device_bandwidth_dup(dst, l);
                if (r < 0)
                        return r;
        }

        dst->memory_limit = src->memory_limit;

        dst->device_policy = src->device_policy;
        LIST_FOREACH_BACKWARDS(device_allow, l, LIST_FIND_TAIL(device_allow, src->device_allow)) {
                r = cgroup_context_add_device_allow_dup(dst, l);
                if (r < 0)
                        return r;
        }

        LIST_FOREACH_BACKWARDS(socket_bind_items, l, LIST_FIND_TAIL(socket_bind_items, src->socket_bind_allow)) {
                r = cgroup_context_add_socket_bind_item_allow_dup(dst, l);
                if (r < 0)
                        return r;
        }

        LIST_FOREACH_BACKWARDS(socket_bind_items, l, LIST_FIND_TAIL(socket_bind_items, src->socket_bind_deny)) {
                r = cgroup_context_add_socket_bind_item_deny_dup(dst, l);
                if (r < 0)
                        return r;
        }

        dst->tasks_max = src->tasks_max;

        return 0;
}
void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
        assert(c);
        assert(a);

        LIST_REMOVE(device_allow, c->device_allow, a);
        free(a->path);
        free(a);
}

void cgroup_context_free_io_device_weight(CGroupContext *c, CGroupIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->io_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_io_device_latency(CGroupContext *c, CGroupIODeviceLatency *l) {
        assert(c);
        assert(l);

        LIST_REMOVE(device_latencies, c->io_device_latencies, l);
        free(l->path);
        free(l);
}

void cgroup_context_free_io_device_limit(CGroupContext *c, CGroupIODeviceLimit *l) {
        assert(c);
        assert(l);

        LIST_REMOVE(device_limits, c->io_device_limits, l);
        free(l->path);
        free(l);
}

void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->blockio_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b) {
        assert(c);
        assert(b);

        LIST_REMOVE(device_bandwidths, c->blockio_device_bandwidths, b);
        free(b->path);
        free(b);
}

void cgroup_context_remove_bpf_foreign_program(CGroupContext *c, CGroupBPFForeignProgram *p) {
        assert(c);
        assert(p);

        LIST_REMOVE(programs, c->bpf_foreign_programs, p);
        free(p->bpffs_path);
        free(p);
}

void cgroup_context_remove_socket_bind(CGroupSocketBindItem **head) {
        assert(head);

        LIST_CLEAR(socket_bind_items, *head, free);
}
void cgroup_context_done(CGroupContext *c) {
        assert(c);

        while (c->io_device_weights)
                cgroup_context_free_io_device_weight(c, c->io_device_weights);

        while (c->io_device_latencies)
                cgroup_context_free_io_device_latency(c, c->io_device_latencies);

        while (c->io_device_limits)
                cgroup_context_free_io_device_limit(c, c->io_device_limits);

        while (c->blockio_device_weights)
                cgroup_context_free_blockio_device_weight(c, c->blockio_device_weights);

        while (c->blockio_device_bandwidths)
                cgroup_context_free_blockio_device_bandwidth(c, c->blockio_device_bandwidths);

        while (c->device_allow)
                cgroup_context_free_device_allow(c, c->device_allow);

        cgroup_context_remove_socket_bind(&c->socket_bind_allow);
        cgroup_context_remove_socket_bind(&c->socket_bind_deny);

        c->ip_address_allow = set_free(c->ip_address_allow);
        c->ip_address_deny = set_free(c->ip_address_deny);

        c->ip_filters_ingress = strv_free(c->ip_filters_ingress);
        c->ip_filters_egress = strv_free(c->ip_filters_egress);

        while (c->bpf_foreign_programs)
                cgroup_context_remove_bpf_foreign_program(c, c->bpf_foreign_programs);

        c->restrict_network_interfaces = set_free_free(c->restrict_network_interfaces);

        cpu_set_reset(&c->cpuset_cpus);
        cpu_set_reset(&c->startup_cpuset_cpus);
        cpu_set_reset(&c->cpuset_mems);
        cpu_set_reset(&c->startup_cpuset_mems);

        c->delegate_subgroup = mfree(c->delegate_subgroup);

        nft_set_context_clear(&c->nft_set_context);
}
static int unit_get_kernel_memory_limit(Unit *u, const char *file, uint64_t *ret) {
        assert(u);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return -EOWNERDEAD;

        return cg_get_attribute_as_uint64("memory", crt->cgroup_path, file, ret);
}
static int unit_compare_memory_limit(Unit *u, const char *property_name, uint64_t *ret_unit_value, uint64_t *ret_kernel_value) {
        uint64_t unit_value;
        CGroupContext *c;
        CGroupMask m;
        const char *file;
        int r;

        /* Compare kernel memcg configuration against our internal systemd state. Unsupported (and will
         * return -ENODATA) on cgroup v1.
         *
         * Returns:
         *
         * <0: On error.
         *  0: If the kernel memory setting doesn't match our configuration.
         * >0: If the kernel memory setting matches our configuration.
         *
         * The following values are only guaranteed to be populated on return >=0:
         *
         * - ret_unit_value will contain our internal expected value for the unit, page-aligned.
         * - ret_kernel_value will contain the actual value presented by the kernel. */

        assert(u);

        r = cg_all_unified();
        if (r < 0)
                return log_debug_errno(r, "Failed to determine cgroup hierarchy version: %m");

        /* Unsupported on v1.
         *
         * We don't return ENOENT, since that could actually mask a genuine problem where somebody else has
         * silently masked the controller. */
        if (r == 0)
                return -ENODATA;

        /* The root slice doesn't have any controller files, so we can't compare anything. */
        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return -ENODATA;

        /* It's possible to have MemoryFoo set without systemd wanting to have the memory controller enabled,
         * for example, in the case of DisableControllers= or cgroup_disable on the kernel command line. To
         * avoid specious errors in these scenarios, check that we even expect the memory controller to be
         * enabled at all. */
        m = unit_get_target_mask(u);
        if (!FLAGS_SET(m, CGROUP_MASK_MEMORY))
                return -ENODATA;

        assert_se(c = unit_get_cgroup_context(u));

        bool startup = u->manager && IN_SET(manager_state(u->manager), MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING);

        if (streq(property_name, "MemoryLow")) {
                unit_value = unit_get_ancestor_memory_low(u);
                file = "memory.low";
        } else if (startup && streq(property_name, "StartupMemoryLow")) {
                unit_value = unit_get_ancestor_startup_memory_low(u);
                file = "memory.low";
        } else if (streq(property_name, "MemoryMin")) {
                unit_value = unit_get_ancestor_memory_min(u);
                file = "memory.min";
        } else if (streq(property_name, "MemoryHigh")) {
                unit_value = c->memory_high;
                file = "memory.high";
        } else if (startup && streq(property_name, "StartupMemoryHigh")) {
                unit_value = c->startup_memory_high;
                file = "memory.high";
        } else if (streq(property_name, "MemoryMax")) {
                unit_value = c->memory_max;
                file = "memory.max";
        } else if (startup && streq(property_name, "StartupMemoryMax")) {
                unit_value = c->startup_memory_max;
                file = "memory.max";
        } else if (streq(property_name, "MemorySwapMax")) {
                unit_value = c->memory_swap_max;
                file = "memory.swap.max";
        } else if (startup && streq(property_name, "StartupMemorySwapMax")) {
                unit_value = c->startup_memory_swap_max;
                file = "memory.swap.max";
        } else if (streq(property_name, "MemoryZSwapMax")) {
                unit_value = c->memory_zswap_max;
                file = "memory.zswap.max";
        } else if (startup && streq(property_name, "StartupMemoryZSwapMax")) {
                unit_value = c->startup_memory_zswap_max;
                file = "memory.zswap.max";
        } else
                return -EINVAL;

        r = unit_get_kernel_memory_limit(u, file, ret_kernel_value);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to parse %s: %m", file);

        /* It's intended (soon) in a future kernel to not expose cgroup memory limits rounded to page
         * boundaries, but instead separate the user-exposed limit, which is whatever userspace told us, from
         * our internal page-counting. To support those future kernels, just check the value itself first
         * without any page-alignment. */
        if (*ret_kernel_value == unit_value) {
                *ret_unit_value = unit_value;
                return 1;
        }

        /* The current kernel behaviour, by comparison, is that even if you write a particular number of
         * bytes into a cgroup memory file, it always returns that number page-aligned down (since the kernel
         * internally stores cgroup limits in pages). As such, so long as it aligns properly, everything is
         * fine. */
        if (unit_value != CGROUP_LIMIT_MAX)
                unit_value = PAGE_ALIGN_DOWN(unit_value);

        *ret_unit_value = unit_value;

        return *ret_kernel_value == *ret_unit_value;
}
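
/* Worked example (illustrative, assuming a 4 KiB page size): for MemoryMax=10000, current kernels
 * report memory.max as PAGE_ALIGN_DOWN(10000) = 8192. The exact comparison above therefore fails,
 * but after aligning our expectation down to the same page boundary both sides agree and the
 * function returns > 0. */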
#define FORMAT_CGROUP_DIFF_MAX 128
static char *format_cgroup_memory_limit_comparison(Unit *u, const char *property_name, char *buf, size_t l) {
        uint64_t kval, sval;
        int r;

        assert(u);
        assert(property_name);
        assert(buf);
        assert(l > 0);

        r = unit_compare_memory_limit(u, property_name, &sval, &kval);

        /* memory.swap.max is special in that it relies on CONFIG_MEMCG_SWAP (and the default swapaccount=1).
         * In the absence of reliably being able to detect whether memcg swap support is available or not,
         * only complain if the error is not ENOENT. This is similarly the case for memory.zswap.max relying
         * on CONFIG_ZSWAP. */
        if (r > 0 || IN_SET(r, -ENODATA, -EOWNERDEAD) ||
            (r == -ENOENT && STR_IN_SET(property_name,
                                        "MemorySwapMax",
                                        "StartupMemorySwapMax",
                                        "MemoryZSwapMax",
                                        "StartupMemoryZSwapMax")))
                buf[0] = 0;
        else if (r < 0) {
                errno = -r;
                (void) snprintf(buf, l, " (error getting kernel value: %m)");
        } else
                (void) snprintf(buf, l, " (different value in kernel: %" PRIu64 ")", kval);

        return buf;
}
const char *cgroup_device_permissions_to_string(CGroupDevicePermissions p) {
        static const char *table[_CGROUP_DEVICE_PERMISSIONS_MAX] = {
                /* Let's simply define a table with every possible combination. As long as those are just 8 we
                 * can get away with it. If this ever grows to more we need to revisit this logic though. */
                [0]                                                          = "",
                [CGROUP_DEVICE_READ]                                         = "r",
                [CGROUP_DEVICE_WRITE]                                        = "w",
                [CGROUP_DEVICE_MKNOD]                                        = "m",
                [CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE]                     = "rw",
                [CGROUP_DEVICE_READ|CGROUP_DEVICE_MKNOD]                     = "rm",
                [CGROUP_DEVICE_WRITE|CGROUP_DEVICE_MKNOD]                    = "wm",
                [CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE|CGROUP_DEVICE_MKNOD] = "rwm",
        };

        if (p < 0 || p >= _CGROUP_DEVICE_PERMISSIONS_MAX)
                return NULL;

        return table[p];
}

CGroupDevicePermissions cgroup_device_permissions_from_string(const char *s) {
        CGroupDevicePermissions p = 0;

        if (!s)
                return _CGROUP_DEVICE_PERMISSIONS_INVALID;

        for (const char *c = s; *c; c++) {
                if (*c == 'r')
                        p |= CGROUP_DEVICE_READ;
                else if (*c == 'w')
                        p |= CGROUP_DEVICE_WRITE;
                else if (*c == 'm')
                        p |= CGROUP_DEVICE_MKNOD;
                else
                        return _CGROUP_DEVICE_PERMISSIONS_INVALID;
        }

        return p;
}
void cgroup_context_dump(Unit *u, FILE* f, const char *prefix) {
        _cleanup_free_ char *disable_controllers_str = NULL, *delegate_controllers_str = NULL,
                *cpuset_cpus = NULL, *cpuset_mems = NULL, *startup_cpuset_cpus = NULL, *startup_cpuset_mems = NULL;
        CGroupContext *c;
        struct in_addr_prefix *iaai;
        char cda[FORMAT_CGROUP_DIFF_MAX], cdb[FORMAT_CGROUP_DIFF_MAX], cdc[FORMAT_CGROUP_DIFF_MAX], cdd[FORMAT_CGROUP_DIFF_MAX],
                cde[FORMAT_CGROUP_DIFF_MAX], cdf[FORMAT_CGROUP_DIFF_MAX], cdg[FORMAT_CGROUP_DIFF_MAX], cdh[FORMAT_CGROUP_DIFF_MAX],
                cdi[FORMAT_CGROUP_DIFF_MAX], cdj[FORMAT_CGROUP_DIFF_MAX], cdk[FORMAT_CGROUP_DIFF_MAX];

        assert(u);
        assert(f);

        assert_se(c = unit_get_cgroup_context(u));

        prefix = strempty(prefix);

        (void) cg_mask_to_string(c->disable_controllers, &disable_controllers_str);
        (void) cg_mask_to_string(c->delegate_controllers, &delegate_controllers_str);

        /* "Delegate=" means "yes, but no controllers". Show this as "(none)". */
        const char *delegate_str = delegate_controllers_str ?: c->delegate ? "(none)" : "no";

        cpuset_cpus = cpu_set_to_range_string(&c->cpuset_cpus);
        startup_cpuset_cpus = cpu_set_to_range_string(&c->startup_cpuset_cpus);
        cpuset_mems = cpu_set_to_range_string(&c->cpuset_mems);
        startup_cpuset_mems = cpu_set_to_range_string(&c->startup_cpuset_mems);

        fprintf(f,
                "%sCPUAccounting: %s\n"
                "%sIOAccounting: %s\n"
                "%sBlockIOAccounting: %s\n"
                "%sMemoryAccounting: %s\n"
                "%sTasksAccounting: %s\n"
                "%sIPAccounting: %s\n"
                "%sCPUWeight: %" PRIu64 "\n"
                "%sStartupCPUWeight: %" PRIu64 "\n"
                "%sCPUShares: %" PRIu64 "\n"
                "%sStartupCPUShares: %" PRIu64 "\n"
                "%sCPUQuotaPerSecSec: %s\n"
                "%sCPUQuotaPeriodSec: %s\n"
                "%sAllowedCPUs: %s\n"
                "%sStartupAllowedCPUs: %s\n"
                "%sAllowedMemoryNodes: %s\n"
                "%sStartupAllowedMemoryNodes: %s\n"
                "%sIOWeight: %" PRIu64 "\n"
                "%sStartupIOWeight: %" PRIu64 "\n"
                "%sBlockIOWeight: %" PRIu64 "\n"
                "%sStartupBlockIOWeight: %" PRIu64 "\n"
                "%sDefaultMemoryMin: %" PRIu64 "\n"
                "%sDefaultMemoryLow: %" PRIu64 "\n"
                "%sMemoryMin: %" PRIu64 "%s\n"
                "%sMemoryLow: %" PRIu64 "%s\n"
                "%sStartupMemoryLow: %" PRIu64 "%s\n"
                "%sMemoryHigh: %" PRIu64 "%s\n"
                "%sStartupMemoryHigh: %" PRIu64 "%s\n"
                "%sMemoryMax: %" PRIu64 "%s\n"
                "%sStartupMemoryMax: %" PRIu64 "%s\n"
                "%sMemorySwapMax: %" PRIu64 "%s\n"
                "%sStartupMemorySwapMax: %" PRIu64 "%s\n"
                "%sMemoryZSwapMax: %" PRIu64 "%s\n"
                "%sStartupMemoryZSwapMax: %" PRIu64 "%s\n"
                "%sMemoryZSwapWriteback: %s\n"
                "%sMemoryLimit: %" PRIu64 "\n"
                "%sTasksMax: %" PRIu64 "\n"
                "%sDevicePolicy: %s\n"
                "%sDisableControllers: %s\n"
                "%sDelegate: %s\n"
                "%sManagedOOMSwap: %s\n"
                "%sManagedOOMMemoryPressure: %s\n"
                "%sManagedOOMMemoryPressureLimit: " PERMYRIAD_AS_PERCENT_FORMAT_STR "\n"
                "%sManagedOOMPreference: %s\n"
                "%sMemoryPressureWatch: %s\n"
                "%sCoredumpReceive: %s\n",
                prefix, yes_no(c->cpu_accounting),
                prefix, yes_no(c->io_accounting),
                prefix, yes_no(c->blockio_accounting),
                prefix, yes_no(c->memory_accounting),
                prefix, yes_no(c->tasks_accounting),
                prefix, yes_no(c->ip_accounting),
                prefix, c->cpu_weight,
                prefix, c->startup_cpu_weight,
                prefix, c->cpu_shares,
                prefix, c->startup_cpu_shares,
                prefix, FORMAT_TIMESPAN(c->cpu_quota_per_sec_usec, 1),
                prefix, FORMAT_TIMESPAN(c->cpu_quota_period_usec, 1),
                prefix, strempty(cpuset_cpus),
                prefix, strempty(startup_cpuset_cpus),
                prefix, strempty(cpuset_mems),
                prefix, strempty(startup_cpuset_mems),
                prefix, c->io_weight,
                prefix, c->startup_io_weight,
                prefix, c->blockio_weight,
                prefix, c->startup_blockio_weight,
                prefix, c->default_memory_min,
                prefix, c->default_memory_low,
                prefix, c->memory_min, format_cgroup_memory_limit_comparison(u, "MemoryMin", cda, sizeof(cda)),
                prefix, c->memory_low, format_cgroup_memory_limit_comparison(u, "MemoryLow", cdb, sizeof(cdb)),
                prefix, c->startup_memory_low, format_cgroup_memory_limit_comparison(u, "StartupMemoryLow", cdc, sizeof(cdc)),
                prefix, c->memory_high, format_cgroup_memory_limit_comparison(u, "MemoryHigh", cdd, sizeof(cdd)),
                prefix, c->startup_memory_high, format_cgroup_memory_limit_comparison(u, "StartupMemoryHigh", cde, sizeof(cde)),
                prefix, c->memory_max, format_cgroup_memory_limit_comparison(u, "MemoryMax", cdf, sizeof(cdf)),
                prefix, c->startup_memory_max, format_cgroup_memory_limit_comparison(u, "StartupMemoryMax", cdg, sizeof(cdg)),
                prefix, c->memory_swap_max, format_cgroup_memory_limit_comparison(u, "MemorySwapMax", cdh, sizeof(cdh)),
                prefix, c->startup_memory_swap_max, format_cgroup_memory_limit_comparison(u, "StartupMemorySwapMax", cdi, sizeof(cdi)),
                prefix, c->memory_zswap_max, format_cgroup_memory_limit_comparison(u, "MemoryZSwapMax", cdj, sizeof(cdj)),
                prefix, c->startup_memory_zswap_max, format_cgroup_memory_limit_comparison(u, "StartupMemoryZSwapMax", cdk, sizeof(cdk)),
                prefix, yes_no(c->memory_zswap_writeback),
                prefix, c->memory_limit,
                prefix, cgroup_tasks_max_resolve(&c->tasks_max),
                prefix, cgroup_device_policy_to_string(c->device_policy),
                prefix, strempty(disable_controllers_str),
                prefix, delegate_str,
                prefix, managed_oom_mode_to_string(c->moom_swap),
                prefix, managed_oom_mode_to_string(c->moom_mem_pressure),
                prefix, PERMYRIAD_AS_PERCENT_FORMAT_VAL(UINT32_SCALE_TO_PERMYRIAD(c->moom_mem_pressure_limit)),
                prefix, managed_oom_preference_to_string(c->moom_preference),
                prefix, cgroup_pressure_watch_to_string(c->memory_pressure_watch),
                prefix, yes_no(c->coredump_receive));

        if (c->delegate_subgroup)
                fprintf(f, "%sDelegateSubgroup: %s\n",
                        prefix, c->delegate_subgroup);

        if (c->memory_pressure_threshold_usec != USEC_INFINITY)
                fprintf(f, "%sMemoryPressureThresholdSec: %s\n",
                        prefix, FORMAT_TIMESPAN(c->memory_pressure_threshold_usec, 1));

        LIST_FOREACH(device_allow, a, c->device_allow)
                /* strna() below should be redundant, for avoiding -Werror=format-overflow= error. See #30223. */
                fprintf(f,
                        "%sDeviceAllow: %s %s\n",
                        prefix,
                        a->path,
                        strna(cgroup_device_permissions_to_string(a->permissions)));

        LIST_FOREACH(device_weights, iw, c->io_device_weights)
                fprintf(f,
                        "%sIODeviceWeight: %s %" PRIu64 "\n",
                        prefix,
                        iw->path,
                        iw->weight);

        LIST_FOREACH(device_latencies, l, c->io_device_latencies)
                fprintf(f,
                        "%sIODeviceLatencyTargetSec: %s %s\n",
                        prefix,
                        l->path,
                        FORMAT_TIMESPAN(l->target_usec, 1));

        LIST_FOREACH(device_limits, il, c->io_device_limits)
                for (CGroupIOLimitType type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                        if (il->limits[type] != cgroup_io_limit_defaults[type])
                                fprintf(f,
                                        "%s%s: %s %s\n",
                                        prefix,
                                        cgroup_io_limit_type_to_string(type),
                                        il->path,
                                        FORMAT_BYTES(il->limits[type]));

        LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                fprintf(f,
                        "%sBlockIODeviceWeight: %s %" PRIu64 "\n",
                        prefix,
                        w->path,
                        w->weight);

        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                if (b->rbps != CGROUP_LIMIT_MAX)
                        fprintf(f,
                                "%sBlockIOReadBandwidth: %s %s\n",
                                prefix,
                                b->path,
                                FORMAT_BYTES(b->rbps));
                if (b->wbps != CGROUP_LIMIT_MAX)
                        fprintf(f,
                                "%sBlockIOWriteBandwidth: %s %s\n",
                                prefix,
                                b->path,
                                FORMAT_BYTES(b->wbps));
        }

        SET_FOREACH(iaai, c->ip_address_allow)
                fprintf(f, "%sIPAddressAllow: %s\n", prefix,
                        IN_ADDR_PREFIX_TO_STRING(iaai->family, &iaai->address, iaai->prefixlen));
        SET_FOREACH(iaai, c->ip_address_deny)
                fprintf(f, "%sIPAddressDeny: %s\n", prefix,
                        IN_ADDR_PREFIX_TO_STRING(iaai->family, &iaai->address, iaai->prefixlen));

        STRV_FOREACH(path, c->ip_filters_ingress)
                fprintf(f, "%sIPIngressFilterPath: %s\n", prefix, *path);
        STRV_FOREACH(path, c->ip_filters_egress)
                fprintf(f, "%sIPEgressFilterPath: %s\n", prefix, *path);

        LIST_FOREACH(programs, p, c->bpf_foreign_programs)
                fprintf(f, "%sBPFProgram: %s:%s\n",
                        prefix, bpf_cgroup_attach_type_to_string(p->attach_type), p->bpffs_path);

        if (c->socket_bind_allow) {
                fprintf(f, "%sSocketBindAllow: ", prefix);
                cgroup_context_dump_socket_bind_items(c->socket_bind_allow, f);
                fputc('\n', f);
        }

        if (c->socket_bind_deny) {
                fprintf(f, "%sSocketBindDeny: ", prefix);
                cgroup_context_dump_socket_bind_items(c->socket_bind_deny, f);
                fputc('\n', f);
        }

        if (c->restrict_network_interfaces) {
                char *iface;
                SET_FOREACH(iface, c->restrict_network_interfaces)
                        fprintf(f, "%sRestrictNetworkInterfaces: %s\n", prefix, iface);
        }

        FOREACH_ARRAY(nft_set, c->nft_set_context.sets, c->nft_set_context.n_sets)
                fprintf(f, "%sNFTSet: %s:%s:%s:%s\n", prefix, nft_set_source_to_string(nft_set->source),
                        nfproto_to_string(nft_set->nfproto), nft_set->table, nft_set->set);
}
void cgroup_context_dump_socket_bind_item(const CGroupSocketBindItem *item, FILE *f) {
        const char *family, *colon1, *protocol = "", *colon2 = "";

        family = strempty(af_to_ipv4_ipv6(item->address_family));
        colon1 = isempty(family) ? "" : ":";

        if (item->ip_protocol != 0) {
                protocol = ip_protocol_to_tcp_udp(item->ip_protocol);
                colon2 = ":";
        }

        if (item->nr_ports == 0)
                fprintf(f, "%s%s%s%sany", family, colon1, protocol, colon2);
        else if (item->nr_ports == 1)
                fprintf(f, "%s%s%s%s%" PRIu16, family, colon1, protocol, colon2, item->port_min);
        else {
                uint16_t port_max = item->port_min + item->nr_ports - 1;
                fprintf(f, "%s%s%s%s%" PRIu16 "-%" PRIu16, family, colon1, protocol, colon2,
                        item->port_min, port_max);
        }
}
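
/* Example rendering (illustrative): an item with address_family=AF_INET, ip_protocol=IPPROTO_TCP,
 * port_min=80 and nr_ports=9 is printed as "ipv4:tcp:80-88" (port_max = 80 + 9 - 1 = 88); with
 * nr_ports==0 the port part is rendered as "any". */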
void cgroup_context_dump_socket_bind_items(const CGroupSocketBindItem *items, FILE *f) {
        bool first = true;

        LIST_FOREACH(socket_bind_items, bi, items) {
                if (first)
                        first = false;
                else
                        fputc(' ', f);

                cgroup_context_dump_socket_bind_item(bi, f);
        }
}
int cgroup_context_add_device_allow(CGroupContext *c, const char *dev, CGroupDevicePermissions p) {
        _cleanup_free_ CGroupDeviceAllow *a = NULL;
        _cleanup_free_ char *d = NULL;

        assert(c);
        assert(dev);
        assert(p >= 0 && p < _CGROUP_DEVICE_PERMISSIONS_MAX);

        if (p == 0)
                p = _CGROUP_DEVICE_PERMISSIONS_ALL;

        a = new(CGroupDeviceAllow, 1);
        if (!a)
                return -ENOMEM;

        d = strdup(dev);
        if (!d)
                return -ENOMEM;

        *a = (CGroupDeviceAllow) {
                .path = TAKE_PTR(d),
                .permissions = p,
        };

        LIST_PREPEND(device_allow, c->device_allow, a);
        TAKE_PTR(a);

        return 0;
}
*c
, const char *dev
, CGroupDevicePermissions p
) {
1110 assert(p
>= 0 && p
< _CGROUP_DEVICE_PERMISSIONS_MAX
);
1113 p
= _CGROUP_DEVICE_PERMISSIONS_ALL
;
1115 LIST_FOREACH(device_allow
, b
, c
->device_allow
)
1116 if (path_equal(b
->path
, dev
)) {
1121 return cgroup_context_add_device_allow(c
, dev
, p
);
int cgroup_context_add_bpf_foreign_program(CGroupContext *c, uint32_t attach_type, const char *bpffs_path) {
        CGroupBPFForeignProgram *p;
        _cleanup_free_ char *d = NULL;

        assert(c);
        assert(bpffs_path);

        if (!path_is_normalized(bpffs_path) || !path_is_absolute(bpffs_path))
                return log_error_errno(SYNTHETIC_ERRNO(EINVAL), "Path is not normalized: %m");

        d = strdup(bpffs_path);
        if (!d)
                return log_oom();

        p = new(CGroupBPFForeignProgram, 1);
        if (!p)
                return log_oom();

        *p = (CGroupBPFForeignProgram) {
                .attach_type = attach_type,
                .bpffs_path = TAKE_PTR(d),
        };

        LIST_PREPEND(programs, c->bpf_foreign_programs, TAKE_PTR(p));

        return 0;
}
#define UNIT_DEFINE_ANCESTOR_MEMORY_LOOKUP(entry)                       \
        uint64_t unit_get_ancestor_##entry(Unit *u) {                   \
                CGroupContext *c;                                       \
                                                                        \
                /* 1. Is entry set in this unit? If so, use that.       \
                 * 2. Is the default for this entry set in any          \
                 *    ancestor? If so, use that.                        \
                 * 3. Otherwise, return CGROUP_LIMIT_MIN. */            \
                                                                        \
                assert(u);                                              \
                                                                        \
                c = unit_get_cgroup_context(u);                         \
                if (c && c->entry##_set)                                \
                        return c->entry;                                \
                                                                        \
                while ((u = UNIT_GET_SLICE(u))) {                       \
                        c = unit_get_cgroup_context(u);                 \
                        if (c && c->default_##entry##_set)              \
                                return c->default_##entry;              \
                }                                                       \
                                                                        \
                /* We've reached the root, but nobody had default for  \
                 * this entry set, so set it to the kernel default. */ \
                return CGROUP_LIMIT_MIN;                                \
        }

UNIT_DEFINE_ANCESTOR_MEMORY_LOOKUP(memory_low);
UNIT_DEFINE_ANCESTOR_MEMORY_LOOKUP(startup_memory_low);
UNIT_DEFINE_ANCESTOR_MEMORY_LOOKUP(memory_min);
static void unit_set_xattr_graceful(Unit *u, const char *name, const void *data, size_t size) {
        int r;

        assert(u);
        assert(name);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return;

        r = cg_set_xattr(crt->cgroup_path, name, data, size, 0);
        if (r < 0)
                log_unit_debug_errno(u, r, "Failed to set '%s' xattr on control group %s, ignoring: %m", name, empty_to_root(crt->cgroup_path));
}
static void unit_remove_xattr_graceful(Unit *u, const char *name) {
        int r;

        assert(u);
        assert(name);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return;

        r = cg_remove_xattr(crt->cgroup_path, name);
        if (r < 0 && !ERRNO_IS_XATTR_ABSENT(r))
                log_unit_debug_errno(u, r, "Failed to remove '%s' xattr flag on control group %s, ignoring: %m", name, empty_to_root(crt->cgroup_path));
}
static void cgroup_oomd_xattr_apply(Unit *u) {
        CGroupContext *c;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return;

        if (c->moom_preference == MANAGED_OOM_PREFERENCE_OMIT)
                unit_set_xattr_graceful(u, "user.oomd_omit", "1", 1);

        if (c->moom_preference == MANAGED_OOM_PREFERENCE_AVOID)
                unit_set_xattr_graceful(u, "user.oomd_avoid", "1", 1);

        if (c->moom_preference != MANAGED_OOM_PREFERENCE_AVOID)
                unit_remove_xattr_graceful(u, "user.oomd_avoid");

        if (c->moom_preference != MANAGED_OOM_PREFERENCE_OMIT)
                unit_remove_xattr_graceful(u, "user.oomd_omit");
}
static int cgroup_log_xattr_apply(Unit *u) {
        ExecContext *c;
        size_t len, allowed_patterns_len, denied_patterns_len;
        _cleanup_free_ char *patterns = NULL, *allowed_patterns = NULL, *denied_patterns = NULL;
        char *last;
        int r;

        assert(u);

        c = unit_get_exec_context(u);
        if (!c)
                /* Some unit types have a cgroup context but no exec context, so we do not log
                 * any error here to avoid confusion. */
                return 0;

        if (set_isempty(c->log_filter_allowed_patterns) && set_isempty(c->log_filter_denied_patterns)) {
                unit_remove_xattr_graceful(u, "user.journald_log_filter_patterns");
                return 0;
        }

        r = set_make_nulstr(c->log_filter_allowed_patterns, &allowed_patterns, &allowed_patterns_len);
        if (r < 0)
                return log_debug_errno(r, "Failed to make nulstr from set: %m");

        r = set_make_nulstr(c->log_filter_denied_patterns, &denied_patterns, &denied_patterns_len);
        if (r < 0)
                return log_debug_errno(r, "Failed to make nulstr from set: %m");

        /* Use nul character separated strings without trailing nul */
        allowed_patterns_len = LESS_BY(allowed_patterns_len, 1u);
        denied_patterns_len = LESS_BY(denied_patterns_len, 1u);

        len = allowed_patterns_len + 1 + denied_patterns_len;
        patterns = new(char, len);
        if (!patterns)
                return log_oom_debug();

        last = mempcpy_safe(patterns, allowed_patterns, allowed_patterns_len);
        *(last++) = '\xff';
        memcpy_safe(last, denied_patterns, denied_patterns_len);

        unit_set_xattr_graceful(u, "user.journald_log_filter_patterns", patterns, len);

        return 0;
}
*u
) {
1285 b
= !sd_id128_is_null(u
->invocation_id
);
1286 FOREACH_STRING(xn
, "trusted.invocation_id", "user.invocation_id") {
1288 unit_set_xattr_graceful(u
, xn
, SD_ID128_TO_STRING(u
->invocation_id
), 32);
1290 unit_remove_xattr_graceful(u
, xn
);
1294 static void cgroup_coredump_xattr_apply(Unit
*u
) {
1299 c
= unit_get_cgroup_context(u
);
1303 if (unit_cgroup_delegate(u
) && c
->coredump_receive
)
1304 unit_set_xattr_graceful(u
, "user.coredump_receive", "1", 1);
1306 unit_remove_xattr_graceful(u
, "user.coredump_receive");
1309 static void cgroup_delegate_xattr_apply(Unit
*u
) {
1314 /* Indicate on the cgroup whether delegation is on, via an xattr. This is best-effort, as old kernels
1315 * didn't support xattrs on cgroups at all. Later they got support for setting 'trusted.*' xattrs,
1316 * and even later 'user.*' xattrs. We started setting this field when 'trusted.*' was added, and
1317 * given this is now pretty much API, let's continue to support that. But also set 'user.*' as well,
1318 * since it is readable by any user, not just CAP_SYS_ADMIN. This hence comes with slightly weaker
1319 * security (as users who got delegated cgroups could turn it off if they like), but this shouldn't
1320 * be a big problem given this communicates delegation state to clients, but the manager never reads
1322 b
= unit_cgroup_delegate(u
);
1323 FOREACH_STRING(xn
, "trusted.delegate", "user.delegate") {
1325 unit_set_xattr_graceful(u
, xn
, "1", 1);
1327 unit_remove_xattr_graceful(u
, xn
);
1331 static void cgroup_survive_xattr_apply(Unit
*u
) {
1336 CGroupRuntime
*crt
= unit_get_cgroup_runtime(u
);
1340 if (u
->survive_final_kill_signal
) {
1343 "user.survive_final_kill_signal",
1347 /* user xattr support was added in kernel v5.7 */
1348 if (ERRNO_IS_NEG_NOT_SUPPORTED(r
))
1351 "trusted.survive_final_kill_signal",
1356 log_unit_debug_errno(u
,
1358 "Failed to set 'survive_final_kill_signal' xattr on control "
1359 "group %s, ignoring: %m",
1360 empty_to_root(crt
->cgroup_path
));
1362 unit_remove_xattr_graceful(u
, "user.survive_final_kill_signal");
1363 unit_remove_xattr_graceful(u
, "trusted.survive_final_kill_signal");
1367 static void cgroup_xattr_apply(Unit
*u
) {
1370 /* The 'user.*' xattrs can be set from a user manager. */
1371 cgroup_oomd_xattr_apply(u
);
1372 cgroup_log_xattr_apply(u
);
1373 cgroup_coredump_xattr_apply(u
);
1375 if (!MANAGER_IS_SYSTEM(u
->manager
))
1378 cgroup_invocation_id_xattr_apply(u
);
1379 cgroup_delegate_xattr_apply(u
);
1380 cgroup_survive_xattr_apply(u
);
1383 static int lookup_block_device(const char *p
, dev_t
*ret
) {
1384 dev_t rdev
, dev
= 0;
1391 r
= device_path_parse_major_minor(p
, &mode
, &rdev
);
1392 if (r
== -ENODEV
) { /* not a parsable device node, need to go to disk */
1395 if (stat(p
, &st
) < 0)
1396 return log_warning_errno(errno
, "Couldn't stat device '%s': %m", p
);
1402 return log_warning_errno(r
, "Failed to parse major/minor from path '%s': %m", p
);
1405 return log_warning_errno(SYNTHETIC_ERRNO(ENOTBLK
),
1406 "Device node '%s' is a character device, but block device needed.", p
);
1409 else if (major(dev
) != 0)
1410 *ret
= dev
; /* If this is not a device node then use the block device this file is stored on */
1412 /* If this is btrfs, getting the backing block device is a bit harder */
1413 r
= btrfs_get_block_device(p
, ret
);
1415 return log_warning_errno(SYNTHETIC_ERRNO(ENODEV
),
1416 "'%s' is not a block device node, and file system block device cannot be determined or is not local.", p
);
1418 return log_warning_errno(r
, "Failed to determine block device backing btrfs file system '%s': %m", p
);
1421 /* If this is a LUKS/DM device, recursively try to get the originating block device */
1422 while (block_get_originating(*ret
, ret
) > 0);
1424 /* If this is a partition, try to get the originating block device */
1425 (void) block_get_whole_disk(*ret
, ret
);
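
/* Illustrative resolution chain (not part of the original source): for a file on a hypothetical
 * LUKS volume /dev/mapper/data backed by partition /dev/sda2, the loop above walks the
 * device-mapper stack back to /dev/sda2 via block_get_originating(), and block_get_whole_disk()
 * then reduces that to the whole disk /dev/sda, which is the device the io/blkio attributes are
 * keyed on. */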
static bool cgroup_context_has_cpu_weight(CGroupContext *c) {
        return c->cpu_weight != CGROUP_WEIGHT_INVALID ||
                c->startup_cpu_weight != CGROUP_WEIGHT_INVALID;
}

static bool cgroup_context_has_cpu_shares(CGroupContext *c) {
        return c->cpu_shares != CGROUP_CPU_SHARES_INVALID ||
                c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID;
}

static bool cgroup_context_has_allowed_cpus(CGroupContext *c) {
        return c->cpuset_cpus.set || c->startup_cpuset_cpus.set;
}

static bool cgroup_context_has_allowed_mems(CGroupContext *c) {
        return c->cpuset_mems.set || c->startup_cpuset_mems.set;
}
uint64_t cgroup_context_cpu_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING) &&
            c->startup_cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->startup_cpu_weight;
        else if (c->cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->cpu_weight;
        else
                return CGROUP_WEIGHT_DEFAULT;
}

static uint64_t cgroup_context_cpu_shares(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING) &&
            c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->startup_cpu_shares;
        else if (c->cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->cpu_shares;
        else
                return CGROUP_CPU_SHARES_DEFAULT;
}

static CPUSet *cgroup_context_allowed_cpus(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING) &&
            c->startup_cpuset_cpus.set)
                return &c->startup_cpuset_cpus;
        else
                return &c->cpuset_cpus;
}

static CPUSet *cgroup_context_allowed_mems(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING) &&
            c->startup_cpuset_mems.set)
                return &c->startup_cpuset_mems;
        else
                return &c->cpuset_mems;
}
usec_t cgroup_cpu_adjust_period(usec_t period, usec_t quota, usec_t resolution, usec_t max_period) {
        /* kernel uses a minimum resolution of 1ms, so both period and (quota * period)
         * need to be higher than that boundary. quota is specified in USecPerSec.
         * Additionally, period must be at most max_period. */
        assert(quota > 0);

        return MIN(MAX3(period, resolution, resolution * USEC_PER_SEC / quota), max_period);
}
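
/* Worked example (illustrative): for CPUQuota=0.5% the quota is 5ms per second. With
 * resolution=1ms, resolution * USEC_PER_SEC / quota = 1000 * 1000000 / 5000 = 200000us, so a
 * requested 100ms period is raised to 200ms; the effective runtime 5000 * 200000 / 1000000 =
 * 1000us then just meets the kernel's 1ms floor. */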
static usec_t cgroup_cpu_adjust_period_and_log(Unit *u, usec_t period, usec_t quota) {
        usec_t new_period;

        assert(u);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt)
                return USEC_INFINITY;

        if (quota == USEC_INFINITY)
                /* Always use default period for infinity quota. */
                return CGROUP_CPU_QUOTA_DEFAULT_PERIOD_USEC;

        if (period == USEC_INFINITY)
                /* Default period was requested. */
                period = CGROUP_CPU_QUOTA_DEFAULT_PERIOD_USEC;

        /* Clamp to interval [1ms, 1s] */
        new_period = cgroup_cpu_adjust_period(period, quota, USEC_PER_MSEC, USEC_PER_SEC);

        if (new_period != period) {
                log_unit_full(u, crt->warned_clamping_cpu_quota_period ? LOG_DEBUG : LOG_WARNING,
                              "Clamping CPU interval for cpu.max: period is now %s",
                              FORMAT_TIMESPAN(new_period, 1));
                crt->warned_clamping_cpu_quota_period = true;
        }

        return new_period;
}
static void cgroup_apply_unified_cpu_weight(Unit *u, uint64_t weight) {
        char buf[DECIMAL_STR_MAX(uint64_t) + 2];

        if (weight == CGROUP_WEIGHT_IDLE)
                return;
        xsprintf(buf, "%" PRIu64 "\n", weight);
        (void) set_attribute_and_warn(u, "cpu", "cpu.weight", buf);
}
static void cgroup_apply_unified_cpu_idle(Unit *u, uint64_t weight) {
        int r;
        bool is_idle;
        const char *idle_val;

        assert(u);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return;

        is_idle = weight == CGROUP_WEIGHT_IDLE;
        idle_val = one_zero(is_idle);
        r = cg_set_attribute("cpu", crt->cgroup_path, "cpu.idle", idle_val);
        if (r < 0 && (r != -ENOENT || is_idle))
                log_unit_full_errno(u, LOG_LEVEL_CGROUP_WRITE(r), r, "Failed to set '%s' attribute on '%s' to '%s': %m",
                                    "cpu.idle", empty_to_root(crt->cgroup_path), idle_val);
}
static void cgroup_apply_unified_cpu_quota(Unit *u, usec_t quota, usec_t period) {
        char buf[(DECIMAL_STR_MAX(usec_t) + 1) * 2 + 1];

        period = cgroup_cpu_adjust_period_and_log(u, period, quota);
        if (quota != USEC_INFINITY)
                xsprintf(buf, USEC_FMT " " USEC_FMT "\n",
                         MAX(quota * period / USEC_PER_SEC, USEC_PER_MSEC), period);
        else
                xsprintf(buf, "max " USEC_FMT "\n", period);
        (void) set_attribute_and_warn(u, "cpu", "cpu.max", buf);
}
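
/* Illustrative example: CPUQuota=50% with the default 100ms period yields a cpu.max payload of
 * "50000 100000\n" (50ms of runtime per 100ms period: 500000 * 100000 / 1000000 = 50000), while an
 * infinite quota is written as "max 100000\n". */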
static void cgroup_apply_legacy_cpu_shares(Unit *u, uint64_t shares) {
        char buf[DECIMAL_STR_MAX(uint64_t) + 2];

        xsprintf(buf, "%" PRIu64 "\n", shares);
        (void) set_attribute_and_warn(u, "cpu", "cpu.shares", buf);
}
static void cgroup_apply_legacy_cpu_quota(Unit *u, usec_t quota, usec_t period) {
        char buf[DECIMAL_STR_MAX(usec_t) + 2];

        period = cgroup_cpu_adjust_period_and_log(u, period, quota);

        xsprintf(buf, USEC_FMT "\n", period);
        (void) set_attribute_and_warn(u, "cpu", "cpu.cfs_period_us", buf);

        if (quota != USEC_INFINITY) {
                xsprintf(buf, USEC_FMT "\n", MAX(quota * period / USEC_PER_SEC, USEC_PER_MSEC));
                (void) set_attribute_and_warn(u, "cpu", "cpu.cfs_quota_us", buf);
        } else
                (void) set_attribute_and_warn(u, "cpu", "cpu.cfs_quota_us", "-1\n");
}
static uint64_t cgroup_cpu_shares_to_weight(uint64_t shares) {
        return CLAMP(shares * CGROUP_WEIGHT_DEFAULT / CGROUP_CPU_SHARES_DEFAULT,
                     CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}

static uint64_t cgroup_cpu_weight_to_shares(uint64_t weight) {
        /* we don't support idle in cgroupv1 */
        if (weight == CGROUP_WEIGHT_IDLE)
                return CGROUP_CPU_SHARES_MIN;

        return CLAMP(weight * CGROUP_CPU_SHARES_DEFAULT / CGROUP_WEIGHT_DEFAULT,
                     CGROUP_CPU_SHARES_MIN, CGROUP_CPU_SHARES_MAX);
}
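
/* Illustrative mapping (not part of the original source): with CGROUP_CPU_SHARES_DEFAULT=1024 and
 * CGROUP_WEIGHT_DEFAULT=100, legacy shares of 2048 convert to the unified weight
 * 2048 * 100 / 1024 = 200, and weight 200 converts back to 200 * 1024 / 100 = 2048 shares; both
 * directions clamp to the valid range of the respective hierarchy. */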
static void cgroup_apply_unified_cpuset(Unit *u, const CPUSet *cpus, const char *name) {
        _cleanup_free_ char *buf = NULL;

        buf = cpu_set_to_range_string(cpus);
        if (!buf) {
                log_oom();
                return;
        }

        (void) set_attribute_and_warn(u, "cpuset", name, buf);
}
static bool cgroup_context_has_io_config(CGroupContext *c) {
        return c->io_accounting ||
                c->io_weight != CGROUP_WEIGHT_INVALID ||
                c->startup_io_weight != CGROUP_WEIGHT_INVALID ||
                c->io_device_weights ||
                c->io_device_latencies ||
                c->io_device_limits;
}

static bool cgroup_context_has_blockio_config(CGroupContext *c) {
        return c->blockio_accounting ||
                c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
                c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
                c->blockio_device_weights ||
                c->blockio_device_bandwidths;
}
static uint64_t cgroup_context_io_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING) &&
            c->startup_io_weight != CGROUP_WEIGHT_INVALID)
                return c->startup_io_weight;
        if (c->io_weight != CGROUP_WEIGHT_INVALID)
                return c->io_weight;
        return CGROUP_WEIGHT_DEFAULT;
}

static uint64_t cgroup_context_blkio_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING) &&
            c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
                return c->startup_blockio_weight;
        if (c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
                return c->blockio_weight;
        return CGROUP_BLKIO_WEIGHT_DEFAULT;
}
static uint64_t cgroup_weight_blkio_to_io(uint64_t blkio_weight) {
        return CLAMP(blkio_weight * CGROUP_WEIGHT_DEFAULT / CGROUP_BLKIO_WEIGHT_DEFAULT,
                     CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}

static uint64_t cgroup_weight_io_to_blkio(uint64_t io_weight) {
        return CLAMP(io_weight * CGROUP_BLKIO_WEIGHT_DEFAULT / CGROUP_WEIGHT_DEFAULT,
                     CGROUP_BLKIO_WEIGHT_MIN, CGROUP_BLKIO_WEIGHT_MAX);
}
static int set_bfq_weight(Unit *u, const char *controller, dev_t dev, uint64_t io_weight) {
        static const char * const prop_names[] = {
                "IOWeight",
                "BlockIOWeight",
                "IODeviceWeight",
                "BlockIODeviceWeight",
        };
        static bool warned = false;
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+STRLEN("\n")];
        const char *p;
        uint64_t bfq_weight;
        int r;

        assert(u);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return -EOWNERDEAD;

        /* FIXME: drop this function when distro kernels properly support BFQ through "io.weight"
         * See also: https://github.com/systemd/systemd/pull/13335 and
         * https://github.com/torvalds/linux/commit/65752aef0a407e1ef17ec78a7fc31ba4e0b360f9. */
        p = strjoina(controller, ".bfq.weight");
        /* Adjust to kernel range is 1..1000, the default is 100. */
        bfq_weight = BFQ_WEIGHT(io_weight);

        if (major(dev) > 0)
                xsprintf(buf, DEVNUM_FORMAT_STR " %" PRIu64 "\n", DEVNUM_FORMAT_VAL(dev), bfq_weight);
        else
                xsprintf(buf, "%" PRIu64 "\n", bfq_weight);

        r = cg_set_attribute(controller, crt->cgroup_path, p, buf);

        /* FIXME: drop this when kernels prior
         * 795fe54c2a82 ("bfq: Add per-device weight") v5.4
         * are not interesting anymore. Old kernels will fail with EINVAL, while new kernels won't return
         * EINVAL on properly formatted input by us. Treat EINVAL accordingly. */
        if (r == -EINVAL && major(dev) > 0) {
                if (!warned) {
                        log_unit_warning(u, "Kernel version does not accept per-device setting in %s.", p);
                        warned = true;
                }
                r = -EOPNOTSUPP; /* mask as unconfigured device */
        } else if (r >= 0 && io_weight != bfq_weight)
                log_unit_debug(u, "%s=%" PRIu64 " scaled to %s=%" PRIu64,
                               prop_names[2*(major(dev) > 0) + streq(controller, "blkio")],
                               io_weight, p, bfq_weight);
        return r;
}
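
/* Illustrative example: for a device with dev_t 8:0 this writes a line such as "8:0 100\n" into
 * io.bfq.weight (or blkio.bfq.weight on the legacy hierarchy); with major(dev) == 0 only the
 * default weight, e.g. "100\n", is written. */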
static void cgroup_apply_io_device_weight(Unit *u, const char *dev_path, uint64_t io_weight) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r, r1, r2;

        assert(u);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return;

        if (lookup_block_device(dev_path, &dev) < 0)
                return;

        r1 = set_bfq_weight(u, "io", dev, io_weight);

        xsprintf(buf, DEVNUM_FORMAT_STR " %" PRIu64 "\n", DEVNUM_FORMAT_VAL(dev), io_weight);
        r2 = cg_set_attribute("io", crt->cgroup_path, "io.weight", buf);

        /* Look at the configured device, when both fail, prefer io.weight errno. */
        r = r2 == -EOPNOTSUPP ? r1 : r2;
        if (r < 0)
                log_unit_full_errno(u, LOG_LEVEL_CGROUP_WRITE(r),
                                    r, "Failed to set 'io[.bfq].weight' attribute on '%s' to '%.*s': %m",
                                    empty_to_root(crt->cgroup_path), (int) strcspn(buf, NEWLINE), buf);
}
static void cgroup_apply_blkio_device_weight(Unit *u, const char *dev_path, uint64_t blkio_weight) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        xsprintf(buf, DEVNUM_FORMAT_STR " %" PRIu64 "\n", DEVNUM_FORMAT_VAL(dev), blkio_weight);
        (void) set_attribute_and_warn(u, "blkio", "blkio.weight_device", buf);
}
static void cgroup_apply_io_device_latency(Unit *u, const char *dev_path, usec_t target) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+7+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        if (target != USEC_INFINITY)
                xsprintf(buf, DEVNUM_FORMAT_STR " target=%" PRIu64 "\n", DEVNUM_FORMAT_VAL(dev), target);
        else
                xsprintf(buf, DEVNUM_FORMAT_STR " target=max\n", DEVNUM_FORMAT_VAL(dev));

        (void) set_attribute_and_warn(u, "io", "io.latency", buf);
}
static void cgroup_apply_io_device_limit(Unit *u, const char *dev_path, uint64_t *limits) {
        char limit_bufs[_CGROUP_IO_LIMIT_TYPE_MAX][DECIMAL_STR_MAX(uint64_t)],
             buf[DECIMAL_STR_MAX(dev_t)*2+2+(6+DECIMAL_STR_MAX(uint64_t)+1)*4];
        dev_t dev;

        if (lookup_block_device(dev_path, &dev) < 0)
                return;

        for (CGroupIOLimitType type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                if (limits[type] != cgroup_io_limit_defaults[type])
                        xsprintf(limit_bufs[type], "%" PRIu64, limits[type]);
                else
                        xsprintf(limit_bufs[type], "%s", limits[type] == CGROUP_LIMIT_MAX ? "max" : "0");

        xsprintf(buf, DEVNUM_FORMAT_STR " rbps=%s wbps=%s riops=%s wiops=%s\n", DEVNUM_FORMAT_VAL(dev),
                 limit_bufs[CGROUP_IO_RBPS_MAX], limit_bufs[CGROUP_IO_WBPS_MAX],
                 limit_bufs[CGROUP_IO_RIOPS_MAX], limit_bufs[CGROUP_IO_WIOPS_MAX]);
        (void) set_attribute_and_warn(u, "io", "io.max", buf);
}
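
/* Illustrative example: IOReadBandwidthMax=/dev/sdb 1M with no other limits configured produces an
 * io.max payload such as "8:16 rbps=1048576 wbps=max riops=max wiops=max\n" (assuming /dev/sdb is
 * device 8:16). */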
static void cgroup_apply_blkio_device_limit(Unit *u, const char *dev_path, uint64_t rbps, uint64_t wbps) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;

        if (lookup_block_device(dev_path, &dev) < 0)
                return;

        sprintf(buf, DEVNUM_FORMAT_STR " %" PRIu64 "\n", DEVNUM_FORMAT_VAL(dev), rbps);
        (void) set_attribute_and_warn(u, "blkio", "blkio.throttle.read_bps_device", buf);

        sprintf(buf, DEVNUM_FORMAT_STR " %" PRIu64 "\n", DEVNUM_FORMAT_VAL(dev), wbps);
        (void) set_attribute_and_warn(u, "blkio", "blkio.throttle.write_bps_device", buf);
}
static bool unit_has_unified_memory_config(Unit *u) {
        CGroupContext *c;

        assert(u);

        assert_se(c = unit_get_cgroup_context(u));

        return unit_get_ancestor_memory_min(u) > 0 ||
               unit_get_ancestor_memory_low(u) > 0 || unit_get_ancestor_startup_memory_low(u) > 0 ||
               c->memory_high != CGROUP_LIMIT_MAX || c->startup_memory_high_set ||
               c->memory_max != CGROUP_LIMIT_MAX || c->startup_memory_max_set ||
               c->memory_swap_max != CGROUP_LIMIT_MAX || c->startup_memory_swap_max_set ||
               c->memory_zswap_max != CGROUP_LIMIT_MAX || c->startup_memory_zswap_max_set;
}
static void cgroup_apply_unified_memory_limit(Unit *u, const char *file, uint64_t v) {
        char buf[DECIMAL_STR_MAX(uint64_t) + 1] = "max\n";

        if (v != CGROUP_LIMIT_MAX)
                xsprintf(buf, "%" PRIu64 "\n", v);

        (void) set_attribute_and_warn(u, "memory", file, buf);
}
static void cgroup_apply_firewall(Unit *u) {
        assert(u);

        /* Best-effort: let's apply IP firewalling and/or accounting if that's enabled */

        if (bpf_firewall_compile(u) < 0)
                return;

        (void) bpf_firewall_load_custom(u);
        (void) bpf_firewall_install(u);
}
1837 void unit_modify_nft_set(Unit
*u
, bool add
) {
1842 if (!MANAGER_IS_SYSTEM(u
->manager
))
1845 if (!UNIT_HAS_CGROUP_CONTEXT(u
))
1848 if (cg_all_unified() <= 0)
1851 CGroupRuntime
*crt
= unit_get_cgroup_runtime(u
);
1852 if (!crt
|| crt
->cgroup_id
== 0)
1855 if (!u
->manager
->fw_ctx
) {
1856 r
= fw_ctx_new_full(&u
->manager
->fw_ctx
, /* init_tables= */ false);
1860 assert(u
->manager
->fw_ctx
);
1863 CGroupContext
*c
= ASSERT_PTR(unit_get_cgroup_context(u
));
1865 FOREACH_ARRAY(nft_set
, c
->nft_set_context
.sets
, c
->nft_set_context
.n_sets
) {
1866 if (nft_set
->source
!= NFT_SET_SOURCE_CGROUP
)
1869 uint64_t element
= crt
->cgroup_id
;
1871 r
= nft_set_element_modify_any(u
->manager
->fw_ctx
, add
, nft_set
->nfproto
, nft_set
->table
, nft_set
->set
, &element
, sizeof(element
));
1873 log_warning_errno(r
, "Failed to %s NFT set: family %s, table %s, set %s, cgroup %" PRIu64
", ignoring: %m",
1874 add
? "add" : "delete", nfproto_to_string(nft_set
->nfproto
), nft_set
->table
, nft_set
->set
, crt
->cgroup_id
);
1876 log_debug("%s NFT set: family %s, table %s, set %s, cgroup %" PRIu64
,
1877 add
? "Added" : "Deleted", nfproto_to_string(nft_set
->nfproto
), nft_set
->table
, nft_set
->set
, crt
->cgroup_id
);
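
/* Illustrative example (not part of the original file): a unit configured with something along the lines of
 * NFTSet=cgroup:inet:filter:my_service would have its numeric cgroup ID added to the nftables set
 * "my_service" in table "filter" of the "inet" family when it starts, and removed again when it stops. The
 * directive syntax shown here is an assumption; see systemd.resource-control(5) for the exact format. */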
static void cgroup_apply_socket_bind(Unit *u) {
        assert(u);

        (void) bpf_socket_bind_install(u);
}

static void cgroup_apply_restrict_network_interfaces(Unit *u) {
        assert(u);

        (void) bpf_restrict_ifaces_install(u);
}

static int cgroup_apply_devices(Unit *u) {
        _cleanup_(bpf_program_freep) BPFProgram *prog = NULL;
        CGroupContext *c;
        CGroupDevicePolicy policy;
        int r;

        assert_se(c = unit_get_cgroup_context(u));

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return -EOWNERDEAD;

        policy = c->device_policy;

        if (cg_all_unified() > 0) {
                r = bpf_devices_cgroup_init(&prog, policy, c->device_allow);
                if (r < 0)
                        return log_unit_warning_errno(u, r, "Failed to initialize device control bpf program: %m");

        } else {
                /* Changing the devices list of a populated cgroup might result in EINVAL, hence ignore
                 * EINVAL here. */

                if (c->device_allow || policy != CGROUP_DEVICE_POLICY_AUTO)
                        r = cg_set_attribute("devices", crt->cgroup_path, "devices.deny", "a");
                else
                        r = cg_set_attribute("devices", crt->cgroup_path, "devices.allow", "a");
                if (r < 0)
                        log_unit_full_errno(u, IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES, -EPERM) ? LOG_DEBUG : LOG_WARNING, r,
                                            "Failed to reset devices.allow/devices.deny: %m");
        }

        bool allow_list_static = policy == CGROUP_DEVICE_POLICY_CLOSED ||
                (policy == CGROUP_DEVICE_POLICY_AUTO && c->device_allow);

        bool any = false;
        if (allow_list_static) {
                r = bpf_devices_allow_list_static(prog, crt->cgroup_path);
                if (r > 0)
                        any = true;
        }

        LIST_FOREACH(device_allow, a, c->device_allow) {
                const char *val;

                if (a->permissions == 0)
                        continue;

                if (path_startswith(a->path, "/dev/"))
                        r = bpf_devices_allow_list_device(prog, crt->cgroup_path, a->path, a->permissions);
                else if ((val = startswith(a->path, "block-")))
                        r = bpf_devices_allow_list_major(prog, crt->cgroup_path, val, 'b', a->permissions);
                else if ((val = startswith(a->path, "char-")))
                        r = bpf_devices_allow_list_major(prog, crt->cgroup_path, val, 'c', a->permissions);
                else {
                        log_unit_debug(u, "Ignoring device '%s' while writing cgroup attribute.", a->path);
                        continue;
                }

                if (r > 0)
                        any = true;
        }

        if (prog && !any) {
                log_unit_warning_errno(u, SYNTHETIC_ERRNO(ENODEV), "No devices matched by device filter.");

                /* The kernel verifier would reject a program we would build with the normal intro and outro
                 * but no allow-listing rules (outro would contain an unreachable instruction for successful
                 * return). */
                policy = CGROUP_DEVICE_POLICY_STRICT;
        }

        r = bpf_devices_apply_policy(&prog, policy, any, crt->cgroup_path, &crt->bpf_device_control_installed);
        if (r < 0) {
                static bool warned = false;

                log_full_errno(warned ? LOG_DEBUG : LOG_WARNING, r,
                               "Unit %s configures device ACL, but the local system doesn't seem to support the BPF-based device controller.\n"
                               "Proceeding WITHOUT applying ACL (all devices will be accessible)!\n"
                               "(This warning is only shown for the first loaded unit using device ACL.)", u->id);

                warned = true;
        }

        return r;
}
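
/* Illustrative example (not part of the original file): DeviceAllow=/dev/null rw allow-lists a single device
 * node, DeviceAllow=block-loop rwm allow-lists all block devices of the "loop" major, and
 * DeviceAllow=char-pts rw allow-lists a whole character device group, matching the three branches above. */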
static void set_io_weight(Unit *u, uint64_t weight) {
        char buf[STRLEN("default \n")+DECIMAL_STR_MAX(uint64_t)];

        assert(u);

        (void) set_bfq_weight(u, "io", makedev(0, 0), weight);

        xsprintf(buf, "default %" PRIu64 "\n", weight);
        (void) set_attribute_and_warn(u, "io", "io.weight", buf);
}

static void set_blkio_weight(Unit *u, uint64_t weight) {
        char buf[STRLEN("\n")+DECIMAL_STR_MAX(uint64_t)];

        assert(u);

        (void) set_bfq_weight(u, "blkio", makedev(0, 0), weight);

        xsprintf(buf, "%" PRIu64 "\n", weight);
        (void) set_attribute_and_warn(u, "blkio", "blkio.weight", buf);
}

static void cgroup_apply_bpf_foreign_program(Unit *u) {
        assert(u);

        (void) bpf_foreign_install(u);
}
static void cgroup_context_apply(
                Unit *u,
                CGroupMask apply_mask,
                ManagerState state) {

        bool is_host_root, is_local_root;
        const char *path;
        CGroupContext *c;
        int r;

        assert(u);

        /* Nothing to do? Exit early! */
        if (apply_mask == 0)
                return;

        /* Some cgroup attributes are not supported on the host root cgroup, hence silently ignore them here. And other
         * attributes should only be managed for cgroups further down the tree. */
        is_local_root = unit_has_name(u, SPECIAL_ROOT_SLICE);
        is_host_root = unit_has_host_root_cgroup(u);

        assert_se(c = unit_get_cgroup_context(u));

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return;

        path = crt->cgroup_path;
        if (is_local_root) /* Make sure we don't try to display messages with an empty path. */
                path = "/";

        /* We generally ignore errors caused by read-only mounted cgroup trees (assuming we are running in a container
         * then), and missing cgroups, i.e. EROFS and ENOENT. */

        /* In fully unified mode these attributes don't exist on the host cgroup root. On legacy the weights exist, but
         * setting the weight makes very little sense on the host root cgroup, as there are no other cgroups at this
         * level. The quota exists there too, but any attempt to write to it is refused with EINVAL. Inside of
         * containers we want to leave control of these to the container manager (and if cgroup v2 delegation is used
         * we couldn't even write to them if we wanted to). */
        if ((apply_mask & CGROUP_MASK_CPU) && !is_local_root) {

                if (cg_all_unified() > 0) {
                        uint64_t weight;

                        if (cgroup_context_has_cpu_weight(c))
                                weight = cgroup_context_cpu_weight(c, state);
                        else if (cgroup_context_has_cpu_shares(c)) {
                                uint64_t shares;

                                shares = cgroup_context_cpu_shares(c, state);
                                weight = cgroup_cpu_shares_to_weight(shares);

                                log_cgroup_compat(u, "Applying [Startup]CPUShares=%" PRIu64 " as [Startup]CPUWeight=%" PRIu64 " on %s",
                                                  shares, weight, path);
                        } else
                                weight = CGROUP_WEIGHT_DEFAULT;

                        cgroup_apply_unified_cpu_idle(u, weight);
                        cgroup_apply_unified_cpu_weight(u, weight);
                        cgroup_apply_unified_cpu_quota(u, c->cpu_quota_per_sec_usec, c->cpu_quota_period_usec);

                } else {
                        uint64_t shares;

                        if (cgroup_context_has_cpu_weight(c)) {
                                uint64_t weight;

                                weight = cgroup_context_cpu_weight(c, state);
                                shares = cgroup_cpu_weight_to_shares(weight);

                                log_cgroup_compat(u, "Applying [Startup]CPUWeight=%" PRIu64 " as [Startup]CPUShares=%" PRIu64 " on %s",
                                                  weight, shares, path);
                        } else if (cgroup_context_has_cpu_shares(c))
                                shares = cgroup_context_cpu_shares(c, state);
                        else
                                shares = CGROUP_CPU_SHARES_DEFAULT;

                        cgroup_apply_legacy_cpu_shares(u, shares);
                        cgroup_apply_legacy_cpu_quota(u, c->cpu_quota_per_sec_usec, c->cpu_quota_period_usec);
                }
        }

        if ((apply_mask & CGROUP_MASK_CPUSET) && !is_local_root) {
                cgroup_apply_unified_cpuset(u, cgroup_context_allowed_cpus(c, state), "cpuset.cpus");
                cgroup_apply_unified_cpuset(u, cgroup_context_allowed_mems(c, state), "cpuset.mems");
        }

        /* The 'io' controller attributes are not exported on the host's root cgroup (being a pure cgroup v2
         * controller), and in case of containers we want to leave control of these attributes to the container manager
         * (and we couldn't access that stuff anyway, even if we tried, if proper delegation is used). */
        if ((apply_mask & CGROUP_MASK_IO) && !is_local_root) {
                bool has_io, has_blockio;
                uint64_t weight;

                has_io = cgroup_context_has_io_config(c);
                has_blockio = cgroup_context_has_blockio_config(c);

                if (has_io)
                        weight = cgroup_context_io_weight(c, state);
                else if (has_blockio) {
                        uint64_t blkio_weight;

                        blkio_weight = cgroup_context_blkio_weight(c, state);
                        weight = cgroup_weight_blkio_to_io(blkio_weight);

                        log_cgroup_compat(u, "Applying [Startup]BlockIOWeight=%" PRIu64 " as [Startup]IOWeight=%" PRIu64,
                                          blkio_weight, weight);
                } else
                        weight = CGROUP_WEIGHT_DEFAULT;

                set_io_weight(u, weight);

                if (has_io) {
                        LIST_FOREACH(device_weights, w, c->io_device_weights)
                                cgroup_apply_io_device_weight(u, w->path, w->weight);

                        LIST_FOREACH(device_limits, limit, c->io_device_limits)
                                cgroup_apply_io_device_limit(u, limit->path, limit->limits);

                        LIST_FOREACH(device_latencies, latency, c->io_device_latencies)
                                cgroup_apply_io_device_latency(u, latency->path, latency->target_usec);

                } else if (has_blockio) {
                        LIST_FOREACH(device_weights, w, c->blockio_device_weights) {
                                weight = cgroup_weight_blkio_to_io(w->weight);

                                log_cgroup_compat(u, "Applying BlockIODeviceWeight=%" PRIu64 " as IODeviceWeight=%" PRIu64 " for %s",
                                                  w->weight, weight, w->path);

                                cgroup_apply_io_device_weight(u, w->path, weight);
                        }

                        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                                uint64_t limits[_CGROUP_IO_LIMIT_TYPE_MAX];

                                for (CGroupIOLimitType type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                                        limits[type] = cgroup_io_limit_defaults[type];

                                limits[CGROUP_IO_RBPS_MAX] = b->rbps;
                                limits[CGROUP_IO_WBPS_MAX] = b->wbps;

                                log_cgroup_compat(u, "Applying BlockIO{Read|Write}Bandwidth=%" PRIu64 " %" PRIu64 " as IO{Read|Write}BandwidthMax= for %s",
                                                  b->rbps, b->wbps, b->path);

                                cgroup_apply_io_device_limit(u, b->path, limits);
                        }
                }
        }

        if (apply_mask & CGROUP_MASK_BLKIO) {
                bool has_io, has_blockio;

                has_io = cgroup_context_has_io_config(c);
                has_blockio = cgroup_context_has_blockio_config(c);

                /* Applying a 'weight' never makes sense for the host root cgroup, and for containers this should be
                 * left to our container manager, too. */
                if (!is_local_root) {
                        uint64_t weight;

                        if (has_io) {
                                uint64_t io_weight;

                                io_weight = cgroup_context_io_weight(c, state);
                                weight = cgroup_weight_io_to_blkio(cgroup_context_io_weight(c, state));

                                log_cgroup_compat(u, "Applying [Startup]IOWeight=%" PRIu64 " as [Startup]BlockIOWeight=%" PRIu64,
                                                  io_weight, weight);
                        } else if (has_blockio)
                                weight = cgroup_context_blkio_weight(c, state);
                        else
                                weight = CGROUP_BLKIO_WEIGHT_DEFAULT;

                        set_blkio_weight(u, weight);

                        if (has_io)
                                LIST_FOREACH(device_weights, w, c->io_device_weights) {
                                        weight = cgroup_weight_io_to_blkio(w->weight);

                                        log_cgroup_compat(u, "Applying IODeviceWeight=%" PRIu64 " as BlockIODeviceWeight=%" PRIu64 " for %s",
                                                          w->weight, weight, w->path);

                                        cgroup_apply_blkio_device_weight(u, w->path, weight);
                                }
                        else if (has_blockio)
                                LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                                        cgroup_apply_blkio_device_weight(u, w->path, w->weight);
                }

                /* The bandwidth limits are something that make sense to be applied to the host's root but not container
                 * roots, as there we want the container manager to handle it */
                if (is_host_root || !is_local_root) {
                        if (has_io)
                                LIST_FOREACH(device_limits, l, c->io_device_limits) {
                                        log_cgroup_compat(u, "Applying IO{Read|Write}Bandwidth=%" PRIu64 " %" PRIu64 " as BlockIO{Read|Write}BandwidthMax= for %s",
                                                          l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX], l->path);

                                        cgroup_apply_blkio_device_limit(u, l->path, l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX]);
                                }
                        else if (has_blockio)
                                LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths)
                                        cgroup_apply_blkio_device_limit(u, b->path, b->rbps, b->wbps);
                }
        }

        /* In unified mode 'memory' attributes do not exist on the root cgroup. In legacy mode 'memory.limit_in_bytes'
         * exists on the root cgroup, but any writes to it are refused with EINVAL. And if we run in a container we
         * want to leave control to the container manager (and if proper cgroup v2 delegation is used we couldn't even
         * write to this if we wanted to.) */
        if ((apply_mask & CGROUP_MASK_MEMORY) && !is_local_root) {

                if (cg_all_unified() > 0) {
                        uint64_t max, swap_max = CGROUP_LIMIT_MAX, zswap_max = CGROUP_LIMIT_MAX, high = CGROUP_LIMIT_MAX;

                        if (unit_has_unified_memory_config(u)) {
                                bool startup = IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING);

                                high = startup && c->startup_memory_high_set ? c->startup_memory_high : c->memory_high;
                                max = startup && c->startup_memory_max_set ? c->startup_memory_max : c->memory_max;
                                swap_max = startup && c->startup_memory_swap_max_set ? c->startup_memory_swap_max : c->memory_swap_max;
                                zswap_max = startup && c->startup_memory_zswap_max_set ? c->startup_memory_zswap_max : c->memory_zswap_max;
                        } else {
                                max = c->memory_limit;

                                if (max != CGROUP_LIMIT_MAX)
                                        log_cgroup_compat(u, "Applying MemoryLimit=%" PRIu64 " as MemoryMax=", max);
                        }

                        cgroup_apply_unified_memory_limit(u, "memory.min", unit_get_ancestor_memory_min(u));
                        cgroup_apply_unified_memory_limit(u, "memory.low", unit_get_ancestor_memory_low(u));
                        cgroup_apply_unified_memory_limit(u, "memory.high", high);
                        cgroup_apply_unified_memory_limit(u, "memory.max", max);
                        cgroup_apply_unified_memory_limit(u, "memory.swap.max", swap_max);
                        cgroup_apply_unified_memory_limit(u, "memory.zswap.max", zswap_max);

                        (void) set_attribute_and_warn(u, "memory", "memory.oom.group", one_zero(c->memory_oom_group));
                        (void) set_attribute_and_warn(u, "memory", "memory.zswap.writeback", one_zero(c->memory_zswap_writeback));

                } else {
                        char buf[DECIMAL_STR_MAX(uint64_t) + 1];
                        uint64_t val;

                        if (unit_has_unified_memory_config(u)) {
                                val = c->memory_max;
                                if (val != CGROUP_LIMIT_MAX)
                                        log_cgroup_compat(u, "Applying MemoryMax=%" PRIu64 " as MemoryLimit=", val);
                        } else
                                val = c->memory_limit;

                        if (val == CGROUP_LIMIT_MAX)
                                strncpy(buf, "-1\n", sizeof(buf));
                        else
                                xsprintf(buf, "%" PRIu64 "\n", val);

                        (void) set_attribute_and_warn(u, "memory", "memory.limit_in_bytes", buf);
                }
        }

        /* On cgroup v2 we can apply BPF everywhere. On cgroup v1 we apply it everywhere except for the root of
         * containers, where we leave this to the manager */
        if ((apply_mask & (CGROUP_MASK_DEVICES | CGROUP_MASK_BPF_DEVICES)) &&
            (is_host_root || cg_all_unified() > 0 || !is_local_root))
                (void) cgroup_apply_devices(u);

        if (apply_mask & CGROUP_MASK_PIDS) {

                if (is_host_root) {
                        /* So, the "pids" controller does not expose anything on the root cgroup, in order not to
                         * replicate knobs exposed elsewhere needlessly. We abstract this away here however, and when
                         * the knobs of the root cgroup are modified propagate this to the relevant sysctls. There's a
                         * non-obvious asymmetry however: unlike the cgroup properties we don't really want to take
                         * exclusive ownership of the sysctls, but we still want to honour things if the user sets
                         * limits. Hence we employ sort of a one-way strategy: when the user sets a bounded limit
                         * through us it counts. When the user afterwards unsets it again (i.e. sets it to unbounded)
                         * it also counts. But if the user never set a limit through us (i.e. we are the default of
                         * "unbounded") we leave things unmodified. For this we manage a global boolean that we turn on
                         * the first time we set a limit. Note that this boolean is flushed out on manager reload,
                         * which is desirable so that there's an official way to release control of the sysctl from
                         * systemd: set the limit to unbounded and reload. */

                        if (cgroup_tasks_max_isset(&c->tasks_max)) {
                                u->manager->sysctl_pid_max_changed = true;
                                r = procfs_tasks_set_limit(cgroup_tasks_max_resolve(&c->tasks_max));
                        } else if (u->manager->sysctl_pid_max_changed)
                                r = procfs_tasks_set_limit(TASKS_MAX);
                        else
                                r = 0;
                        if (r < 0)
                                log_unit_full_errno(u, LOG_LEVEL_CGROUP_WRITE(r), r,
                                                    "Failed to write to tasks limit sysctls: %m");
                }

                /* The attribute itself is not available on the host root cgroup, and in the container case we want to
                 * leave it for the container manager. */
                if (!is_local_root) {
                        if (cgroup_tasks_max_isset(&c->tasks_max)) {
                                char buf[DECIMAL_STR_MAX(uint64_t) + 1];

                                xsprintf(buf, "%" PRIu64 "\n", cgroup_tasks_max_resolve(&c->tasks_max));
                                (void) set_attribute_and_warn(u, "pids", "pids.max", buf);
                        } else
                                (void) set_attribute_and_warn(u, "pids", "pids.max", "max\n");
                }
        }

        if (apply_mask & CGROUP_MASK_BPF_FIREWALL)
                cgroup_apply_firewall(u);

        if (apply_mask & CGROUP_MASK_BPF_FOREIGN)
                cgroup_apply_bpf_foreign_program(u);

        if (apply_mask & CGROUP_MASK_BPF_SOCKET_BIND)
                cgroup_apply_socket_bind(u);

        if (apply_mask & CGROUP_MASK_BPF_RESTRICT_NETWORK_INTERFACES)
                cgroup_apply_restrict_network_interfaces(u);

        unit_modify_nft_set(u, /* add = */ true);
}
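
/* A note on the compat conversions applied above (values quoted from the systemd documentation, as an aid to
 * the reader): legacy CPUShares= (range 2...262144, default 1024) and unified CPUWeight= (range 1...10000,
 * default 100) are translated into each other by cgroup_cpu_shares_to_weight() and
 * cgroup_cpu_weight_to_shares() such that the defaults map onto each other, e.g. CPUShares=1024 becomes
 * CPUWeight=100. */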
static bool unit_get_needs_bpf_firewall(Unit *u) {
        CGroupContext *c;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        if (c->ip_accounting ||
            !set_isempty(c->ip_address_allow) ||
            !set_isempty(c->ip_address_deny) ||
            c->ip_filters_ingress ||
            c->ip_filters_egress)
                return true;

        /* If any parent slice has an IP access list defined, it applies too */
        for (Unit *p = UNIT_GET_SLICE(u); p; p = UNIT_GET_SLICE(p)) {
                c = unit_get_cgroup_context(p);
                if (!c)
                        return false;

                if (!set_isempty(c->ip_address_allow) ||
                    !set_isempty(c->ip_address_deny))
                        return true;
        }

        return false;
}

static bool unit_get_needs_bpf_foreign_program(Unit *u) {
        CGroupContext *c;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        return !!c->bpf_foreign_programs;
}

static bool unit_get_needs_socket_bind(Unit *u) {
        CGroupContext *c;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        return c->socket_bind_allow || c->socket_bind_deny;
}

static bool unit_get_needs_restrict_network_interfaces(Unit *u) {
        CGroupContext *c;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        return !set_isempty(c->restrict_network_interfaces);
}
static CGroupMask unit_get_cgroup_mask(Unit *u) {
        CGroupMask mask = 0;
        CGroupContext *c;

        assert(u);

        assert_se(c = unit_get_cgroup_context(u));

        /* Figure out which controllers we need, based on the cgroup context object */

        if (c->cpu_accounting)
                mask |= get_cpu_accounting_mask();

        if (cgroup_context_has_cpu_weight(c) ||
            cgroup_context_has_cpu_shares(c) ||
            c->cpu_quota_per_sec_usec != USEC_INFINITY)
                mask |= CGROUP_MASK_CPU;

        if (cgroup_context_has_allowed_cpus(c) || cgroup_context_has_allowed_mems(c))
                mask |= CGROUP_MASK_CPUSET;

        if (cgroup_context_has_io_config(c) || cgroup_context_has_blockio_config(c))
                mask |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;

        if (c->memory_accounting ||
            c->memory_limit != CGROUP_LIMIT_MAX ||
            unit_has_unified_memory_config(u))
                mask |= CGROUP_MASK_MEMORY;

        if (c->device_allow ||
            c->device_policy != CGROUP_DEVICE_POLICY_AUTO)
                mask |= CGROUP_MASK_DEVICES | CGROUP_MASK_BPF_DEVICES;

        if (c->tasks_accounting ||
            cgroup_tasks_max_isset(&c->tasks_max))
                mask |= CGROUP_MASK_PIDS;

        return CGROUP_MASK_EXTEND_JOINED(mask);
}
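
/* Illustrative example (not part of the original file): a service with MemoryMax=1G and TasksMax=100 and no
 * other resource settings would get CGROUP_MASK_MEMORY|CGROUP_MASK_PIDS here, before
 * CGROUP_MASK_EXTEND_JOINED() adds any controllers that are joined with these on the local hierarchy. */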
static CGroupMask unit_get_bpf_mask(Unit *u) {
        CGroupMask mask = 0;

        /* Figure out which controllers we need, based on the cgroup context, possibly taking into account
         * children of the unit. */

        if (unit_get_needs_bpf_firewall(u))
                mask |= CGROUP_MASK_BPF_FIREWALL;

        if (unit_get_needs_bpf_foreign_program(u))
                mask |= CGROUP_MASK_BPF_FOREIGN;

        if (unit_get_needs_socket_bind(u))
                mask |= CGROUP_MASK_BPF_SOCKET_BIND;

        if (unit_get_needs_restrict_network_interfaces(u))
                mask |= CGROUP_MASK_BPF_RESTRICT_NETWORK_INTERFACES;

        return mask;
}

CGroupMask unit_get_own_mask(Unit *u) {
        CGroupContext *c;

        /* Returns the mask of controllers the unit needs for itself. If a unit is not properly loaded, return an empty
         * mask, as we shouldn't reflect it in the cgroup hierarchy then. */

        if (u->load_state != UNIT_LOADED)
                return 0;

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        return unit_get_cgroup_mask(u) | unit_get_bpf_mask(u) | unit_get_delegate_mask(u);
}

CGroupMask unit_get_delegate_mask(Unit *u) {
        CGroupContext *c;

        /* If delegation is turned on, then turn on selected controllers, unless we are on the legacy hierarchy and the
         * process we fork into is known to drop privileges, and hence shouldn't get access to the controllers.
         *
         * Note that on the unified hierarchy it is safe to delegate controllers to unprivileged services. */

        if (!unit_cgroup_delegate(u))
                return 0;

        if (cg_all_unified() <= 0) {
                ExecContext *e;

                e = unit_get_exec_context(u);
                if (e && !exec_context_maintains_privileges(e))
                        return 0;
        }

        assert_se(c = unit_get_cgroup_context(u));
        return CGROUP_MASK_EXTEND_JOINED(c->delegate_controllers);
}

static CGroupMask unit_get_subtree_mask(Unit *u) {

        /* Returns the mask of this subtree, meaning of the group
         * itself and its children. */

        return unit_get_own_mask(u) | unit_get_members_mask(u);
}

CGroupMask unit_get_members_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's children require, merged */

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (crt && crt->cgroup_members_mask_valid)
                return crt->cgroup_members_mask; /* Use cached value if possible */

        CGroupMask m = 0;
        if (u->type == UNIT_SLICE) {
                Unit *member;

                UNIT_FOREACH_DEPENDENCY(member, u, UNIT_ATOM_SLICE_OF)
                        m |= unit_get_subtree_mask(member); /* note that this calls ourselves again, for the children */
        }

        if (crt) {
                crt->cgroup_members_mask = m;
                crt->cgroup_members_mask_valid = true;
        }

        return m;
}

CGroupMask unit_get_siblings_mask(Unit *u) {
        Unit *slice;

        assert(u);

        /* Returns the mask of controllers all of the unit's siblings
         * require, i.e. the members mask of the unit's parent slice
         * if there is one. */

        slice = UNIT_GET_SLICE(u);
        if (slice)
                return unit_get_members_mask(slice);

        return unit_get_subtree_mask(u); /* we are the top-level slice */
}

static CGroupMask unit_get_disable_mask(Unit *u) {
        CGroupContext *c;

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        return c->disable_controllers;
}

CGroupMask unit_get_ancestor_disable_mask(Unit *u) {
        CGroupMask mask;
        Unit *slice;

        assert(u);

        mask = unit_get_disable_mask(u);

        /* Returns the mask of controllers which are marked as forcibly
         * disabled in any ancestor unit or the unit in question. */

        slice = UNIT_GET_SLICE(u);
        if (slice)
                mask |= unit_get_ancestor_disable_mask(slice);

        return mask;
}

CGroupMask unit_get_target_mask(Unit *u) {
        CGroupMask own_mask, mask;

        /* This returns the cgroup mask of all controllers to enable for a specific cgroup, i.e. everything
         * it needs itself, plus all that its children need, plus all that its siblings need. This is
         * primarily useful on the legacy cgroup hierarchy, where we need to duplicate each cgroup in each
         * hierarchy that shall be enabled for it. */

        own_mask = unit_get_own_mask(u);

        if (own_mask & CGROUP_MASK_BPF_FIREWALL & ~u->manager->cgroup_supported)
                emit_bpf_firewall_warning(u);

        mask = own_mask | unit_get_members_mask(u) | unit_get_siblings_mask(u);

        mask &= u->manager->cgroup_supported;
        mask &= ~unit_get_ancestor_disable_mask(u);

        return mask;
}

CGroupMask unit_get_enable_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable
         * for the children of a specific cgroup. This is primarily
         * useful for the unified cgroup hierarchy, where each cgroup
         * controls which controllers are enabled for its children. */

        mask = unit_get_members_mask(u);
        mask &= u->manager->cgroup_supported;
        mask &= ~unit_get_ancestor_disable_mask(u);

        return mask;
}
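
/* Worked example (illustrative, not part of the original file): if a.slice sets DisableControllers=cpu and
 * contains b.service which needs CGROUP_MASK_CPU|CGROUP_MASK_MEMORY, then unit_get_target_mask(b.service)
 * drops CGROUP_MASK_CPU via unit_get_ancestor_disable_mask() and only CGROUP_MASK_MEMORY remains, assuming
 * both controllers are reported in u->manager->cgroup_supported. */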
void unit_invalidate_cgroup_members_masks(Unit *u) {
        Unit *slice;

        assert(u);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt)
                return;

        /* Recurse invalidate the member masks cache all the way up the tree */
        crt->cgroup_members_mask_valid = false;

        slice = UNIT_GET_SLICE(u);
        if (slice)
                unit_invalidate_cgroup_members_masks(slice);
}

const char *unit_get_realized_cgroup_path(Unit *u, CGroupMask mask) {

        /* Returns the realized cgroup path of the specified unit where all specified controllers are available. */

        while (u) {
                CGroupRuntime *crt = unit_get_cgroup_runtime(u);

                if (crt &&
                    crt->cgroup_path &&
                    crt->cgroup_realized &&
                    FLAGS_SET(crt->cgroup_realized_mask, mask))
                        return crt->cgroup_path;

                u = UNIT_GET_SLICE(u);
        }

        return NULL;
}

static const char *migrate_callback(CGroupMask mask, void *userdata) {
        /* If not realized at all, migrate to root ("").
         * It may happen if we're upgrading from an older version that didn't clean up. */
        return strempty(unit_get_realized_cgroup_path(userdata, mask));
}
int unit_default_cgroup_path(const Unit *u, char **ret) {
        _cleanup_free_ char *p = NULL;
        int r;

        assert(u);
        assert(ret);

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                p = strdup(u->manager->cgroup_root);
        else {
                _cleanup_free_ char *escaped = NULL, *slice_path = NULL;
                Unit *slice;

                slice = UNIT_GET_SLICE(u);
                if (slice && !unit_has_name(slice, SPECIAL_ROOT_SLICE)) {
                        r = cg_slice_to_path(slice->id, &slice_path);
                        if (r < 0)
                                return r;
                }

                r = cg_escape(u->id, &escaped);
                if (r < 0)
                        return r;

                p = path_join(empty_to_root(u->manager->cgroup_root), slice_path, escaped);
        }
        if (!p)
                return -ENOMEM;

        *ret = TAKE_PTR(p);
        return 0;
}
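
/* Illustrative example (not part of the original file): for a system manager with cgroup_root "/", a unit
 * "getty@tty1.service" placed in "system-getty.slice" yields
 * "/system.slice/system-getty.slice/getty@tty1.service", since cg_slice_to_path() expands the nested slice
 * name and cg_escape() escapes the unit name where necessary. */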
int unit_set_cgroup_path(Unit *u, const char *path) {
        _cleanup_free_ char *p = NULL;
        CGroupRuntime *crt;
        int r;

        assert(u);

        crt = unit_get_cgroup_runtime(u);

        if (crt && streq_ptr(crt->cgroup_path, path))
                return 0;

        unit_release_cgroup(u);

        crt = unit_setup_cgroup_runtime(u);
        if (!crt)
                return -ENOMEM;

        if (path) {
                p = strdup(path);
                if (!p)
                        return -ENOMEM;

                r = hashmap_put(u->manager->cgroup_unit, p, u);
                if (r < 0)
                        return r;
        }

        assert(!crt->cgroup_path);
        crt->cgroup_path = TAKE_PTR(p);

        return 1;
}

int unit_watch_cgroup(Unit *u) {
        _cleanup_free_ char *events = NULL;
        int r;

        assert(u);

        /* Watches the "cgroup.events" attribute of this unit's cgroup for "empty" events, but only if
         * cgroupv2 is available. */

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return 0;

        if (crt->cgroup_control_inotify_wd >= 0)
                return 0;

        /* Only applies to the unified hierarchy */
        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return log_error_errno(r, "Failed to determine whether the name=systemd hierarchy is unified: %m");
        if (r == 0)
                return 0;

        /* No point in watching the top-level slice, it's never going to run empty. */
        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        r = hashmap_ensure_allocated(&u->manager->cgroup_control_inotify_wd_unit, &trivial_hash_ops);
        if (r < 0)
                return log_oom();

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, crt->cgroup_path, "cgroup.events", &events);
        if (r < 0)
                return log_oom();

        crt->cgroup_control_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, events, IN_MODIFY);
        if (crt->cgroup_control_inotify_wd < 0) {

                if (errno == ENOENT) /* If the directory is already gone we don't need to track it, so this
                                      * is not an error */
                        return 0;

                return log_unit_error_errno(u, errno, "Failed to add control inotify watch descriptor for control group %s: %m", empty_to_root(crt->cgroup_path));
        }

        r = hashmap_put(u->manager->cgroup_control_inotify_wd_unit, INT_TO_PTR(crt->cgroup_control_inotify_wd), u);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to add control inotify watch descriptor for control group %s to hash map: %m", empty_to_root(crt->cgroup_path));

        return 0;
}

int unit_watch_cgroup_memory(Unit *u) {
        _cleanup_free_ char *events = NULL;
        int r;

        assert(u);

        /* Watches the "memory.events" attribute of this unit's cgroup for "oom_kill" events, but only if
         * cgroupv2 is available. */

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return 0;

        CGroupContext *c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        /* The "memory.events" attribute is only available if the memory controller is on. Let's hence tie
         * this to memory accounting, in a way watching for OOM kills is a form of memory accounting after
         * all. */
        if (!c->memory_accounting)
                return 0;

        /* Don't watch inner nodes, as the kernel doesn't report oom_kill events recursively currently, and
         * we also don't want to generate a log message for each parent cgroup of a process. */
        if (u->type == UNIT_SLICE)
                return 0;

        if (crt->cgroup_memory_inotify_wd >= 0)
                return 0;

        /* Only applies to the unified hierarchy */
        r = cg_all_unified();
        if (r < 0)
                return log_error_errno(r, "Failed to determine whether the memory controller is unified: %m");
        if (r == 0)
                return 0;

        r = hashmap_ensure_allocated(&u->manager->cgroup_memory_inotify_wd_unit, &trivial_hash_ops);
        if (r < 0)
                return log_oom();

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, crt->cgroup_path, "memory.events", &events);
        if (r < 0)
                return log_oom();

        crt->cgroup_memory_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, events, IN_MODIFY);
        if (crt->cgroup_memory_inotify_wd < 0) {

                if (errno == ENOENT) /* If the directory is already gone we don't need to track it, so this
                                      * is not an error */
                        return 0;

                return log_unit_error_errno(u, errno, "Failed to add memory inotify watch descriptor for control group %s: %m", empty_to_root(crt->cgroup_path));
        }

        r = hashmap_put(u->manager->cgroup_memory_inotify_wd_unit, INT_TO_PTR(crt->cgroup_memory_inotify_wd), u);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to add memory inotify watch descriptor for control group %s to hash map: %m", empty_to_root(crt->cgroup_path));

        return 0;
}

int unit_pick_cgroup_path(Unit *u) {
        _cleanup_free_ char *path = NULL;
        int r;

        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EINVAL;

        CGroupRuntime *crt = unit_setup_cgroup_runtime(u);
        if (!crt)
                return -ENOMEM;

        if (crt->cgroup_path)
                return 0;

        r = unit_default_cgroup_path(u, &path);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to generate default cgroup path: %m");

        r = unit_set_cgroup_path(u, path);
        if (r == -EEXIST)
                return log_unit_error_errno(u, r, "Control group %s exists already.", empty_to_root(path));
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to set unit's control group path to %s: %m", empty_to_root(path));

        return 0;
}
static int unit_update_cgroup(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask,
                ManagerState state) {

        bool created, is_root_slice;
        CGroupMask migrate_mask = 0;
        _cleanup_free_ char *cgroup_full_path = NULL;
        int r;

        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* Figure out our cgroup path */
        r = unit_pick_cgroup_path(u);
        if (r < 0)
                return r;

        CGroupRuntime *crt = ASSERT_PTR(unit_get_cgroup_runtime(u));

        /* First, create our own group */
        r = cg_create_everywhere(u->manager->cgroup_supported, target_mask, crt->cgroup_path);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to create cgroup %s: %m", empty_to_root(crt->cgroup_path));
        created = r;

        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0) {
                uint64_t cgroup_id = 0;

                r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, crt->cgroup_path, NULL, &cgroup_full_path);
                if (r >= 0) {
                        r = cg_path_get_cgroupid(cgroup_full_path, &cgroup_id);
                        if (r < 0)
                                log_unit_full_errno(u, ERRNO_IS_NOT_SUPPORTED(r) ? LOG_DEBUG : LOG_WARNING, r,
                                                    "Failed to get cgroup ID of cgroup %s, ignoring: %m", cgroup_full_path);
                } else
                        log_unit_warning_errno(u, r, "Failed to get full cgroup path on cgroup %s, ignoring: %m", empty_to_root(crt->cgroup_path));

                crt->cgroup_id = cgroup_id;
        }

        /* Start watching it */
        (void) unit_watch_cgroup(u);
        (void) unit_watch_cgroup_memory(u);

        /* For v2 we preserve enabled controllers in delegated units, adjust others,
         * for v1 we figure out which controller hierarchies need migration. */
        if (created || !crt->cgroup_realized || !unit_cgroup_delegate(u)) {
                CGroupMask result_mask = 0;

                /* Enable all controllers we need */
                r = cg_enable_everywhere(u->manager->cgroup_supported, enable_mask, crt->cgroup_path, &result_mask);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to enable/disable controllers on cgroup %s, ignoring: %m", empty_to_root(crt->cgroup_path));

                /* Remember what's actually enabled now */
                crt->cgroup_enabled_mask = result_mask;

                migrate_mask = crt->cgroup_realized_mask ^ target_mask;
        }

        /* Keep track that this is now realized */
        crt->cgroup_realized = true;
        crt->cgroup_realized_mask = target_mask;

        /* Migrate processes in controller hierarchies both downwards (enabling) and upwards (disabling).
         *
         * Unnecessary controller cgroups are trimmed (after emptied by upward migration).
         * We perform migration also with whole slices for cases when users don't care about leaf
         * granularity. Since delegated_mask is a subset of the target mask, we won't trim a slice subtree
         * containing delegated units. */
        if (cg_all_unified() == 0) {
                r = cg_migrate_v1_controllers(u->manager->cgroup_supported, migrate_mask, crt->cgroup_path, migrate_callback, u);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to migrate controller cgroups from %s, ignoring: %m", empty_to_root(crt->cgroup_path));

                is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);
                r = cg_trim_v1_controllers(u->manager->cgroup_supported, ~target_mask, crt->cgroup_path, !is_root_slice);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to delete controller cgroups %s, ignoring: %m", empty_to_root(crt->cgroup_path));
        }

        /* Set attributes */
        cgroup_context_apply(u, target_mask, state);
        cgroup_xattr_apply(u);

        /* For most units we expect that memory monitoring is set up before the unit is started and we won't
         * touch it after. For PID 1 this is different though, because we couldn't possibly do that given
         * that PID 1 runs before init.scope is even set up. Hence, whenever init.scope is realized, let's
         * try to open the memory pressure interface anew. */
        if (unit_has_name(u, SPECIAL_INIT_SCOPE))
                (void) manager_setup_memory_pressure_event_source(u->manager);

        return 0;
}
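
/* A note on the migration logic above: migrate_mask is the XOR of the previously realized v1 controller set
 * and the new target set, i.e. exactly those hierarchies whose membership changed in either direction. On
 * pure cgroup v2 systems cg_all_unified() is positive and the v1 migration/trimming branch is skipped
 * entirely. */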
static int unit_attach_pid_to_cgroup_via_bus(Unit *u, pid_t pid, const char *suffix_path) {
        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
        const char *pp;
        int r;

        assert(u);

        if (MANAGER_IS_SYSTEM(u->manager))
                return -EINVAL;

        if (!u->manager->system_bus)
                return -EIO;

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return -EOWNERDEAD;

        /* Determine this unit's cgroup path relative to our cgroup root */
        pp = path_startswith(crt->cgroup_path, u->manager->cgroup_root);
        if (!pp)
                return -EINVAL;

        pp = strjoina("/", pp, suffix_path);

        r = bus_call_method(u->manager->system_bus,
                            bus_systemd_mgr,
                            "AttachProcessesToUnit",
                            &error, NULL,
                            "ssau",
                            NULL /* empty unit name means client's unit, i.e. us */, pp, 1, (uint32_t) pid);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to attach unit process " PID_FMT " via the bus: %s", pid, bus_error_message(&error, r));

        return 0;
}

int unit_attach_pids_to_cgroup(Unit *u, Set *pids, const char *suffix_path) {
        _cleanup_free_ char *joined = NULL;
        CGroupMask delegated_mask;
        const char *p;
        PidRef *pid;
        int ret = 0, r;

        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EINVAL;

        if (set_isempty(pids))
                return 0;

        /* Load any custom firewall BPF programs here once to test if they are existing and actually loadable.
         * Fail here early since later errors in the call chain unit_realize_cgroup to cgroup_context_apply are ignored. */
        r = bpf_firewall_load_custom(u);
        if (r < 0)
                return r;

        r = unit_realize_cgroup(u);
        if (r < 0)
                return r;

        CGroupRuntime *crt = ASSERT_PTR(unit_get_cgroup_runtime(u));

        if (isempty(suffix_path))
                p = crt->cgroup_path;
        else {
                joined = path_join(crt->cgroup_path, suffix_path);
                if (!joined)
                        return -ENOMEM;

                p = joined;
        }

        delegated_mask = unit_get_delegate_mask(u);

        SET_FOREACH(pid, pids) {

                /* Unfortunately we cannot add pids by pidfd to a cgroup. Hence we have to use PIDs instead,
                 * which of course is racy. Let's shorten the race a bit though, and re-validate the PID
                 * before we use it */
                r = pidref_verify(pid);
                if (r < 0) {
                        log_unit_info_errno(u, r, "PID " PID_FMT " vanished before we could move it to target cgroup '%s', skipping: %m", pid->pid, empty_to_root(p));
                        continue;
                }

                /* First, attach the PID to the main cgroup hierarchy */
                r = cg_attach(SYSTEMD_CGROUP_CONTROLLER, p, pid->pid);
                if (r < 0) {
                        bool again = MANAGER_IS_USER(u->manager) && ERRNO_IS_PRIVILEGE(r);

                        log_unit_full_errno(u, again ? LOG_DEBUG : LOG_INFO, r,
                                            "Couldn't move process "PID_FMT" to%s requested cgroup '%s': %m",
                                            pid->pid, again ? " directly" : "", empty_to_root(p));

                        if (again) {
                                int z;

                                /* If we are in a user instance, and we can't move the process ourselves due
                                 * to permission problems, let's ask the system instance about it instead.
                                 * Since it's more privileged it might be able to move the process across the
                                 * leaves of a subtree whose top node is not owned by us. */

                                z = unit_attach_pid_to_cgroup_via_bus(u, pid->pid, suffix_path);
                                if (z < 0)
                                        log_unit_info_errno(u, z, "Couldn't move process "PID_FMT" to requested cgroup '%s' (directly or via the system bus): %m", pid->pid, empty_to_root(p));
                                else {
                                        if (ret >= 0)
                                                ret++; /* Count successful additions */
                                        continue; /* When the bus thing worked via the bus we are fully done for this PID. */
                                }
                        }

                        if (ret >= 0)
                                ret = r; /* Remember first error */

                        continue;
                } else if (ret >= 0)
                        ret++; /* Count successful additions */

                r = cg_all_unified();
                if (r < 0)
                        return r;
                if (r > 0)
                        continue;

                /* In the legacy hierarchy, attach the process to the request cgroup if possible, and if not to the
                 * innermost realized one */

                for (CGroupController c = 0; c < _CGROUP_CONTROLLER_MAX; c++) {
                        CGroupMask bit = CGROUP_CONTROLLER_TO_MASK(c);
                        const char *realized;

                        if (!(u->manager->cgroup_supported & bit))
                                continue;

                        /* If this controller is delegated and realized, honour the caller's request for the cgroup suffix. */
                        if (delegated_mask & crt->cgroup_realized_mask & bit) {
                                r = cg_attach(cgroup_controller_to_string(c), p, pid->pid);
                                if (r >= 0)
                                        continue; /* Success! */

                                log_unit_debug_errno(u, r, "Failed to attach PID " PID_FMT " to requested cgroup %s in controller %s, falling back to unit's cgroup: %m",
                                                     pid->pid, empty_to_root(p), cgroup_controller_to_string(c));
                        }

                        /* So this controller is either not delegated or not realized, or something else weird
                         * happened. In that case let's attach the PID at least to the closest cgroup up the
                         * tree that is realized. */
                        realized = unit_get_realized_cgroup_path(u, bit);
                        if (!realized)
                                continue; /* Not even realized in the root slice? Then let's not bother */

                        r = cg_attach(cgroup_controller_to_string(c), realized, pid->pid);
                        if (r < 0)
                                log_unit_debug_errno(u, r, "Failed to attach PID " PID_FMT " to realized cgroup %s in controller %s, ignoring: %m",
                                                     pid->pid, realized, cgroup_controller_to_string(c));
                }
        }

        return ret;
}
static bool unit_has_mask_realized(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask) {

        assert(u);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt)
                return false;

        /* Returns true if this unit is fully realized. We check four things:
         *
         * 1. Whether the cgroup was created at all
         * 2. Whether the cgroup was created in all the hierarchies we need it to be created in (in case of cgroup v1)
         * 3. Whether the cgroup has all the right controllers enabled (in case of cgroup v2)
         * 4. Whether the invalidation mask is currently zero
         *
         * If you wonder why we mask the target realization and enable mask with CGROUP_MASK_V1/CGROUP_MASK_V2: note
         * that there are three sets of bitmasks: CGROUP_MASK_V1 (for real cgroup v1 controllers), CGROUP_MASK_V2 (for
         * real cgroup v2 controllers) and CGROUP_MASK_BPF (for BPF-based pseudo-controllers). Now, cgroup_realized_mask
         * only matters for cgroup v1 controllers, and cgroup_enabled_mask is only used for cgroup v2, and if they
         * differ in the others, we don't really care. (After all, the cgroup_enabled_mask tracks which controllers are
         * enabled through cgroup.subtree_control, and since the BPF pseudo-controllers don't show up there, they
         * simply don't matter.) */

        return crt->cgroup_realized &&
               ((crt->cgroup_realized_mask ^ target_mask) & CGROUP_MASK_V1) == 0 &&
               ((crt->cgroup_enabled_mask ^ enable_mask) & CGROUP_MASK_V2) == 0 &&
               crt->cgroup_invalidated_mask == 0;
}

static bool unit_has_mask_disables_realized(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask) {

        assert(u);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt)
                return true;

        /* Returns true if all controllers which should be disabled are indeed disabled.
         *
         * Unlike unit_has_mask_realized, we don't care what was enabled, only that anything we want to remove is
         * already removed. */

        return !crt->cgroup_realized ||
               (FLAGS_SET(crt->cgroup_realized_mask, target_mask & CGROUP_MASK_V1) &&
                FLAGS_SET(crt->cgroup_enabled_mask, enable_mask & CGROUP_MASK_V2));
}

static bool unit_has_mask_enables_realized(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask) {

        assert(u);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt)
                return false;

        /* Returns true if all controllers which should be enabled are indeed enabled.
         *
         * Unlike unit_has_mask_realized, we don't care about the controllers that are not present, only that anything
         * we want to add is already added. */

        return crt->cgroup_realized &&
               ((crt->cgroup_realized_mask | target_mask) & CGROUP_MASK_V1) == (crt->cgroup_realized_mask & CGROUP_MASK_V1) &&
               ((crt->cgroup_enabled_mask | enable_mask) & CGROUP_MASK_V2) == (crt->cgroup_enabled_mask & CGROUP_MASK_V2);
}
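
/* Worked example (illustrative, not part of the original file): if cgroup_enabled_mask currently contains
 * memory|pids and enable_mask asks for memory|io, then (enabled | wanted) != enabled because of io, so
 * unit_has_mask_enables_realized() returns false and the cgroup still needs an update; the disable-side check
 * works the same way via FLAGS_SET() in the opposite direction. */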
void unit_add_to_cgroup_realize_queue(Unit *u) {
        assert(u);

        if (u->in_cgroup_realize_queue)
                return;

        LIST_APPEND(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
        u->in_cgroup_realize_queue = true;
}

static void unit_remove_from_cgroup_realize_queue(Unit *u) {
        assert(u);

        if (!u->in_cgroup_realize_queue)
                return;

        LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
        u->in_cgroup_realize_queue = false;
}

/* Controllers can only be enabled breadth-first, from the root of the
 * hierarchy downwards to the unit in question. */
static int unit_realize_cgroup_now_enable(Unit *u, ManagerState state) {
        CGroupMask target_mask, enable_mask, new_target_mask, new_enable_mask;
        Unit *slice;
        int r;

        assert(u);

        /* First go deal with this unit's parent, or we won't be able to enable
         * any new controllers at this layer. */
        slice = UNIT_GET_SLICE(u);
        if (slice) {
                r = unit_realize_cgroup_now_enable(slice, state);
                if (r < 0)
                        return r;
        }

        target_mask = unit_get_target_mask(u);
        enable_mask = unit_get_enable_mask(u);

        /* We can only enable in this direction, don't try to disable anything. */
        if (unit_has_mask_enables_realized(u, target_mask, enable_mask))
                return 0;

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);

        new_target_mask = (crt ? crt->cgroup_realized_mask : 0) | target_mask;
        new_enable_mask = (crt ? crt->cgroup_enabled_mask : 0) | enable_mask;

        return unit_update_cgroup(u, new_target_mask, new_enable_mask, state);
}

/* Controllers can only be disabled depth-first, from the leaves of the
 * hierarchy upwards to the unit in question. */
static int unit_realize_cgroup_now_disable(Unit *u, ManagerState state) {
        Unit *m;

        assert(u);

        if (u->type != UNIT_SLICE)
                return 0;

        UNIT_FOREACH_DEPENDENCY(m, u, UNIT_ATOM_SLICE_OF) {
                CGroupMask target_mask, enable_mask, new_target_mask, new_enable_mask;
                int r;

                CGroupRuntime *rt = unit_get_cgroup_runtime(m);
                if (!rt)
                        continue;

                /* The cgroup for this unit might not actually be fully realised yet, in which case it isn't
                 * holding any controllers open anyway. */
                if (!rt->cgroup_realized)
                        continue;

                /* We must disable those below us first in order to release the controller. */
                if (m->type == UNIT_SLICE)
                        (void) unit_realize_cgroup_now_disable(m, state);

                target_mask = unit_get_target_mask(m);
                enable_mask = unit_get_enable_mask(m);

                /* We can only disable in this direction, don't try to enable anything. */
                if (unit_has_mask_disables_realized(m, target_mask, enable_mask))
                        continue;

                new_target_mask = rt->cgroup_realized_mask & target_mask;
                new_enable_mask = rt->cgroup_enabled_mask & enable_mask;

                r = unit_update_cgroup(m, new_target_mask, new_enable_mask, state);
                if (r < 0)
                        return r;
        }

        return 0;
}
/* Check if necessary controllers and attributes for a unit are in place.
 *
 * - If so, do nothing.
 * - If not, create paths, move processes over, and set attributes.
 *
 * Controllers can only be *enabled* in a breadth-first way, and *disabled* in
 * a depth-first way. As such the process looks like this:
 *
 * Suppose we have a cgroup hierarchy which looks like this:
 *
 *             root
 *            /    \
 *           /      \
 *          /        \
 *         a          b
 *        / \        / \
 *       /   \      /   \
 *      c     d    e     f
 *     / \   / \  / \   / \
 *    h   i j   k l   m n   o
 *
 * 1. We want to realise cgroup "d" now.
 * 2. cgroup "a" has DisableControllers=cpu in the associated unit.
 * 3. cgroup "k" just started requesting the memory controller.
 *
 * To make this work we must do the following in order:
 *
 * 1. Disable CPU controller in k, j
 * 2. Disable CPU controller in d
 * 3. Enable memory controller in root
 * 4. Enable memory controller in a
 * 5. Enable memory controller in d
 * 6. Enable memory controller in k
 *
 * Notice that we need to touch j in one direction, but not the other. We also
 * don't go beyond d when disabling -- it's up to "a" to get realized if it
 * wants to disable further. The basic rules are therefore:
 *
 * - If you're disabling something, you need to realise all of the cgroups from
 *   your recursive descendants to the root. This starts from the leaves.
 * - If you're enabling something, you need to realise from the root cgroup
 *   downwards, but you don't need to iterate your recursive descendants.
 *
 * Returns 0 on success and < 0 on failure. */
static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
        CGroupMask target_mask, enable_mask;
        Unit *slice;
        int r;

        assert(u);

        unit_remove_from_cgroup_realize_queue(u);

        target_mask = unit_get_target_mask(u);
        enable_mask = unit_get_enable_mask(u);

        if (unit_has_mask_realized(u, target_mask, enable_mask))
                return 0;

        /* Disable controllers below us, if there are any */
        r = unit_realize_cgroup_now_disable(u, state);
        if (r < 0)
                return r;

        /* Enable controllers above us, if there are any */
        slice = UNIT_GET_SLICE(u);
        if (slice) {
                r = unit_realize_cgroup_now_enable(slice, state);
                if (r < 0)
                        return r;
        }

        /* Now actually deal with the cgroup we were trying to realise and set attributes */
        r = unit_update_cgroup(u, target_mask, enable_mask, state);
        if (r < 0)
                return r;

        CGroupRuntime *crt = ASSERT_PTR(unit_get_cgroup_runtime(u));

        /* Now, reset the invalidation mask */
        crt->cgroup_invalidated_mask = 0;
        return 0;
}
unsigned manager_dispatch_cgroup_realize_queue(Manager *m) {
        ManagerState state;
        unsigned n = 0;
        Unit *i;
        int r;

        assert(m);

        state = manager_state(m);

        while ((i = m->cgroup_realize_queue)) {
                assert(i->in_cgroup_realize_queue);

                if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(i))) {
                        /* Maybe things changed, and the unit is not actually active anymore? */
                        unit_remove_from_cgroup_realize_queue(i);
                        continue;
                }

                r = unit_realize_cgroup_now(i, state);
                if (r < 0)
                        log_warning_errno(r, "Failed to realize cgroups for queued unit %s, ignoring: %m", i->id);

                n++;
        }

        return n;
}

void unit_add_family_to_cgroup_realize_queue(Unit *u) {
        assert(u);
        assert(u->type == UNIT_SLICE);

        /* The family of a unit is defined as the (immediate) children of the unit and the immediate children
         * of all ancestors of the unit.
         *
         * Ideally we would enqueue the ancestor path only (bottom up). However, on cgroup-v1 scheduling becomes
         * very weird if two units that own processes reside in the same slice, but one is realized in the
         * "cpu" hierarchy and one is not (for example because one has CPUWeight= set and the other does
         * not), because that means individual processes need to be scheduled against whole cgroups. Let's
         * avoid this asymmetry by always ensuring that siblings of a unit are always realized in their v1
         * controller hierarchies too (if the unit requires the controller to be realized).
         *
         * The function must invalidate cgroup_members_mask of all ancestors in order to calculate up-to-date
         * masks. */

        do {
                Unit *m;

                CGroupRuntime *crt = unit_get_cgroup_runtime(u);

                /* Children of u likely changed when we're called */
                if (crt)
                        crt->cgroup_members_mask_valid = false;

                UNIT_FOREACH_DEPENDENCY(m, u, UNIT_ATOM_SLICE_OF) {

                        /* No point in doing cgroup application for units without active processes. */
                        if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(m)))
                                continue;

                        /* We only enqueue siblings if they were realized once at least, in the main
                         * hierarchy. */
                        crt = unit_get_cgroup_runtime(m);
                        if (!crt || !crt->cgroup_realized)
                                continue;

                        /* If the unit doesn't need any new controllers and has current ones
                         * realized, it doesn't need any changes. */
                        if (unit_has_mask_realized(m,
                                                   unit_get_target_mask(m),
                                                   unit_get_enable_mask(m)))
                                continue;

                        unit_add_to_cgroup_realize_queue(m);
                }

                /* Parent comes after children */
                unit_add_to_cgroup_realize_queue(u);

                u = UNIT_GET_SLICE(u);
        } while (u);
}

int unit_realize_cgroup(Unit *u) {
        Unit *slice;

        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* So, here's the deal: when realizing the cgroups for this unit, we need to first create all
         * parents, but there's more actually: for the weight-based controllers we also need to make sure
         * that all our siblings (i.e. units that are in the same slice as we are) have cgroups, too. On the
         * other hand, when a controller is removed from the realized set, it may become unnecessary in siblings
         * and ancestors and they should be (de)realized too.
         *
         * This call will defer work on the siblings and derealized ancestors to the next event loop
         * iteration and synchronously creates the parent cgroups (unit_realize_cgroup_now). */

        slice = UNIT_GET_SLICE(u);
        if (slice)
                unit_add_family_to_cgroup_realize_queue(slice);

        /* And realize this one now (and apply the values) */
        return unit_realize_cgroup_now(u, manager_state(u->manager));
}
void unit_release_cgroup(Unit *u) {
        assert(u);

        /* Forgets all cgroup details for this cgroup — but does *not* destroy the cgroup. This is hence OK to call
         * when we close down everything for reexecution, where we really want to leave the cgroup in place. */

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt)
                return;

        if (crt->cgroup_path) {
                (void) hashmap_remove(u->manager->cgroup_unit, crt->cgroup_path);
                crt->cgroup_path = mfree(crt->cgroup_path);
        }

        if (crt->cgroup_control_inotify_wd >= 0) {
                if (inotify_rm_watch(u->manager->cgroup_inotify_fd, crt->cgroup_control_inotify_wd) < 0)
                        log_unit_debug_errno(u, errno, "Failed to remove cgroup control inotify watch %i for %s, ignoring: %m", crt->cgroup_control_inotify_wd, u->id);

                (void) hashmap_remove(u->manager->cgroup_control_inotify_wd_unit, INT_TO_PTR(crt->cgroup_control_inotify_wd));
                crt->cgroup_control_inotify_wd = -1;
        }

        if (crt->cgroup_memory_inotify_wd >= 0) {
                if (inotify_rm_watch(u->manager->cgroup_inotify_fd, crt->cgroup_memory_inotify_wd) < 0)
                        log_unit_debug_errno(u, errno, "Failed to remove cgroup memory inotify watch %i for %s, ignoring: %m", crt->cgroup_memory_inotify_wd, u->id);

                (void) hashmap_remove(u->manager->cgroup_memory_inotify_wd_unit, INT_TO_PTR(crt->cgroup_memory_inotify_wd));
                crt->cgroup_memory_inotify_wd = -1;
        }

        *(CGroupRuntime**) ((uint8_t*) u + UNIT_VTABLE(u)->cgroup_runtime_offset) = cgroup_runtime_free(crt);
}

int unit_cgroup_is_empty(Unit *u) {
        int r;

        assert(u);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt)
                return -ENXIO;
        if (!crt->cgroup_path)
                return -EOWNERDEAD;

        r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, crt->cgroup_path);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty, ignoring: %m", empty_to_root(crt->cgroup_path));

        return r;
}

bool unit_maybe_release_cgroup(Unit *u) {
        int r;

        assert(u);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return true;

        /* Don't release the cgroup if there are still processes under it. If we get notified later when all
         * the processes exit (e.g. the processes were in D-state and exited after the unit was marked as
         * failed) we need the cgroup paths to continue to be tracked by the manager so they can be looked up
         * and cleaned up later. */
        r = unit_cgroup_is_empty(u);
        if (r == 1) {
                unit_release_cgroup(u);
                return true;
        }

        return false;
}

void unit_prune_cgroup(Unit *u) {
        bool is_root_slice;
        int r;

        assert(u);

        /* Removes the cgroup, if empty and possible, and stops watching it. */
        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return;

        /* Cache the last CPU and memory usage values before we destroy the cgroup */
        (void) unit_get_cpu_usage(u, /* ret = */ NULL);

        for (CGroupMemoryAccountingMetric metric = 0; metric <= _CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST; metric++)
                (void) unit_get_memory_accounting(u, metric, /* ret = */ NULL);

        (void) bpf_restrict_fs_cleanup(u); /* Remove cgroup from the global LSM BPF map */

        unit_modify_nft_set(u, /* add = */ false);

        is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);

        r = cg_trim_everywhere(u->manager->cgroup_supported, crt->cgroup_path, !is_root_slice);
        if (r < 0)
                /* One reason we could have failed here is, that the cgroup still contains a process.
                 * However, if the cgroup becomes removable at a later time, it might be removed when
                 * the containing slice is stopped. So even if we failed now, this unit shouldn't assume
                 * that the cgroup is still realized the next time it is started. Do not return early
                 * on error, continue cleanup. */
                log_unit_full_errno(u, r == -EBUSY ? LOG_DEBUG : LOG_WARNING, r, "Failed to destroy cgroup %s, ignoring: %m", empty_to_root(crt->cgroup_path));

        if (is_root_slice)
                return;

        if (!unit_maybe_release_cgroup(u)) /* Returns true if the cgroup was released */
                return;

        crt = unit_get_cgroup_runtime(u); /* The above might have destroyed the runtime object, let's see if it's still there */
        if (!crt)
                return;

        crt->cgroup_realized = false;
        crt->cgroup_realized_mask = 0;
        crt->cgroup_enabled_mask = 0;

        crt->bpf_device_control_installed = bpf_program_free(crt->bpf_device_control_installed);
}
int unit_search_main_pid(Unit *u, PidRef *ret) {
        _cleanup_(pidref_done) PidRef pidref = PIDREF_NULL;
        _cleanup_fclose_ FILE *f = NULL;
        int r;

        assert(u);
        assert(ret);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return -ENXIO;

        r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, crt->cgroup_path, &f);
        if (r < 0)
                return r;

        for (;;) {
                _cleanup_(pidref_done) PidRef npidref = PIDREF_NULL;

                r = cg_read_pidref(f, &npidref);
                if (r < 0)
                        return r;
                if (r == 0)
                        break;

                if (pidref_equal(&pidref, &npidref)) /* seen already, cgroupfs reports duplicates! */
                        continue;

                if (pidref_is_my_child(&npidref) <= 0) /* ignore processes further down the tree */
                        continue;

                if (pidref_is_set(&pidref) != 0)
                        /* Dang, there's more than one daemonized PID in this group, so we don't know what
                         * process is the main process. */
                        return -ENODATA;

                pidref = TAKE_PIDREF(npidref);
        }

        if (!pidref_is_set(&pidref))
                return -ENODATA;

        *ret = TAKE_PIDREF(pidref);
        return 0;
}

static int unit_watch_pids_in_path(Unit *u, const char *path) {
        _cleanup_closedir_ DIR *d = NULL;
        _cleanup_fclose_ FILE *f = NULL;
        int ret = 0, r;

        assert(u);
        assert(path);

        r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f);
        if (r < 0)
                RET_GATHER(ret, r);
        else
                for (;;) {
                        _cleanup_(pidref_done) PidRef pid = PIDREF_NULL;

                        r = cg_read_pidref(f, &pid);
                        if (r == 0)
                                break;
                        if (r < 0) {
                                RET_GATHER(ret, r);
                                break;
                        }

                        RET_GATHER(ret, unit_watch_pidref(u, &pid, /* exclusive= */ false));
                }

        r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d);
        if (r < 0)
                RET_GATHER(ret, r);
        else
                for (;;) {
                        _cleanup_free_ char *fn = NULL, *p = NULL;

                        r = cg_read_subgroup(d, &fn);
                        if (r == 0)
                                break;
                        if (r < 0) {
                                RET_GATHER(ret, r);
                                break;
                        }

                        p = path_join(empty_to_root(path), fn);
                        if (!p)
                                return -ENOMEM;

                        RET_GATHER(ret, unit_watch_pids_in_path(u, p));
                }

        return ret;
}

int unit_synthesize_cgroup_empty_event(Unit *u) {
        int r;

        assert(u);

        /* Enqueue a synthetic cgroup empty event if this unit doesn't watch any PIDs anymore. This is compatibility
         * support for non-unified systems where notifications aren't reliable, and hence need to take whatever we can
         * get as notification source as soon as we stopped having any useful PIDs to watch for. */

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return -ENOENT;

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return r;
        if (r > 0) /* On unified we have reliable notifications, and don't need this */
                return 0;

        if (!set_isempty(u->pids))
                return 0;

        unit_add_to_cgroup_empty_queue(u);
        return 0;
}

int unit_watch_all_pids(Unit *u) {
        int r;

        assert(u);

        /* Adds all PIDs from our cgroup to the set of PIDs we
         * watch. This is a fallback logic for cases where we do not
         * get reliable cgroup empty notifications: we try to use
         * SIGCHLD as replacement. */

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return -ENOENT;

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return r;
        if (r > 0) /* On unified we can use proper notifications */
                return 0;

        return unit_watch_pids_in_path(u, crt->cgroup_path);
}
static int on_cgroup_empty_event(sd_event_source *s, void *userdata) {
        Manager *m = ASSERT_PTR(userdata);
        Unit *u;
        int r;

        assert(s);

        u = m->cgroup_empty_queue;
        if (!u)
                return 0;

        assert(u->in_cgroup_empty_queue);
        u->in_cgroup_empty_queue = false;
        LIST_REMOVE(cgroup_empty_queue, m->cgroup_empty_queue, u);

        if (m->cgroup_empty_queue) {
                /* More stuff queued, let's make sure we remain enabled */
                r = sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);
                if (r < 0)
                        log_debug_errno(r, "Failed to reenable cgroup empty event source, ignoring: %m");
        }

        /* Update state based on OOM kills before we notify about cgroup empty event */
        (void) unit_check_oom(u);
        (void) unit_check_oomd_kill(u);

        unit_add_to_gc_queue(u);

        if (IN_SET(unit_active_state(u), UNIT_INACTIVE, UNIT_FAILED))
                unit_prune_cgroup(u);
        else if (UNIT_VTABLE(u)->notify_cgroup_empty)
                UNIT_VTABLE(u)->notify_cgroup_empty(u);

        return 0;
}

void unit_add_to_cgroup_empty_queue(Unit *u) {
        int r;

        assert(u);

        /* Note that there are four different ways cgroup empty events reach us:
         *
         * 1. On the unified hierarchy we get an inotify event on the cgroup
         *
         * 2. On the legacy hierarchy, when running in system mode, we get a datagram on the cgroup agent socket
         *
         * 3. On the legacy hierarchy, when running in user mode, we get a D-Bus signal on the system bus
         *
         * 4. On the legacy hierarchy, in service units we start watching all processes of the cgroup for SIGCHLD as
         *    soon as we get one SIGCHLD, to deal with unreliable cgroup notifications.
         *
         * Regardless which way we got the notification, we'll verify it here, and then add it to a separate
         * queue. This queue will be dispatched at a lower priority than the SIGCHLD handler, so that we always use
         * SIGCHLD if we can get it first, and only use the cgroup empty notifications if there's no SIGCHLD pending
         * (which might happen if the cgroup doesn't contain processes that are our own child, which is typically the
         * case for scope units). */

        if (u->in_cgroup_empty_queue)
                return;

        /* Let's verify that the cgroup is really empty */
        r = unit_cgroup_is_empty(u);
        if (r <= 0)
                return;

        LIST_PREPEND(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
        u->in_cgroup_empty_queue = true;

        /* Trigger the defer event */
        r = sd_event_source_set_enabled(u->manager->cgroup_empty_event_source, SD_EVENT_ONESHOT);
        if (r < 0)
                log_debug_errno(r, "Failed to enable cgroup empty event source: %m");
}

static void unit_remove_from_cgroup_empty_queue(Unit *u) {
        assert(u);

        if (!u->in_cgroup_empty_queue)
                return;

        LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
        u->in_cgroup_empty_queue = false;
}

int unit_check_oomd_kill(Unit *u) {
        _cleanup_free_ char *value = NULL;
        bool increased;
        uint64_t n = 0;
        int r;

        assert(u);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return 0;

        r = cg_all_unified();
        if (r < 0)
                return log_unit_debug_errno(u, r, "Couldn't determine whether we are in all unified mode: %m");
        if (r == 0)
                return 0;

        r = cg_get_xattr_malloc(crt->cgroup_path, "user.oomd_ooms", &value);
        if (r < 0 && !ERRNO_IS_XATTR_ABSENT(r))
                return r;

        if (!isempty(value)) {
                r = safe_atou64(value, &n);
                if (r < 0)
                        return r;
        }

        increased = n > crt->managed_oom_kill_last;
        crt->managed_oom_kill_last = n;

        if (!increased)
                return 0;

        n = 0;
        value = mfree(value);
        r = cg_get_xattr_malloc(crt->cgroup_path, "user.oomd_kill", &value);
        if (r >= 0 && !isempty(value))
                (void) safe_atou64(value, &n);

        if (n > 0)
                log_unit_struct(u, LOG_NOTICE,
                                "MESSAGE_ID=" SD_MESSAGE_UNIT_OOMD_KILL_STR,
                                LOG_UNIT_INVOCATION_ID(u),
                                LOG_UNIT_MESSAGE(u, "systemd-oomd killed %"PRIu64" process(es) in this unit.", n),
                                "N_PROCESSES=%" PRIu64, n);
        else
                log_unit_struct(u, LOG_NOTICE,
                                "MESSAGE_ID=" SD_MESSAGE_UNIT_OOMD_KILL_STR,
                                LOG_UNIT_INVOCATION_ID(u),
                                LOG_UNIT_MESSAGE(u, "systemd-oomd killed some process(es) in this unit."));

        unit_notify_cgroup_oom(u, /* ManagedOOM= */ true);

        return 1;
}

int unit_check_oom(Unit *u) {
        _cleanup_free_ char *oom_kill = NULL;
        bool increased;
        uint64_t c;
        int r;

        assert(u);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return 0;

        r = cg_get_keyed_attribute(
                        "memory",
                        crt->cgroup_path,
                        "memory.events",
                        STRV_MAKE("oom_kill"),
                        &oom_kill);
        if (IN_SET(r, -ENOENT, -ENXIO)) /* Handle gracefully if cgroup or oom_kill attribute don't exist */
                c = 0;
        else if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to read oom_kill field of memory.events cgroup attribute: %m");
        else {
                r = safe_atou64(oom_kill, &c);
                if (r < 0)
                        return log_unit_debug_errno(u, r, "Failed to parse oom_kill field: %m");
        }

        increased = c > crt->oom_kill_last;
        crt->oom_kill_last = c;

        if (!increased)
                return 0;

        log_unit_struct(u, LOG_NOTICE,
                        "MESSAGE_ID=" SD_MESSAGE_UNIT_OUT_OF_MEMORY_STR,
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "A process of this unit has been killed by the OOM killer."));

        unit_notify_cgroup_oom(u, /* ManagedOOM= */ false);

        return 1;
}

static int on_cgroup_oom_event(sd_event_source *s, void *userdata) {
        Manager *m = ASSERT_PTR(userdata);
        Unit *u;
        int r;

        assert(s);

        u = m->cgroup_oom_queue;
        if (!u)
                return 0;

        assert(u->in_cgroup_oom_queue);
        u->in_cgroup_oom_queue = false;
        LIST_REMOVE(cgroup_oom_queue, m->cgroup_oom_queue, u);

        if (m->cgroup_oom_queue) {
                /* More stuff queued, let's make sure we remain enabled */
                r = sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);
                if (r < 0)
                        log_debug_errno(r, "Failed to reenable cgroup oom event source, ignoring: %m");
        }

        (void) unit_check_oom(u);
        unit_add_to_gc_queue(u);

        return 0;
}

static void unit_add_to_cgroup_oom_queue(Unit *u) {
        int r;

        assert(u);

        if (u->in_cgroup_oom_queue)
                return;

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return;

        LIST_PREPEND(cgroup_oom_queue, u->manager->cgroup_oom_queue, u);
        u->in_cgroup_oom_queue = true;

        /* Trigger the defer event */
        if (!u->manager->cgroup_oom_event_source) {
                _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;

                r = sd_event_add_defer(u->manager->event, &s, on_cgroup_oom_event, u->manager);
                if (r < 0) {
                        log_error_errno(r, "Failed to create cgroup oom event source: %m");
                        return;
                }

                r = sd_event_source_set_priority(s, EVENT_PRIORITY_CGROUP_OOM);
                if (r < 0) {
                        log_error_errno(r, "Failed to set priority of cgroup oom event source: %m");
                        return;
                }

                (void) sd_event_source_set_description(s, "cgroup-oom");
                u->manager->cgroup_oom_event_source = TAKE_PTR(s);
        }

        r = sd_event_source_set_enabled(u->manager->cgroup_oom_event_source, SD_EVENT_ONESHOT);
        if (r < 0)
                log_error_errno(r, "Failed to enable cgroup oom event source: %m");
}

static int unit_check_cgroup_events(Unit *u) {
        char *values[2] = {};
        int r;

        assert(u);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return 0;

        r = cg_get_keyed_attribute_graceful(
                        SYSTEMD_CGROUP_CONTROLLER,
                        crt->cgroup_path,
                        "cgroup.events",
                        STRV_MAKE("populated", "frozen"),
                        values);
        if (r < 0)
                return r;

        /* The cgroup.events notifications can be merged together so act as if we saw the given state for the
         * first time. The functions we call to handle given state are idempotent, which makes them
         * effectively remember the previous state. */
        if (values[0]) {
                if (streq(values[0], "1"))
                        unit_remove_from_cgroup_empty_queue(u);
                else
                        unit_add_to_cgroup_empty_queue(u);
        }

        /* Disregard freezer state changes due to operations not initiated by us.
         * See: https://github.com/systemd/systemd/pull/13512/files#r416469963 and
         * https://github.com/systemd/systemd/pull/13512#issuecomment-573007207 */
        if (values[1] && IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_FREEZING_BY_PARENT, FREEZER_THAWING)) {
                if (streq(values[1], "0"))
                        unit_thawed(u);
                else
                        unit_frozen(u);
        }

        free(values[0]);
        free(values[1]);

        return 0;
}

static int on_cgroup_inotify_event(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *m = ASSERT_PTR(userdata);

        assert(s);
        assert(fd >= 0);

        for (;;) {
                union inotify_event_buffer buffer;
                ssize_t l;

                l = read(fd, &buffer, sizeof(buffer));
                if (l < 0) {
                        if (ERRNO_IS_TRANSIENT(errno))
                                return 0;

                        return log_error_errno(errno, "Failed to read control group inotify events: %m");
                }

                FOREACH_INOTIFY_EVENT_WARN(e, buffer, l) {
                        Unit *u;

                        if (e->wd < 0)
                                /* Queue overflow has no watch descriptor */
                                continue;

                        if (e->mask & IN_IGNORED)
                                /* The watch was just removed */
                                continue;

                        /* Note that inotify might deliver events for a watch even after it was removed,
                         * because it was queued before the removal. Let's ignore this here safely. */

                        u = hashmap_get(m->cgroup_control_inotify_wd_unit, INT_TO_PTR(e->wd));
                        if (u)
                                unit_check_cgroup_events(u);

                        u = hashmap_get(m->cgroup_memory_inotify_wd_unit, INT_TO_PTR(e->wd));
                        if (u)
                                unit_add_to_cgroup_oom_queue(u);
                }
        }
}

static int cg_bpf_mask_supported(CGroupMask *ret) {
        CGroupMask mask = 0;
        int r;

        /* BPF-based firewall */
        r = bpf_firewall_supported();
        if (r < 0)
                return r;
        if (r > 0)
                mask |= CGROUP_MASK_BPF_FIREWALL;

        /* BPF-based device access control */
        r = bpf_devices_supported();
        if (r < 0)
                return r;
        if (r > 0)
                mask |= CGROUP_MASK_BPF_DEVICES;

        /* BPF pinned prog */
        r = bpf_foreign_supported();
        if (r < 0)
                return r;
        if (r > 0)
                mask |= CGROUP_MASK_BPF_FOREIGN;

        /* BPF-based bind{4|6} hooks */
        r = bpf_socket_bind_supported();
        if (r < 0)
                return r;
        if (r > 0)
                mask |= CGROUP_MASK_BPF_SOCKET_BIND;

        /* BPF-based cgroup_skb/{egress|ingress} hooks */
        r = bpf_restrict_ifaces_supported();
        if (r < 0)
                return r;
        if (r > 0)
                mask |= CGROUP_MASK_BPF_RESTRICT_NETWORK_INTERFACES;

        *ret = mask;
        return 0;
}

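/* Illustrative usage sketch (not part of the original source): callers get one combined
 * CGroupMask back and test the individual pseudo-controller bits, e.g.:
 *
 *         CGroupMask bpf_mask;
 *
 *         r = cg_bpf_mask_supported(&bpf_mask);
 *         if (r >= 0 && FLAGS_SET(bpf_mask, CGROUP_MASK_BPF_FIREWALL))
 *                 log_debug("BPF firewalling is supported.");
 */
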
int manager_setup_cgroup(Manager *m) {
        _cleanup_free_ char *path = NULL;
        const char *scope_path;
        int r, all_unified;
        CGroupMask mask;
        char *e;

        assert(m);

        /* 1. Determine hierarchy */
        m->cgroup_root = mfree(m->cgroup_root);
        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root);
        if (r < 0)
                return log_error_errno(r, "Cannot determine cgroup we are running in: %m");

        /* Chop off the init scope, if we are already located in it */
        e = endswith(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);

        /* LEGACY: Also chop off the system slice if we are in
         * it. This is to support live upgrades from older systemd
         * versions where PID 1 was moved there. Also see
         * cg_get_root_path(). */
        if (!e && MANAGER_IS_SYSTEM(m)) {
                e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE);
                if (!e)
                        e = endswith(m->cgroup_root, "/system"); /* even more legacy */
        }
        if (e)
                *e = 0;

        /* And make sure to store away the root value without trailing slash, even for the root dir, so that we can
         * easily prepend it everywhere. */
        delete_trailing_chars(m->cgroup_root, "/");

        /* 2. Show data */
        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
        if (r < 0)
                return log_error_errno(r, "Cannot find cgroup mount point: %m");

        r = cg_unified();
        if (r < 0)
                return log_error_errno(r, "Couldn't determine if we are running in the unified hierarchy: %m");

        all_unified = cg_all_unified();
        if (all_unified < 0)
                return log_error_errno(all_unified, "Couldn't determine whether we are in all unified mode: %m");
        if (all_unified > 0)
                log_debug("Unified cgroup hierarchy is located at %s.", path);
        else {
                r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
                if (r < 0)
                        return log_error_errno(r, "Failed to determine whether systemd's own controller is in unified mode: %m");
                if (r > 0)
                        log_debug("Unified cgroup hierarchy is located at %s. Controllers are on legacy hierarchies.", path);
                else
                        log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER_LEGACY ". File system hierarchy is at %s.", path);
        }

        /* 3. Allocate cgroup empty defer event source */
        m->cgroup_empty_event_source = sd_event_source_disable_unref(m->cgroup_empty_event_source);
        r = sd_event_add_defer(m->event, &m->cgroup_empty_event_source, on_cgroup_empty_event, m);
        if (r < 0)
                return log_error_errno(r, "Failed to create cgroup empty event source: %m");

        /* Schedule cgroup empty checks early, but after having processed service notification messages or
         * SIGCHLD signals, so that a cgroup running empty is always just the last safety net of
         * notification, and we collected the metadata the notification and SIGCHLD stuff offers first. */
        r = sd_event_source_set_priority(m->cgroup_empty_event_source, EVENT_PRIORITY_CGROUP_EMPTY);
        if (r < 0)
                return log_error_errno(r, "Failed to set priority of cgroup empty event source: %m");

        r = sd_event_source_set_enabled(m->cgroup_empty_event_source, SD_EVENT_OFF);
        if (r < 0)
                return log_error_errno(r, "Failed to disable cgroup empty event source: %m");

        (void) sd_event_source_set_description(m->cgroup_empty_event_source, "cgroup-empty");

        /* 4. Install notifier inotify object, or agent */
        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0) {

                /* In the unified hierarchy we can get cgroup empty notifications via inotify. */

                m->cgroup_inotify_event_source = sd_event_source_disable_unref(m->cgroup_inotify_event_source);
                safe_close(m->cgroup_inotify_fd);

                m->cgroup_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
                if (m->cgroup_inotify_fd < 0)
                        return log_error_errno(errno, "Failed to create control group inotify object: %m");

                r = sd_event_add_io(m->event, &m->cgroup_inotify_event_source, m->cgroup_inotify_fd, EPOLLIN, on_cgroup_inotify_event, m);
                if (r < 0)
                        return log_error_errno(r, "Failed to watch control group inotify object: %m");

                /* Process cgroup empty notifications early. Note that when this event is dispatched it'll
                 * just add the unit to a cgroup empty queue, hence let's run earlier than that. Also see
                 * handling of cgroup agent notifications, for the classic cgroup hierarchy support. */
                r = sd_event_source_set_priority(m->cgroup_inotify_event_source, EVENT_PRIORITY_CGROUP_INOTIFY);
                if (r < 0)
                        return log_error_errno(r, "Failed to set priority of inotify event source: %m");

                (void) sd_event_source_set_description(m->cgroup_inotify_event_source, "cgroup-inotify");

        } else if (MANAGER_IS_SYSTEM(m) && manager_owns_host_root_cgroup(m) && !MANAGER_IS_TEST_RUN(m)) {

                /* On the legacy hierarchy we only get notifications via cgroup agents. (Which isn't really reliable,
                 * since it does not generate events when control groups with children run empty.) */

                r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, SYSTEMD_CGROUPS_AGENT_PATH);
                if (r < 0)
                        log_warning_errno(r, "Failed to install release agent, ignoring: %m");
                else if (r > 0)
                        log_debug("Installed release agent.");
                else
                        log_debug("Release agent already installed.");
        }

        /* 5. Make sure we are in the special "init.scope" unit in the root slice. */
        scope_path = strjoina(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
        r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
        if (r >= 0) {
                /* Also, move all other userspace processes remaining in the root cgroup into that scope. */
                r = cg_migrate(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
                if (r < 0)
                        log_warning_errno(r, "Couldn't move remaining userspace processes, ignoring: %m");

                /* 6. And pin it, so that it cannot be unmounted */
                safe_close(m->pin_cgroupfs_fd);
                m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
                if (m->pin_cgroupfs_fd < 0)
                        return log_error_errno(errno, "Failed to open pin file: %m");

        } else if (!MANAGER_IS_TEST_RUN(m))
                return log_error_errno(r, "Failed to create %s control group: %m", scope_path);

        /* 7. Always enable hierarchical support if it exists... */
        if (!all_unified && !MANAGER_IS_TEST_RUN(m))
                (void) cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");

        /* 8. Figure out which controllers are supported */
        r = cg_mask_supported_subtree(m->cgroup_root, &m->cgroup_supported);
        if (r < 0)
                return log_error_errno(r, "Failed to determine supported controllers: %m");

        /* 9. Figure out which bpf-based pseudo-controllers are supported */
        r = cg_bpf_mask_supported(&mask);
        if (r < 0)
                return log_error_errno(r, "Failed to determine supported bpf-based pseudo-controllers: %m");
        m->cgroup_supported |= mask;

        /* 10. Log which controllers are supported */
        for (CGroupController c = 0; c < _CGROUP_CONTROLLER_MAX; c++)
                log_debug("Controller '%s' supported: %s", cgroup_controller_to_string(c),
                          yes_no(m->cgroup_supported & CGROUP_CONTROLLER_TO_MASK(c)));

        return 0;
}

void manager_shutdown_cgroup(Manager *m, bool delete) {
        assert(m);

        /* We can't really delete the group, since we are in it. But
         * let's trim it. */
        if (delete && m->cgroup_root && !FLAGS_SET(m->test_run_flags, MANAGER_TEST_RUN_MINIMAL))
                (void) cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);

        m->cgroup_empty_event_source = sd_event_source_disable_unref(m->cgroup_empty_event_source);

        m->cgroup_control_inotify_wd_unit = hashmap_free(m->cgroup_control_inotify_wd_unit);
        m->cgroup_memory_inotify_wd_unit = hashmap_free(m->cgroup_memory_inotify_wd_unit);

        m->cgroup_inotify_event_source = sd_event_source_disable_unref(m->cgroup_inotify_event_source);
        m->cgroup_inotify_fd = safe_close(m->cgroup_inotify_fd);

        m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd);

        m->cgroup_root = mfree(m->cgroup_root);
}

Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) {
        char *p;
        Unit *u;

        assert(m);
        assert(cgroup);

        u = hashmap_get(m->cgroup_unit, cgroup);
        if (u)
                return u;

        p = strdupa_safe(cgroup);
        for (;;) {
                char *e;

                e = strrchr(p, '/');
                if (!e || e == p)
                        return hashmap_get(m->cgroup_unit, SPECIAL_ROOT_SLICE);

                *e = 0;

                u = hashmap_get(m->cgroup_unit, p);
                if (u)
                        return u;
        }
}

Unit *manager_get_unit_by_pidref_cgroup(Manager *m, const PidRef *pid) {
        _cleanup_free_ char *cgroup = NULL;

        assert(m);

        if (cg_pidref_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup) < 0)
                return NULL;

        return manager_get_unit_by_cgroup(m, cgroup);
}

Unit *manager_get_unit_by_pidref_watching(Manager *m, const PidRef *pid) {
        Unit *u, **array;

        assert(m);

        if (!pidref_is_set(pid))
                return NULL;

        u = hashmap_get(m->watch_pids, pid);
        if (u)
                return u;

        array = hashmap_get(m->watch_pids_more, pid);
        if (array)
                return array[0];

        return NULL;
}

Unit *manager_get_unit_by_pidref(Manager *m, const PidRef *pid) {
        Unit *u;

        assert(m);

        /* Note that a process might be owned by multiple units, we return only one here, which is good
         * enough for most cases, though not strictly correct. We prefer the one reported by cgroup
         * membership, as that's the most relevant one as children of the process will be assigned to that
         * one, too, before all else. */

        if (!pidref_is_set(pid))
                return NULL;

        if (pidref_is_self(pid))
                return hashmap_get(m->units, SPECIAL_INIT_SCOPE);
        if (pid->pid == 1)
                return NULL;

        u = manager_get_unit_by_pidref_cgroup(m, pid);
        if (u)
                return u;

        u = manager_get_unit_by_pidref_watching(m, pid);
        if (u)
                return u;

        return NULL;
}

Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {
        assert(m);

        if (!pid_is_valid(pid))
                return NULL;

        return manager_get_unit_by_pidref(m, &PIDREF_MAKE_FROM_PID(pid));
}

int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
        Unit *u;

        assert(m);
        assert(cgroup);

        /* Called on the legacy hierarchy whenever we get an explicit cgroup notification from the cgroup agent process
         * or from the --system instance */

        log_debug("Got cgroup empty notification for: %s", cgroup);

        u = manager_get_unit_by_cgroup(m, cgroup);
        if (!u)
                return 0;

        unit_add_to_cgroup_empty_queue(u);
        return 1;
}

int unit_get_memory_available(Unit *u, uint64_t *ret) {
        uint64_t available = UINT64_MAX, current = 0;

        assert(u);
        assert(ret);

        /* If data from cgroups can be accessed, try to find out how much more memory a unit can
         * claim before hitting the configured cgroup limits (if any). Consider both MemoryHigh
         * and MemoryMax, and also any slice the unit might be nested below. */

        do {
                uint64_t unit_available, unit_limit = UINT64_MAX;
                CGroupContext *unit_context;

                /* No point in continuing if we can't go any lower */
                if (available == 0)
                        break;

                unit_context = unit_get_cgroup_context(u);
                if (!unit_context)
                        return -ENODATA;

                CGroupRuntime *crt = unit_get_cgroup_runtime(u);
                if (!crt || !crt->cgroup_path)
                        continue;

                (void) unit_get_memory_current(u, &current);
                /* in case of error, previous current propagates as lower bound */

                if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                        unit_limit = physical_memory();
                else if (unit_context->memory_max == UINT64_MAX && unit_context->memory_high == UINT64_MAX)
                        continue;
                unit_limit = MIN3(unit_limit, unit_context->memory_max, unit_context->memory_high);

                unit_available = LESS_BY(unit_limit, current);
                available = MIN(unit_available, available);
        } while ((u = UNIT_GET_SLICE(u)));

        *ret = available;

        return 0;
}

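/* Worked example (not part of the original source): with MemoryMax=1G on a service
 * currently using 256M, nested in a slice with MemoryMax=512M currently using 384M, the
 * loop above computes MIN(1G - 256M, 512M - 384M) = 128M as the available memory. */
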
int unit_get_memory_current(Unit *u, uint64_t *ret) {
        int r;

        // FIXME: Merge this into unit_get_memory_accounting after support for cgroup v1 is dropped

        assert(u);
        assert(ret);

        if (!UNIT_CGROUP_BOOL(u, memory_accounting))
                return -ENODATA;

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return -ENODATA;

        /* The root cgroup doesn't expose this information, let's get it from /proc instead */
        if (unit_has_host_root_cgroup(u))
                return procfs_memory_get_used(ret);

        if ((crt->cgroup_realized_mask & CGROUP_MASK_MEMORY) == 0)
                return -ENODATA;

        r = cg_all_unified();
        if (r < 0)
                return r;

        return cg_get_attribute_as_uint64("memory", crt->cgroup_path, r > 0 ? "memory.current" : "memory.usage_in_bytes", ret);
}

int unit_get_memory_accounting(Unit *u, CGroupMemoryAccountingMetric metric, uint64_t *ret) {

        static const char* const attributes_table[_CGROUP_MEMORY_ACCOUNTING_METRIC_MAX] = {
                [CGROUP_MEMORY_PEAK]          = "memory.peak",
                [CGROUP_MEMORY_SWAP_CURRENT]  = "memory.swap.current",
                [CGROUP_MEMORY_SWAP_PEAK]     = "memory.swap.peak",
                [CGROUP_MEMORY_ZSWAP_CURRENT] = "memory.zswap.current",
        };

        uint64_t bytes;
        bool updated = false;
        int r;

        assert(u);
        assert(metric >= 0);
        assert(metric < _CGROUP_MEMORY_ACCOUNTING_METRIC_MAX);

        if (!UNIT_CGROUP_BOOL(u, memory_accounting))
                return -ENODATA;

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt)
                return -ENODATA;
        if (!crt->cgroup_path)
                /* If the cgroup is already gone, we try to find the last cached value. */
                goto finish;

        /* The root cgroup doesn't expose this information. */
        if (unit_has_host_root_cgroup(u))
                return -ENODATA;

        if (!FLAGS_SET(crt->cgroup_realized_mask, CGROUP_MASK_MEMORY))
                return -ENODATA;

        r = cg_all_unified();
        if (r < 0)
                return r;
        if (r == 0)
                return -ENODATA;

        r = cg_get_attribute_as_uint64("memory", crt->cgroup_path, attributes_table[metric], &bytes);
        if (r < 0 && r != -ENODATA)
                return r;
        updated = r >= 0;

finish:
        if (metric <= _CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST) {
                uint64_t *last = &crt->memory_accounting_last[metric];

                if (updated)
                        *last = bytes;
                else if (*last != UINT64_MAX)
                        bytes = *last;
                else
                        return -ENODATA;

        } else if (!updated)
                return -ENODATA;

        if (ret)
                *ret = bytes;

        return 0;
}

int unit_get_tasks_current(Unit *u, uint64_t *ret) {
        assert(u);
        assert(ret);

        if (!UNIT_CGROUP_BOOL(u, tasks_accounting))
                return -ENODATA;

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return -ENODATA;

        /* The root cgroup doesn't expose this information, let's get it from /proc instead */
        if (unit_has_host_root_cgroup(u))
                return procfs_tasks_get_current(ret);

        if ((crt->cgroup_realized_mask & CGROUP_MASK_PIDS) == 0)
                return -ENODATA;

        return cg_get_attribute_as_uint64("pids", crt->cgroup_path, "pids.current", ret);
}

static int unit_get_cpu_usage_raw(Unit *u, nsec_t *ret) {
        nsec_t ns;
        int r;

        assert(u);
        assert(ret);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return -ENODATA;

        /* The root cgroup doesn't expose this information, let's get it from /proc instead */
        if (unit_has_host_root_cgroup(u))
                return procfs_cpu_get_usage(ret);

        /* Requisite controllers for CPU accounting are not enabled */
        if ((get_cpu_accounting_mask() & ~crt->cgroup_realized_mask) != 0)
                return -ENODATA;

        r = cg_all_unified();
        if (r < 0)
                return r;
        if (r > 0) {
                _cleanup_free_ char *val = NULL;
                uint64_t us;

                r = cg_get_keyed_attribute("cpu", crt->cgroup_path, "cpu.stat", STRV_MAKE("usage_usec"), &val);
                if (IN_SET(r, -ENOENT, -ENXIO))
                        return -ENODATA;
                if (r < 0)
                        return r;

                r = safe_atou64(val, &us);
                if (r < 0)
                        return r;

                ns = us * NSEC_PER_USEC;
        } else
                return cg_get_attribute_as_uint64("cpuacct", crt->cgroup_path, "cpuacct.usage", ret);

        *ret = ns;
        return 0;
}

int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
        nsec_t ns;
        int r;

        assert(u);

        /* Retrieve the current CPU usage counter. This will subtract the CPU counter taken when the unit was
         * started. If the cgroup has been removed already, returns the last cached value. To cache the value, simply
         * call this function with a NULL return value. */

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return -ENODATA;

        if (!UNIT_CGROUP_BOOL(u, cpu_accounting))
                return -ENODATA;

        r = unit_get_cpu_usage_raw(u, &ns);
        if (r == -ENODATA && crt->cpu_usage_last != NSEC_INFINITY) {
                /* If we can't get the CPU usage anymore (because the cgroup was already removed, for example), use our
                 * cached value. */

                if (ret)
                        *ret = crt->cpu_usage_last;
                return 0;
        }
        if (r < 0)
                return r;

        if (ns > crt->cpu_usage_base)
                ns -= crt->cpu_usage_base;
        else
                ns = 0;

        crt->cpu_usage_last = ns;
        if (ret)
                *ret = ns;

        return 0;
}

int unit_get_ip_accounting(
                Unit *u,
                CGroupIPAccountingMetric metric,
                uint64_t *ret) {

        uint64_t value;
        int fd, r;

        assert(u);
        assert(metric >= 0);
        assert(metric < _CGROUP_IP_ACCOUNTING_METRIC_MAX);
        assert(ret);

        if (!UNIT_CGROUP_BOOL(u, ip_accounting))
                return -ENODATA;

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return -ENODATA;

        fd = IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_INGRESS_PACKETS) ?
                crt->ip_accounting_ingress_map_fd :
                crt->ip_accounting_egress_map_fd;
        if (fd < 0)
                return -ENODATA;

        if (IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
                r = bpf_firewall_read_accounting(fd, &value, NULL);
        else
                r = bpf_firewall_read_accounting(fd, NULL, &value);
        if (r < 0)
                return r;

        /* Add in additional metrics from a previous runtime. Note that when reexecing/reloading the daemon we compile
         * all BPF programs and maps anew, but serialize the old counters. When deserializing we store them in the
         * ip_accounting_extra[] field, and add them in here transparently. */

        *ret = value + crt->ip_accounting_extra[metric];

        return r;
}

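/* Illustrative usage sketch (not part of the original source): reading one of the four
 * IP accounting counters, here the ingress byte count:
 *
 *         uint64_t ingress_bytes;
 *
 *         r = unit_get_ip_accounting(u, CGROUP_IP_INGRESS_BYTES, &ingress_bytes);
 *         if (r == -ENODATA)
 *                 log_unit_debug(u, "IP accounting not available.");
 *         else if (r >= 0)
 *                 log_unit_debug(u, "Received %" PRIu64 " bytes so far.", ingress_bytes);
 */
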
static uint64_t unit_get_effective_limit_one(Unit *u, CGroupLimitType type) {
        CGroupContext *cc;

        assert(u);
        assert(UNIT_HAS_CGROUP_CONTEXT(u));

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                switch (type) {
                case CGROUP_LIMIT_MEMORY_MAX:
                case CGROUP_LIMIT_MEMORY_HIGH:
                        return physical_memory();
                case CGROUP_LIMIT_TASKS_MAX:
                        return system_tasks_max();
                default:
                        assert_not_reached();
                }

        cc = ASSERT_PTR(unit_get_cgroup_context(u));
        switch (type) {
        /* Note: on legacy/hybrid hierarchies memory_max stays CGROUP_LIMIT_MAX unless configured
         * explicitly. Effective value of MemoryLimit= (cgroup v1) is not implemented. */
        case CGROUP_LIMIT_MEMORY_MAX:
                return cc->memory_max;
        case CGROUP_LIMIT_MEMORY_HIGH:
                return cc->memory_high;
        case CGROUP_LIMIT_TASKS_MAX:
                return cgroup_tasks_max_resolve(&cc->tasks_max);
        default:
                assert_not_reached();
        }
}

int unit_get_effective_limit(Unit *u, CGroupLimitType type, uint64_t *ret) {
        uint64_t infimum;

        assert(u);
        assert(ret);
        assert(type >= 0);
        assert(type < _CGROUP_LIMIT_TYPE_MAX);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EINVAL;

        infimum = unit_get_effective_limit_one(u, type);
        for (Unit *slice = UNIT_GET_SLICE(u); slice; slice = UNIT_GET_SLICE(slice))
                infimum = MIN(infimum, unit_get_effective_limit_one(slice, type));

        *ret = infimum;
        return 0;
}

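/* Worked example (not part of the original source): the effective limit is the infimum
 * over the unit and all slices it is nested in. With TasksMax=100 on foo.service and
 * TasksMax=50 on its parent slice, this yields 50:
 *
 *         uint64_t t;
 *
 *         r = unit_get_effective_limit(u, CGROUP_LIMIT_TASKS_MAX, &t);
 *         if (r >= 0)
 *                 log_unit_debug(u, "Effective TasksMax=%" PRIu64 ".", t);
 */
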
static int unit_get_io_accounting_raw(Unit *u, uint64_t ret[static _CGROUP_IO_ACCOUNTING_METRIC_MAX]) {
        static const char *const field_names[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
                [CGROUP_IO_READ_BYTES]       = "rbytes=",
                [CGROUP_IO_WRITE_BYTES]      = "wbytes=",
                [CGROUP_IO_READ_OPERATIONS]  = "rios=",
                [CGROUP_IO_WRITE_OPERATIONS] = "wios=",
        };
        uint64_t acc[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {};
        _cleanup_free_ char *path = NULL;
        _cleanup_fclose_ FILE *f = NULL;
        int r;

        assert(u);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return -ENODATA;

        if (unit_has_host_root_cgroup(u))
                return -ENODATA; /* TODO: return useful data for the top-level cgroup */

        r = cg_all_unified();
        if (r < 0)
                return r;
        if (r == 0)
                return -ENODATA;

        if (!FLAGS_SET(crt->cgroup_realized_mask, CGROUP_MASK_IO))
                return -ENODATA;

        r = cg_get_path("io", crt->cgroup_path, "io.stat", &path);
        if (r < 0)
                return r;

        f = fopen(path, "re");
        if (!f)
                return -errno;

        for (;;) {
                _cleanup_free_ char *line = NULL;
                const char *p;

                r = read_line(f, LONG_LINE_MAX, &line);
                if (r < 0)
                        return r;
                if (r == 0)
                        break;

                p = line;
                p += strcspn(p, WHITESPACE); /* Skip over device major/minor */
                p += strspn(p, WHITESPACE);  /* Skip over following whitespace */

                for (;;) {
                        _cleanup_free_ char *word = NULL;

                        r = extract_first_word(&p, &word, NULL, EXTRACT_RETAIN_ESCAPE);
                        if (r < 0)
                                return r;
                        if (r == 0)
                                break;

                        for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++) {
                                const char *x;

                                x = startswith(word, field_names[i]);
                                if (x) {
                                        uint64_t w;

                                        r = safe_atou64(x, &w);
                                        if (r < 0)
                                                return r;

                                        /* Sum up the stats of all devices */
                                        acc[i] += w;
                                        break;
                                }
                        }
                }
        }

        memcpy(ret, acc, sizeof(acc));
        return 0;
}

int unit_get_io_accounting(
                Unit *u,
                CGroupIOAccountingMetric metric,
                bool allow_cache,
                uint64_t *ret) {

        uint64_t raw[_CGROUP_IO_ACCOUNTING_METRIC_MAX];
        int r;

        /* Retrieve an IO account parameter. This will subtract the counter when the unit was started. */

        if (!UNIT_CGROUP_BOOL(u, io_accounting))
                return -ENODATA;

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return -ENODATA;

        if (allow_cache && crt->io_accounting_last[metric] != UINT64_MAX)
                goto done;

        r = unit_get_io_accounting_raw(u, raw);
        if (r == -ENODATA && crt->io_accounting_last[metric] != UINT64_MAX)
                goto done;
        if (r < 0)
                return r;

        for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++) {
                /* Saturated subtraction */
                if (raw[i] > crt->io_accounting_base[i])
                        crt->io_accounting_last[i] = raw[i] - crt->io_accounting_base[i];
                else
                        crt->io_accounting_last[i] = 0;
        }

done:
        if (ret)
                *ret = crt->io_accounting_last[metric];

        return 0;
}

int unit_reset_cpu_accounting(Unit *u) {
        int r;

        assert(u);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return 0;

        crt->cpu_usage_last = NSEC_INFINITY;

        r = unit_get_cpu_usage_raw(u, &crt->cpu_usage_base);
        if (r < 0) {
                crt->cpu_usage_base = 0;
                return r;
        }

        return 0;
}

void unit_reset_memory_accounting_last(Unit *u) {
        assert(u);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return;

        FOREACH_ELEMENT(i, crt->memory_accounting_last)
                *i = UINT64_MAX;
}

int unit_reset_ip_accounting(Unit *u) {
        int r = 0;

        assert(u);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return 0;

        if (crt->ip_accounting_ingress_map_fd >= 0)
                RET_GATHER(r, bpf_firewall_reset_accounting(crt->ip_accounting_ingress_map_fd));

        if (crt->ip_accounting_egress_map_fd >= 0)
                RET_GATHER(r, bpf_firewall_reset_accounting(crt->ip_accounting_egress_map_fd));

        zero(crt->ip_accounting_extra);

        return r;
}

void unit_reset_io_accounting_last(Unit *u) {
        assert(u);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return;

        FOREACH_ARRAY(i, crt->io_accounting_last, _CGROUP_IO_ACCOUNTING_METRIC_MAX)
                *i = UINT64_MAX;
}

int unit_reset_io_accounting(Unit *u) {
        int r;

        assert(u);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return 0;

        unit_reset_io_accounting_last(u);

        r = unit_get_io_accounting_raw(u, crt->io_accounting_base);
        if (r < 0) {
                zero(crt->io_accounting_base);
                return r;
        }

        return 0;
}

int unit_reset_accounting(Unit *u) {
        int r = 0;

        assert(u);

        RET_GATHER(r, unit_reset_cpu_accounting(u));
        RET_GATHER(r, unit_reset_io_accounting(u));
        RET_GATHER(r, unit_reset_ip_accounting(u));
        unit_reset_memory_accounting_last(u);

        return r;
}

void unit_invalidate_cgroup(Unit *u, CGroupMask m) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return;

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt)
                return;

        if (m == 0)
                return;

        /* always invalidate compat pairs together */
        if (m & (CGROUP_MASK_IO | CGROUP_MASK_BLKIO))
                m |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;

        if (m & (CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT))
                m |= CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT;

        if (FLAGS_SET(crt->cgroup_invalidated_mask, m)) /* NOP? */
                return;

        crt->cgroup_invalidated_mask |= m;
        unit_add_to_cgroup_realize_queue(u);
}

void unit_invalidate_cgroup_bpf(Unit *u) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return;

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt)
                return;

        if (crt->cgroup_invalidated_mask & CGROUP_MASK_BPF_FIREWALL) /* NOP? */
                return;

        crt->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
        unit_add_to_cgroup_realize_queue(u);

        /* If we are a slice unit, we also need to compile a new BPF program for all our children, as the IP access
         * list of our children includes our own. */
        if (u->type == UNIT_SLICE) {
                Unit *member;

                UNIT_FOREACH_DEPENDENCY(member, u, UNIT_ATOM_SLICE_OF)
                        unit_invalidate_cgroup_bpf(member);
        }
}

void unit_cgroup_catchup(Unit *u) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return;

        /* We dropped the inotify watch during reexec/reload, so we need to
         * check these as they may have changed.
         * Note that (currently) the kernel doesn't actually update cgroup
         * file modification times, so we can't just serialize and then check
         * the mtime for file(s) we are interested in. */
        (void) unit_check_cgroup_events(u);
        unit_add_to_cgroup_oom_queue(u);
}

bool unit_cgroup_delegate(Unit *u) {
        CGroupContext *c;

        assert(u);

        if (!UNIT_VTABLE(u)->can_delegate)
                return false;

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        return c->delegate;
}

void manager_invalidate_startup_units(Manager *m) {
        Unit *u;

        assert(m);

        SET_FOREACH(u, m->startup_units)
                unit_invalidate_cgroup(u, CGROUP_MASK_CPU|CGROUP_MASK_IO|CGROUP_MASK_BLKIO|CGROUP_MASK_CPUSET);
}

static int unit_cgroup_freezer_kernel_state(Unit *u, FreezerState *ret) {
        _cleanup_free_ char *val = NULL;
        FreezerState s;
        int r;

        assert(u);
        assert(ret);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return -ENODATA;

        r = cg_get_keyed_attribute(
                        SYSTEMD_CGROUP_CONTROLLER,
                        crt->cgroup_path,
                        "cgroup.events",
                        STRV_MAKE("frozen"),
                        &val);
        if (IN_SET(r, -ENOENT, -ENXIO))
                return -ENODATA;
        if (r < 0)
                return r;

        if (streq(val, "0"))
                s = FREEZER_RUNNING;
        else if (streq(val, "1"))
                s = FREEZER_FROZEN;
        else {
                log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL), "Unexpected cgroup frozen state: %s", val);
                s = _FREEZER_STATE_INVALID;
        }

        *ret = s;
        return 0;
}

int unit_cgroup_freezer_action(Unit *u, FreezerAction action) {
        _cleanup_free_ char *path = NULL;
        FreezerState target, current, next;
        int r;

        assert(u);
        assert(IN_SET(action, FREEZER_FREEZE, FREEZER_PARENT_FREEZE,
                      FREEZER_THAW, FREEZER_PARENT_THAW));

        if (!cg_freezer_supported())
                return 0;

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_realized)
                return 0; /* No cgroup = nothing running to freeze */

        unit_next_freezer_state(u, action, &next, &target);

        r = unit_cgroup_freezer_kernel_state(u, &current);
        if (r < 0)
                return r;

        if (current == target)
                next = freezer_state_finish(next);
        else if (IN_SET(next, FREEZER_FROZEN, FREEZER_FROZEN_BY_PARENT, FREEZER_RUNNING)) {
                /* We're transitioning into a finished state, which implies that the cgroup's
                 * current state already matches the target and thus we'd return 0. But, reality
                 * shows otherwise. This indicates that our freezer_state tracking has diverged
                 * from the real state of the cgroup, which can happen if someone meddles with the
                 * cgroup from underneath us. This really shouldn't happen during normal operation,
                 * though. So, let's warn about it and fix up the state to be valid. */

                log_unit_warning(u, "Unit wants to transition to %s freezer state but cgroup is unexpectedly %s, fixing up.",
                                 freezer_state_to_string(next), freezer_state_to_string(current) ?: "(invalid)");

                if (next == FREEZER_FROZEN)
                        next = FREEZER_FREEZING;
                else if (next == FREEZER_FROZEN_BY_PARENT)
                        next = FREEZER_FREEZING_BY_PARENT;
                else if (next == FREEZER_RUNNING)
                        next = FREEZER_THAWING;
        }

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, crt->cgroup_path, "cgroup.freeze", &path);
        if (r < 0)
                return r;

        log_unit_debug(u, "Unit freezer state was %s, now %s.",
                       freezer_state_to_string(u->freezer_state),
                       freezer_state_to_string(next));

        r = write_string_file(path, one_zero(target == FREEZER_FROZEN), WRITE_STRING_FILE_DISABLE_BUFFER);
        if (r < 0)
                return r;

        u->freezer_state = next;
        return target != current;
}

int unit_get_cpuset(Unit *u, CPUSet *cpus, const char *name) {
        _cleanup_free_ char *v = NULL;
        int r;

        assert(u);
        assert(cpus);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return -ENODATA;

        if ((crt->cgroup_realized_mask & CGROUP_MASK_CPUSET) == 0)
                return -ENODATA;

        r = cg_all_unified();
        if (r < 0)
                return r;
        if (r == 0)
                return -ENODATA;

        r = cg_get_attribute("cpuset", crt->cgroup_path, name, &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        return parse_cpu_set_full(v, cpus, false, NULL, NULL, 0, NULL);
}

CGroupRuntime *cgroup_runtime_new(void) {
        _cleanup_(cgroup_runtime_freep) CGroupRuntime *crt = NULL;

        crt = new(CGroupRuntime, 1);
        if (!crt)
                return NULL;

        *crt = (CGroupRuntime) {
                .cpu_usage_last = NSEC_INFINITY,

                .cgroup_control_inotify_wd = -1,
                .cgroup_memory_inotify_wd = -1,

                .ip_accounting_ingress_map_fd = -EBADF,
                .ip_accounting_egress_map_fd = -EBADF,

                .ipv4_allow_map_fd = -EBADF,
                .ipv6_allow_map_fd = -EBADF,
                .ipv4_deny_map_fd = -EBADF,
                .ipv6_deny_map_fd = -EBADF,

                .cgroup_invalidated_mask = _CGROUP_MASK_ALL,
        };

        FOREACH_ELEMENT(i, crt->memory_accounting_last)
                *i = UINT64_MAX;
        FOREACH_ELEMENT(i, crt->io_accounting_base)
                *i = UINT64_MAX;
        FOREACH_ELEMENT(i, crt->io_accounting_last)
                *i = UINT64_MAX;
        FOREACH_ELEMENT(i, crt->ip_accounting_extra)
                *i = UINT64_MAX;

        return TAKE_PTR(crt);
}

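/* Illustrative usage sketch (not part of the original source): typical ownership pattern
 * for the constructor above; the object is eventually released via cgroup_runtime_free(),
 * or automatically through the cgroup_runtime_freep cleanup handler:
 *
 *         CGroupRuntime *crt = cgroup_runtime_new();
 *         if (!crt)
 *                 return log_oom();
 *
 *         ...
 *
 *         crt = cgroup_runtime_free(crt);
 */
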
CGroupRuntime *cgroup_runtime_free(CGroupRuntime *crt) {
        if (!crt)
                return NULL;

        fdset_free(crt->initial_socket_bind_link_fds);
#if BPF_FRAMEWORK
        bpf_link_free(crt->ipv4_socket_bind_link);
        bpf_link_free(crt->ipv6_socket_bind_link);
#endif
        hashmap_free(crt->bpf_foreign_by_key);

        bpf_program_free(crt->bpf_device_control_installed);

#if BPF_FRAMEWORK
        bpf_link_free(crt->restrict_ifaces_ingress_bpf_link);
        bpf_link_free(crt->restrict_ifaces_egress_bpf_link);
#endif
        fdset_free(crt->initial_restrict_ifaces_link_fds);

        safe_close(crt->ipv4_allow_map_fd);
        safe_close(crt->ipv6_allow_map_fd);
        safe_close(crt->ipv4_deny_map_fd);
        safe_close(crt->ipv6_deny_map_fd);

        bpf_program_free(crt->ip_bpf_ingress);
        bpf_program_free(crt->ip_bpf_ingress_installed);
        bpf_program_free(crt->ip_bpf_egress);
        bpf_program_free(crt->ip_bpf_egress_installed);

        set_free(crt->ip_bpf_custom_ingress);
        set_free(crt->ip_bpf_custom_ingress_installed);
        set_free(crt->ip_bpf_custom_egress);
        set_free(crt->ip_bpf_custom_egress_installed);

        free(crt->cgroup_path);

        return mfree(crt);
}

static const char* const ip_accounting_metric_field_table[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
        [CGROUP_IP_INGRESS_BYTES]   = "ip-accounting-ingress-bytes",
        [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
        [CGROUP_IP_EGRESS_BYTES]    = "ip-accounting-egress-bytes",
        [CGROUP_IP_EGRESS_PACKETS]  = "ip-accounting-egress-packets",
};

DEFINE_PRIVATE_STRING_TABLE_LOOKUP(ip_accounting_metric_field, CGroupIPAccountingMetric);

static const char* const io_accounting_metric_field_base_table[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
        [CGROUP_IO_READ_BYTES]       = "io-accounting-read-bytes-base",
        [CGROUP_IO_WRITE_BYTES]      = "io-accounting-write-bytes-base",
        [CGROUP_IO_READ_OPERATIONS]  = "io-accounting-read-operations-base",
        [CGROUP_IO_WRITE_OPERATIONS] = "io-accounting-write-operations-base",
};

DEFINE_PRIVATE_STRING_TABLE_LOOKUP(io_accounting_metric_field_base, CGroupIOAccountingMetric);

static const char* const io_accounting_metric_field_last_table[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
        [CGROUP_IO_READ_BYTES]       = "io-accounting-read-bytes-last",
        [CGROUP_IO_WRITE_BYTES]      = "io-accounting-write-bytes-last",
        [CGROUP_IO_READ_OPERATIONS]  = "io-accounting-read-operations-last",
        [CGROUP_IO_WRITE_OPERATIONS] = "io-accounting-write-operations-last",
};

DEFINE_PRIVATE_STRING_TABLE_LOOKUP(io_accounting_metric_field_last, CGroupIOAccountingMetric);

static const char* const memory_accounting_metric_field_last_table[_CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST + 1] = {
        [CGROUP_MEMORY_PEAK]      = "memory-accounting-peak",
        [CGROUP_MEMORY_SWAP_PEAK] = "memory-accounting-swap-peak",
};

DEFINE_PRIVATE_STRING_TABLE_LOOKUP(memory_accounting_metric_field_last, CGroupMemoryAccountingMetric);

static int serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
        _cleanup_free_ char *s = NULL;
        int r;

        assert(f);
        assert(key);

        if (mask == 0)
                return 0;

        r = cg_mask_to_string(mask, &s);
        if (r < 0)
                return log_error_errno(r, "Failed to format cgroup mask: %m");

        return serialize_item(f, key, s);
}

int cgroup_runtime_serialize(Unit *u, FILE *f, FDSet *fds) {
        int r;

        assert(u);
        assert(f);
        assert(fds);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt)
                return 0;

        (void) serialize_item_format(f, "cpu-usage-base", "%" PRIu64, crt->cpu_usage_base);
        if (crt->cpu_usage_last != NSEC_INFINITY)
                (void) serialize_item_format(f, "cpu-usage-last", "%" PRIu64, crt->cpu_usage_last);

        if (crt->managed_oom_kill_last > 0)
                (void) serialize_item_format(f, "managed-oom-kill-last", "%" PRIu64, crt->managed_oom_kill_last);

        if (crt->oom_kill_last > 0)
                (void) serialize_item_format(f, "oom-kill-last", "%" PRIu64, crt->oom_kill_last);

        for (CGroupMemoryAccountingMetric metric = 0; metric <= _CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST; metric++) {
                uint64_t v;

                r = unit_get_memory_accounting(u, metric, &v);
                if (r >= 0)
                        (void) serialize_item_format(f, memory_accounting_metric_field_last_to_string(metric), "%" PRIu64, v);
        }

        for (CGroupIPAccountingMetric m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                uint64_t v;

                r = unit_get_ip_accounting(u, m, &v);
                if (r >= 0)
                        (void) serialize_item_format(f, ip_accounting_metric_field_to_string(m), "%" PRIu64, v);
        }

        for (CGroupIOAccountingMetric im = 0; im < _CGROUP_IO_ACCOUNTING_METRIC_MAX; im++) {
                (void) serialize_item_format(f, io_accounting_metric_field_base_to_string(im), "%" PRIu64, crt->io_accounting_base[im]);

                if (crt->io_accounting_last[im] != UINT64_MAX)
                        (void) serialize_item_format(f, io_accounting_metric_field_last_to_string(im), "%" PRIu64, crt->io_accounting_last[im]);
        }

        if (crt->cgroup_path)
                (void) serialize_item(f, "cgroup", crt->cgroup_path);
        if (crt->cgroup_id != 0)
                (void) serialize_item_format(f, "cgroup-id", "%" PRIu64, crt->cgroup_id);

        (void) serialize_bool(f, "cgroup-realized", crt->cgroup_realized);
        (void) serialize_cgroup_mask(f, "cgroup-realized-mask", crt->cgroup_realized_mask);
        (void) serialize_cgroup_mask(f, "cgroup-enabled-mask", crt->cgroup_enabled_mask);
        (void) serialize_cgroup_mask(f, "cgroup-invalidated-mask", crt->cgroup_invalidated_mask);

        (void) bpf_socket_bind_serialize(u, f, fds);

        (void) bpf_program_serialize_attachment(f, fds, "ip-bpf-ingress-installed", crt->ip_bpf_ingress_installed);
        (void) bpf_program_serialize_attachment(f, fds, "ip-bpf-egress-installed", crt->ip_bpf_egress_installed);
        (void) bpf_program_serialize_attachment(f, fds, "bpf-device-control-installed", crt->bpf_device_control_installed);
        (void) bpf_program_serialize_attachment_set(f, fds, "ip-bpf-custom-ingress-installed", crt->ip_bpf_custom_ingress_installed);
        (void) bpf_program_serialize_attachment_set(f, fds, "ip-bpf-custom-egress-installed", crt->ip_bpf_custom_egress_installed);

        (void) bpf_restrict_ifaces_serialize(u, f, fds);

        return 0;
}

#define MATCH_DESERIALIZE(u, key, l, v, parse_func, target)             \
        ({                                                              \
                bool _deserialize_matched = streq(l, key);              \
                if (_deserialize_matched) {                             \
                        CGroupRuntime *crt = unit_setup_cgroup_runtime(u); \
                        if (!crt)                                       \
                                log_oom_debug();                        \
                        else {                                          \
                                int _deserialize_r = parse_func(v);     \
                                if (_deserialize_r < 0)                 \
                                        log_unit_debug_errno(u, _deserialize_r, \
                                                             "Failed to parse \"%s=%s\", ignoring.", l, v); \
                                else                                    \
                                        crt->target = _deserialize_r;   \
                        }                                               \
                }                                                       \
                _deserialize_matched;                                   \
        })

#define MATCH_DESERIALIZE_IMMEDIATE(u, key, l, v, parse_func, target)   \
        ({                                                              \
                bool _deserialize_matched = streq(l, key);              \
                if (_deserialize_matched) {                             \
                        CGroupRuntime *crt = unit_setup_cgroup_runtime(u); \
                        if (!crt)                                       \
                                log_oom_debug();                        \
                        else {                                          \
                                int _deserialize_r = parse_func(v, &crt->target); \
                                if (_deserialize_r < 0)                 \
                                        log_unit_debug_errno(u, _deserialize_r, \
                                                             "Failed to parse \"%s=%s\", ignoring", l, v); \
                        }                                               \
                }                                                       \
                _deserialize_matched;                                   \
        })

#define MATCH_DESERIALIZE_METRIC(u, key, l, v, parse_func, target)      \
        ({                                                              \
                bool _deserialize_matched = streq(l, key);              \
                if (_deserialize_matched) {                             \
                        CGroupRuntime *crt = unit_setup_cgroup_runtime(u); \
                        if (!crt)                                       \
                                log_oom_debug();                        \
                        else {                                          \
                                int _deserialize_r = parse_func(v);     \
                                if (_deserialize_r < 0)                 \
                                        log_unit_debug_errno(u, _deserialize_r, \
                                                             "Failed to parse \"%s=%s\", ignoring.", l, v); \
                                else                                    \
                                        crt->target = _deserialize_r;   \
                        }                                               \
                }                                                       \
                _deserialize_matched;                                   \
        })

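/* For reference (not part of the original source): these macros evaluate to whether the
 * key matched, so the deserialization code below can chain them, e.g.:
 *
 *         if (MATCH_DESERIALIZE_IMMEDIATE(u, "cpu-usage-base", key, value, safe_atou64, cpu_usage_base))
 *                 return 1;
 *
 * This expands to a statement expression that looks up (or allocates) the unit's
 * CGroupRuntime and, if safe_atou64() parses the value successfully, stores the result
 * in crt->cpu_usage_base. */
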
int cgroup_runtime_deserialize_one(Unit *u, const char *key, const char *value, FDSet *fds) {
        int r;

        assert(u);
        assert(key);
        assert(value);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        if (MATCH_DESERIALIZE_IMMEDIATE(u, "cpu-usage-base", key, value, safe_atou64, cpu_usage_base) ||
            MATCH_DESERIALIZE_IMMEDIATE(u, "cpuacct-usage-base", key, value, safe_atou64, cpu_usage_base))
                return 1;

        if (MATCH_DESERIALIZE_IMMEDIATE(u, "cpu-usage-last", key, value, safe_atou64, cpu_usage_last))
                return 1;

        if (MATCH_DESERIALIZE_IMMEDIATE(u, "managed-oom-kill-last", key, value, safe_atou64, managed_oom_kill_last))
                return 1;

        if (MATCH_DESERIALIZE_IMMEDIATE(u, "oom-kill-last", key, value, safe_atou64, oom_kill_last))
                return 1;

        if (streq(key, "cgroup")) {
                r = unit_set_cgroup_path(u, value);
                if (r < 0)
                        log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", value);

                (void) unit_watch_cgroup(u);
                (void) unit_watch_cgroup_memory(u);
                return 1;
        }

        if (MATCH_DESERIALIZE_IMMEDIATE(u, "cgroup-id", key, value, safe_atou64, cgroup_id))
                return 1;

        if (MATCH_DESERIALIZE(u, "cgroup-realized", key, value, parse_boolean, cgroup_realized))
                return 1;

        if (MATCH_DESERIALIZE_IMMEDIATE(u, "cgroup-realized-mask", key, value, cg_mask_from_string, cgroup_realized_mask))
                return 1;

        if (MATCH_DESERIALIZE_IMMEDIATE(u, "cgroup-enabled-mask", key, value, cg_mask_from_string, cgroup_enabled_mask))
                return 1;

        if (MATCH_DESERIALIZE_IMMEDIATE(u, "cgroup-invalidated-mask", key, value, cg_mask_from_string, cgroup_invalidated_mask))
                return 1;

        if (STR_IN_SET(key, "ipv4-socket-bind-bpf-link-fd", "ipv6-socket-bind-bpf-link-fd")) {
                int fd;

                fd = deserialize_fd(fds, value);
                if (fd >= 0)
                        (void) bpf_socket_bind_add_initial_link_fd(u, fd);

                return 1;
        }

        if (STR_IN_SET(key,
                       "ip-bpf-ingress-installed", "ip-bpf-egress-installed",
                       "bpf-device-control-installed",
                       "ip-bpf-custom-ingress-installed", "ip-bpf-custom-egress-installed")) {

                CGroupRuntime *crt = unit_setup_cgroup_runtime(u);
                if (!crt)
                        log_oom_debug();
                else {
                        if (streq(key, "ip-bpf-ingress-installed"))
                                (void) bpf_program_deserialize_attachment(value, fds, &crt->ip_bpf_ingress_installed);

                        if (streq(key, "ip-bpf-egress-installed"))
                                (void) bpf_program_deserialize_attachment(value, fds, &crt->ip_bpf_egress_installed);

                        if (streq(key, "bpf-device-control-installed"))
                                (void) bpf_program_deserialize_attachment(value, fds, &crt->bpf_device_control_installed);

                        if (streq(key, "ip-bpf-custom-ingress-installed"))
                                (void) bpf_program_deserialize_attachment_set(value, fds, &crt->ip_bpf_custom_ingress_installed);

                        if (streq(key, "ip-bpf-custom-egress-installed"))
                                (void) bpf_program_deserialize_attachment_set(value, fds, &crt->ip_bpf_custom_egress_installed);
                }

                return 1;
        }

        if (streq(key, "restrict-ifaces-bpf-fd")) {
                int fd;

                fd = deserialize_fd(fds, value);
                if (fd >= 0)
                        (void) bpf_restrict_ifaces_add_initial_link_fd(u, fd);

                return 1;
        }

        CGroupMemoryAccountingMetric mm = memory_accounting_metric_field_last_from_string(key);
        if (mm >= 0) {
                uint64_t c;

                r = safe_atou64(value, &c);
                if (r < 0)
                        log_unit_debug(u, "Failed to parse memory accounting last value %s, ignoring.", value);
                else {
                        CGroupRuntime *crt = unit_setup_cgroup_runtime(u);
                        if (!crt)
                                log_oom_debug();
                        else
                                crt->memory_accounting_last[mm] = c;
                }

                return 1;
        }

        CGroupIPAccountingMetric ipm = ip_accounting_metric_field_from_string(key);
        if (ipm >= 0) {
                uint64_t c;

                r = safe_atou64(value, &c);
                if (r < 0)
                        log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", value);
                else {
                        CGroupRuntime *crt = unit_setup_cgroup_runtime(u);
                        if (!crt)
                                log_oom_debug();
                        else
                                crt->ip_accounting_extra[ipm] = c;
                }

                return 1;
        }

        CGroupIOAccountingMetric iom = io_accounting_metric_field_base_from_string(key);
        if (iom >= 0) {
                uint64_t c;

                r = safe_atou64(value, &c);
                if (r < 0)
                        log_unit_debug(u, "Failed to parse IO accounting base value %s, ignoring.", value);
                else {
                        CGroupRuntime *crt = unit_setup_cgroup_runtime(u);
                        if (!crt)
                                log_oom_debug();
                        else
                                crt->io_accounting_base[iom] = c;
                }

                return 1;
        }

        iom = io_accounting_metric_field_last_from_string(key);
        if (iom >= 0) {
                uint64_t c;

                r = safe_atou64(value, &c);
                if (r < 0)
                        log_unit_debug(u, "Failed to parse IO accounting last value %s, ignoring.", value);
                else {
                        CGroupRuntime *crt = unit_setup_cgroup_runtime(u);
                        if (!crt)
                                log_oom_debug();
                        else
                                crt->io_accounting_last[iom] = c;
                }

                return 1;
        }

        return 0;
}

static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
        [CGROUP_DEVICE_POLICY_AUTO]   = "auto",
        [CGROUP_DEVICE_POLICY_CLOSED] = "closed",
        [CGROUP_DEVICE_POLICY_STRICT] = "strict",
};

DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);

static const char* const cgroup_pressure_watch_table[_CGROUP_PRESSURE_WATCH_MAX] = {
        [CGROUP_PRESSURE_WATCH_OFF]  = "off",
        [CGROUP_PRESSURE_WATCH_AUTO] = "auto",
        [CGROUP_PRESSURE_WATCH_ON]   = "on",
        [CGROUP_PRESSURE_WATCH_SKIP] = "skip",
};

DEFINE_STRING_TABLE_LOOKUP_WITH_BOOLEAN(cgroup_pressure_watch, CGroupPressureWatch, CGROUP_PRESSURE_WATCH_ON);

static const char* const cgroup_ip_accounting_metric_table[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
        [CGROUP_IP_INGRESS_BYTES]   = "IPIngressBytes",
        [CGROUP_IP_EGRESS_BYTES]    = "IPEgressBytes",
        [CGROUP_IP_INGRESS_PACKETS] = "IPIngressPackets",
        [CGROUP_IP_EGRESS_PACKETS]  = "IPEgressPackets",
};

DEFINE_STRING_TABLE_LOOKUP(cgroup_ip_accounting_metric, CGroupIPAccountingMetric);

static const char* const cgroup_io_accounting_metric_table[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
        [CGROUP_IO_READ_BYTES]       = "IOReadBytes",
        [CGROUP_IO_WRITE_BYTES]      = "IOWriteBytes",
        [CGROUP_IO_READ_OPERATIONS]  = "IOReadOperations",
        [CGROUP_IO_WRITE_OPERATIONS] = "IOWriteOperations",
};

DEFINE_STRING_TABLE_LOOKUP(cgroup_io_accounting_metric, CGroupIOAccountingMetric);

static const char* const cgroup_memory_accounting_metric_table[_CGROUP_MEMORY_ACCOUNTING_METRIC_MAX] = {
        [CGROUP_MEMORY_PEAK]          = "MemoryPeak",
        [CGROUP_MEMORY_SWAP_CURRENT]  = "MemorySwapCurrent",
        [CGROUP_MEMORY_SWAP_PEAK]     = "MemorySwapPeak",
        [CGROUP_MEMORY_ZSWAP_CURRENT] = "MemoryZSwapCurrent",
};

DEFINE_STRING_TABLE_LOOKUP(cgroup_memory_accounting_metric, CGroupMemoryAccountingMetric);

static const char *const cgroup_effective_limit_type_table[_CGROUP_LIMIT_TYPE_MAX] = {
        [CGROUP_LIMIT_MEMORY_MAX]  = "EffectiveMemoryMax",
        [CGROUP_LIMIT_MEMORY_HIGH] = "EffectiveMemoryHigh",
        [CGROUP_LIMIT_TASKS_MAX]   = "EffectiveTasksMax",
};

DEFINE_STRING_TABLE_LOOKUP(cgroup_effective_limit_type, CGroupLimitType);