/* SPDX-License-Identifier: LGPL-2.1-or-later */

#include "sd-messages.h"

#include "alloc-util.h"
#include "blockdev-util.h"
#include "bpf-devices.h"
#include "bpf-firewall.h"
#include "bpf-foreign.h"
#include "bpf-socket-bind.h"
#include "btrfs-util.h"
#include "bus-error.h"
#include "cgroup-setup.h"
#include "cgroup-util.h"
#include "in-addr-prefix-util.h"
#include "inotify-util.h"
#include "ip-protocol-list.h"
#include "limits-util.h"
#include "nulstr-util.h"
#include "parse-util.h"
#include "path-util.h"
#include "percent-util.h"
#include "process-util.h"
#include "procfs-util.h"
#include "restrict-ifaces.h"
#include "stat-util.h"
#include "stdio-util.h"
#include "string-table.h"
#include "string-util.h"

#include "bpf-dlopen.h"
#include "bpf/restrict_fs/restrict-fs-skel.h"

#define CGROUP_CPU_QUOTA_DEFAULT_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)

/* Returns the log level to use when cgroup attribute writes fail. When an attribute is missing or we have access
 * problems we downgrade to LOG_DEBUG. This is supposed to be nice to container managers and kernels which want to mask
 * out specific attributes from us. */
#define LOG_LEVEL_CGROUP_WRITE(r) (IN_SET(abs(r), ENOENT, EROFS, EACCES, EPERM) ? LOG_DEBUG : LOG_WARNING)

uint64_t tasks_max_resolve(const TasksMax *tasks_max) {
        if (tasks_max->scale == 0)
                return tasks_max->value;

        return system_tasks_max_scale(tasks_max->value, tasks_max->scale);
}

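/* Worked example: TasksMax=512 is stored with .scale == 0 and resolves to exactly 512, while a percentage such as
 * TasksMax=40% is stored with a non-zero .scale and resolves to 40% of the kernel's system-wide task limit via
 * system_tasks_max_scale(). (Illustrative; the exact fraction encoding is defined where TasksMax= is parsed.) */
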
bool manager_owns_host_root_cgroup(Manager *m) {
        assert(m);

        /* Returns true if we are managing the root cgroup. Note that it isn't sufficient to just check whether the
         * group root path equals "/" since that will also be the case if CLONE_NEWCGROUP is in the mix. Since there
         * appears to be no nice way to detect whether we are in a CLONE_NEWCGROUP namespace we instead just check if
         * we run in any kind of container virtualization. */

        if (MANAGER_IS_USER(m))
                return false;

        if (detect_container() > 0)
                return false;

        return empty_or_root(m->cgroup_root);
}

bool unit_has_startup_cgroup_constraints(Unit *u) {
        CGroupContext *c;

        assert(u);

        /* Returns true if this unit has any directives which apply during
         * startup/shutdown phases. */

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        return c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID ||
               c->startup_io_weight != CGROUP_WEIGHT_INVALID ||
               c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
               c->startup_cpuset_cpus.set ||
               c->startup_cpuset_mems.set;
}

bool unit_has_host_root_cgroup(Unit *u) {
        assert(u);

        /* Returns whether this unit manages the root cgroup. This will return true if this unit is the root slice and
         * the manager manages the root cgroup. */

        if (!manager_owns_host_root_cgroup(u->manager))
                return false;

        return unit_has_name(u, SPECIAL_ROOT_SLICE);
}

static int set_attribute_and_warn(Unit *u, const char *controller, const char *attribute, const char *value) {
        int r;

        r = cg_set_attribute(controller, u->cgroup_path, attribute, value);
        if (r < 0)
                log_unit_full_errno(u, LOG_LEVEL_CGROUP_WRITE(r), r, "Failed to set '%s' attribute on '%s' to '%.*s': %m",
                                    strna(attribute), empty_to_root(u->cgroup_path), (int) strcspn(value, NEWLINE), value);

        return r;
}

static void cgroup_compat_warn(void) {
        static bool cgroup_compat_warned = false;

        if (cgroup_compat_warned)
                return;

        log_warning("cgroup compatibility translation between legacy and unified hierarchy settings activated. "
                    "See cgroup-compat debug messages for details.");

        cgroup_compat_warned = true;
}

#define log_cgroup_compat(unit, fmt, ...) do {                                  \
                cgroup_compat_warn();                                           \
                log_unit_debug(unit, "cgroup-compat: " fmt, ##__VA_ARGS__);     \
        } while (false)

void cgroup_context_init(CGroupContext *c) {
        assert(c);

        /* Initialize everything to the kernel defaults. */

        *c = (CGroupContext) {
                .cpu_weight = CGROUP_WEIGHT_INVALID,
                .startup_cpu_weight = CGROUP_WEIGHT_INVALID,
                .cpu_quota_per_sec_usec = USEC_INFINITY,
                .cpu_quota_period_usec = USEC_INFINITY,

                .cpu_shares = CGROUP_CPU_SHARES_INVALID,
                .startup_cpu_shares = CGROUP_CPU_SHARES_INVALID,

                .memory_high = CGROUP_LIMIT_MAX,
                .memory_max = CGROUP_LIMIT_MAX,
                .memory_swap_max = CGROUP_LIMIT_MAX,

                .memory_limit = CGROUP_LIMIT_MAX,

                .io_weight = CGROUP_WEIGHT_INVALID,
                .startup_io_weight = CGROUP_WEIGHT_INVALID,

                .blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID,
                .startup_blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID,

                .tasks_max = TASKS_MAX_UNSET,

                .moom_swap = MANAGED_OOM_AUTO,
                .moom_mem_pressure = MANAGED_OOM_AUTO,
                .moom_preference = MANAGED_OOM_PREFERENCE_NONE,
        };
}

void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
        assert(c);
        assert(a);

        LIST_REMOVE(device_allow, c->device_allow, a);
        free(a->path);
        free(a);
}

void cgroup_context_free_io_device_weight(CGroupContext *c, CGroupIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->io_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_io_device_latency(CGroupContext *c, CGroupIODeviceLatency *l) {
        assert(c);
        assert(l);

        LIST_REMOVE(device_latencies, c->io_device_latencies, l);
        free(l->path);
        free(l);
}

void cgroup_context_free_io_device_limit(CGroupContext *c, CGroupIODeviceLimit *l) {
        assert(c);
        assert(l);

        LIST_REMOVE(device_limits, c->io_device_limits, l);
        free(l->path);
        free(l);
}

void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->blockio_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b) {
        assert(c);
        assert(b);

        LIST_REMOVE(device_bandwidths, c->blockio_device_bandwidths, b);
        free(b->path);
        free(b);
}

void cgroup_context_remove_bpf_foreign_program(CGroupContext *c, CGroupBPFForeignProgram *p) {
        assert(c);
        assert(p);

        LIST_REMOVE(programs, c->bpf_foreign_programs, p);
        free(p->bpffs_path);
        free(p);
}

void cgroup_context_remove_socket_bind(CGroupSocketBindItem **head) {
        assert(head);

        while (*head) {
                CGroupSocketBindItem *h = *head;

                LIST_REMOVE(socket_bind_items, *head, h);
                free(h);
        }
}

void cgroup_context_done(CGroupContext *c) {
        assert(c);

        while (c->io_device_weights)
                cgroup_context_free_io_device_weight(c, c->io_device_weights);

        while (c->io_device_latencies)
                cgroup_context_free_io_device_latency(c, c->io_device_latencies);

        while (c->io_device_limits)
                cgroup_context_free_io_device_limit(c, c->io_device_limits);

        while (c->blockio_device_weights)
                cgroup_context_free_blockio_device_weight(c, c->blockio_device_weights);

        while (c->blockio_device_bandwidths)
                cgroup_context_free_blockio_device_bandwidth(c, c->blockio_device_bandwidths);

        while (c->device_allow)
                cgroup_context_free_device_allow(c, c->device_allow);

        cgroup_context_remove_socket_bind(&c->socket_bind_allow);
        cgroup_context_remove_socket_bind(&c->socket_bind_deny);

        c->ip_address_allow = set_free(c->ip_address_allow);
        c->ip_address_deny = set_free(c->ip_address_deny);

        c->ip_filters_ingress = strv_free(c->ip_filters_ingress);
        c->ip_filters_egress = strv_free(c->ip_filters_egress);

        while (c->bpf_foreign_programs)
                cgroup_context_remove_bpf_foreign_program(c, c->bpf_foreign_programs);

        c->restrict_network_interfaces = set_free(c->restrict_network_interfaces);

        cpu_set_reset(&c->cpuset_cpus);
        cpu_set_reset(&c->startup_cpuset_cpus);
        cpu_set_reset(&c->cpuset_mems);
        cpu_set_reset(&c->startup_cpuset_mems);
}

static int unit_get_kernel_memory_limit(Unit *u, const char *file, uint64_t *ret) {
        assert(u);

        if (!u->cgroup_realized)
                return -EOWNERDEAD;

        return cg_get_attribute_as_uint64("memory", u->cgroup_path, file, ret);
}

static int unit_compare_memory_limit(Unit *u, const char *property_name, uint64_t *ret_unit_value, uint64_t *ret_kernel_value) {
        uint64_t unit_value;
        CGroupContext *c;
        CGroupMask m;
        const char *file;
        int r;

        /* Compare kernel memcg configuration against our internal systemd state. Unsupported (and will
         * return -ENODATA) on cgroup v1.
         *
         * Returns:
         *
         * <0: On error.
         *  0: If the kernel memory setting doesn't match our configuration.
         * >0: If the kernel memory setting matches our configuration.
         *
         * The following values are only guaranteed to be populated on return >=0:
         *
         * - ret_unit_value will contain our internal expected value for the unit, page-aligned.
         * - ret_kernel_value will contain the actual value presented by the kernel. */

        assert(u);

        r = cg_all_unified();
        if (r < 0)
                return log_debug_errno(r, "Failed to determine cgroup hierarchy version: %m");

        /* Unsupported on v1.
         *
         * We don't return ENOENT, since that could actually mask a genuine problem where somebody else has
         * silently masked the controller. */
        if (r == 0)
                return -ENODATA;

        /* The root slice doesn't have any controller files, so we can't compare anything. */
        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return -ENODATA;

        /* It's possible to have MemoryFoo set without systemd wanting to have the memory controller enabled,
         * for example, in the case of DisableControllers= or cgroup_disable on the kernel command line. To
         * avoid specious errors in these scenarios, check that we even expect the memory controller to be
         * enabled at all. */
        m = unit_get_target_mask(u);
        if (!FLAGS_SET(m, CGROUP_MASK_MEMORY))
                return -ENODATA;

        assert_se(c = unit_get_cgroup_context(u));

        if (streq(property_name, "MemoryLow")) {
                unit_value = unit_get_ancestor_memory_low(u);
                file = "memory.low";
        } else if (streq(property_name, "MemoryMin")) {
                unit_value = unit_get_ancestor_memory_min(u);
                file = "memory.min";
        } else if (streq(property_name, "MemoryHigh")) {
                unit_value = c->memory_high;
                file = "memory.high";
        } else if (streq(property_name, "MemoryMax")) {
                unit_value = c->memory_max;
                file = "memory.max";
        } else if (streq(property_name, "MemorySwapMax")) {
                unit_value = c->memory_swap_max;
                file = "memory.swap.max";
        } else
                return -EINVAL;

        r = unit_get_kernel_memory_limit(u, file, ret_kernel_value);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to parse %s: %m", file);

        /* It's intended (soon) in a future kernel to not expose cgroup memory limits rounded to page
         * boundaries, but instead separate the user-exposed limit, which is whatever userspace told us, from
         * our internal page-counting. To support those future kernels, just check the value itself first
         * without any page-alignment. */
        if (*ret_kernel_value == unit_value) {
                *ret_unit_value = unit_value;
                return 1;
        }

        /* The current kernel behaviour, by comparison, is that even if you write a particular number of
         * bytes into a cgroup memory file, it always returns that number page-aligned down (since the kernel
         * internally stores cgroup limits in pages). As such, so long as it aligns properly, everything is
         * fine. */
        if (unit_value != CGROUP_LIMIT_MAX)
                unit_value = PAGE_ALIGN_DOWN(unit_value);

        *ret_unit_value = unit_value;

        return *ret_kernel_value == *ret_unit_value;
}

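/* Numeric illustration of the page-alignment comparison above: MemoryMax=10000000 is not a multiple of a 4KiB
 * page, so a current kernel reports it back aligned down as 9998336 (= 2441 * 4096). PAGE_ALIGN_DOWN(10000000)
 * yields the same 9998336, so the comparison still counts this as a match. (Illustrative, assuming a 4096-byte
 * page size.) */
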
#define FORMAT_CGROUP_DIFF_MAX 128

static char *format_cgroup_memory_limit_comparison(char *buf, size_t l, Unit *u, const char *property_name) {
        uint64_t kval, sval;
        int r;

        assert(u);
        assert(buf);
        assert(l > 0);

        r = unit_compare_memory_limit(u, property_name, &sval, &kval);

        /* memory.swap.max is special in that it relies on CONFIG_MEMCG_SWAP (and the default swapaccount=1).
         * In the absence of reliably being able to detect whether memcg swap support is available or not,
         * only complain if the error is not ENOENT. */
        if (r > 0 || IN_SET(r, -ENODATA, -EOWNERDEAD) ||
            (r == -ENOENT && streq(property_name, "MemorySwapMax"))) {
                buf[0] = 0;
                return buf;
        }

        if (r < 0) {
                (void) snprintf(buf, l, " (error getting kernel value: %s)", strerror_safe(r));
                return buf;
        }

        (void) snprintf(buf, l, " (different value in kernel: %" PRIu64 ")", kval);

        return buf;
}

void cgroup_context_dump(Unit *u, FILE* f, const char *prefix) {
        _cleanup_free_ char *disable_controllers_str = NULL, *cpuset_cpus = NULL, *cpuset_mems = NULL, *startup_cpuset_cpus = NULL, *startup_cpuset_mems = NULL;
        CGroupContext *c;
        struct in_addr_prefix *iaai;
        char **path;

        char cda[FORMAT_CGROUP_DIFF_MAX];
        char cdb[FORMAT_CGROUP_DIFF_MAX];
        char cdc[FORMAT_CGROUP_DIFF_MAX];
        char cdd[FORMAT_CGROUP_DIFF_MAX];
        char cde[FORMAT_CGROUP_DIFF_MAX];

        assert(u);
        assert(f);

        assert_se(c = unit_get_cgroup_context(u));

        prefix = strempty(prefix);

        (void) cg_mask_to_string(c->disable_controllers, &disable_controllers_str);

        cpuset_cpus = cpu_set_to_range_string(&c->cpuset_cpus);
        startup_cpuset_cpus = cpu_set_to_range_string(&c->startup_cpuset_cpus);
        cpuset_mems = cpu_set_to_range_string(&c->cpuset_mems);
        startup_cpuset_mems = cpu_set_to_range_string(&c->startup_cpuset_mems);

        fprintf(f,
                "%sCPUAccounting: %s\n"
                "%sIOAccounting: %s\n"
                "%sBlockIOAccounting: %s\n"
                "%sMemoryAccounting: %s\n"
                "%sTasksAccounting: %s\n"
                "%sIPAccounting: %s\n"
                "%sCPUWeight: %" PRIu64 "\n"
                "%sStartupCPUWeight: %" PRIu64 "\n"
                "%sCPUShares: %" PRIu64 "\n"
                "%sStartupCPUShares: %" PRIu64 "\n"
                "%sCPUQuotaPerSecSec: %s\n"
                "%sCPUQuotaPeriodSec: %s\n"
                "%sAllowedCPUs: %s\n"
                "%sStartupAllowedCPUs: %s\n"
                "%sAllowedMemoryNodes: %s\n"
                "%sStartupAllowedMemoryNodes: %s\n"
                "%sIOWeight: %" PRIu64 "\n"
                "%sStartupIOWeight: %" PRIu64 "\n"
                "%sBlockIOWeight: %" PRIu64 "\n"
                "%sStartupBlockIOWeight: %" PRIu64 "\n"
                "%sDefaultMemoryMin: %" PRIu64 "\n"
                "%sDefaultMemoryLow: %" PRIu64 "\n"
                "%sMemoryMin: %" PRIu64 "%s\n"
                "%sMemoryLow: %" PRIu64 "%s\n"
                "%sMemoryHigh: %" PRIu64 "%s\n"
                "%sMemoryMax: %" PRIu64 "%s\n"
                "%sMemorySwapMax: %" PRIu64 "%s\n"
                "%sMemoryLimit: %" PRIu64 "\n"
                "%sTasksMax: %" PRIu64 "\n"
                "%sDevicePolicy: %s\n"
                "%sDisableControllers: %s\n"
                "%sDelegate: %s\n"
                "%sManagedOOMSwap: %s\n"
                "%sManagedOOMMemoryPressure: %s\n"
                "%sManagedOOMMemoryPressureLimit: " PERMYRIAD_AS_PERCENT_FORMAT_STR "\n"
                "%sManagedOOMPreference: %s\n",
                prefix, yes_no(c->cpu_accounting),
                prefix, yes_no(c->io_accounting),
                prefix, yes_no(c->blockio_accounting),
                prefix, yes_no(c->memory_accounting),
                prefix, yes_no(c->tasks_accounting),
                prefix, yes_no(c->ip_accounting),
                prefix, c->cpu_weight,
                prefix, c->startup_cpu_weight,
                prefix, c->cpu_shares,
                prefix, c->startup_cpu_shares,
                prefix, FORMAT_TIMESPAN(c->cpu_quota_per_sec_usec, 1),
                prefix, FORMAT_TIMESPAN(c->cpu_quota_period_usec, 1),
                prefix, strempty(cpuset_cpus),
                prefix, strempty(startup_cpuset_cpus),
                prefix, strempty(cpuset_mems),
                prefix, strempty(startup_cpuset_mems),
                prefix, c->io_weight,
                prefix, c->startup_io_weight,
                prefix, c->blockio_weight,
                prefix, c->startup_blockio_weight,
                prefix, c->default_memory_min,
                prefix, c->default_memory_low,
                prefix, c->memory_min, format_cgroup_memory_limit_comparison(cda, sizeof(cda), u, "MemoryMin"),
                prefix, c->memory_low, format_cgroup_memory_limit_comparison(cdb, sizeof(cdb), u, "MemoryLow"),
                prefix, c->memory_high, format_cgroup_memory_limit_comparison(cdc, sizeof(cdc), u, "MemoryHigh"),
                prefix, c->memory_max, format_cgroup_memory_limit_comparison(cdd, sizeof(cdd), u, "MemoryMax"),
                prefix, c->memory_swap_max, format_cgroup_memory_limit_comparison(cde, sizeof(cde), u, "MemorySwapMax"),
                prefix, c->memory_limit,
                prefix, tasks_max_resolve(&c->tasks_max),
                prefix, cgroup_device_policy_to_string(c->device_policy),
                prefix, strempty(disable_controllers_str),
                prefix, yes_no(c->delegate),
                prefix, managed_oom_mode_to_string(c->moom_swap),
                prefix, managed_oom_mode_to_string(c->moom_mem_pressure),
                prefix, PERMYRIAD_AS_PERCENT_FORMAT_VAL(UINT32_SCALE_TO_PERMYRIAD(c->moom_mem_pressure_limit)),
                prefix, managed_oom_preference_to_string(c->moom_preference));

        if (c->delegate) {
                _cleanup_free_ char *t = NULL;

                (void) cg_mask_to_string(c->delegate_controllers, &t);

                fprintf(f, "%sDelegateControllers: %s\n",
                        prefix,
                        strempty(t));
        }

        LIST_FOREACH(device_allow, a, c->device_allow)
                fprintf(f,
                        "%sDeviceAllow: %s %s%s%s\n",
                        prefix,
                        a->path,
                        a->r ? "r" : "", a->w ? "w" : "", a->m ? "m" : "");

        LIST_FOREACH(device_weights, iw, c->io_device_weights)
                fprintf(f,
                        "%sIODeviceWeight: %s %" PRIu64 "\n",
                        prefix,
                        iw->path,
                        iw->weight);

        LIST_FOREACH(device_latencies, l, c->io_device_latencies)
                fprintf(f,
                        "%sIODeviceLatencyTargetSec: %s %s\n",
                        prefix,
                        l->path,
                        FORMAT_TIMESPAN(l->target_usec, 1));

        LIST_FOREACH(device_limits, il, c->io_device_limits)
                for (CGroupIOLimitType type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                        if (il->limits[type] != cgroup_io_limit_defaults[type])
                                fprintf(f,
                                        "%s%s: %s %s\n",
                                        prefix,
                                        cgroup_io_limit_type_to_string(type),
                                        il->path,
                                        FORMAT_BYTES(il->limits[type]));

        LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                fprintf(f,
                        "%sBlockIODeviceWeight: %s %" PRIu64 "\n",
                        prefix,
                        w->path,
                        w->weight);

        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                if (b->rbps != CGROUP_LIMIT_MAX)
                        fprintf(f,
                                "%sBlockIOReadBandwidth: %s %s\n",
                                prefix,
                                b->path,
                                FORMAT_BYTES(b->rbps));
                if (b->wbps != CGROUP_LIMIT_MAX)
                        fprintf(f,
                                "%sBlockIOWriteBandwidth: %s %s\n",
                                prefix,
                                b->path,
                                FORMAT_BYTES(b->wbps));
        }

        SET_FOREACH(iaai, c->ip_address_allow) {
                _cleanup_free_ char *k = NULL;

                (void) in_addr_prefix_to_string(iaai->family, &iaai->address, iaai->prefixlen, &k);
                fprintf(f, "%sIPAddressAllow: %s\n", prefix, strnull(k));
        }

        SET_FOREACH(iaai, c->ip_address_deny) {
                _cleanup_free_ char *k = NULL;

                (void) in_addr_prefix_to_string(iaai->family, &iaai->address, iaai->prefixlen, &k);
                fprintf(f, "%sIPAddressDeny: %s\n", prefix, strnull(k));
        }

        STRV_FOREACH(path, c->ip_filters_ingress)
                fprintf(f, "%sIPIngressFilterPath: %s\n", prefix, *path);

        STRV_FOREACH(path, c->ip_filters_egress)
                fprintf(f, "%sIPEgressFilterPath: %s\n", prefix, *path);

        LIST_FOREACH(programs, p, c->bpf_foreign_programs)
                fprintf(f, "%sBPFProgram: %s:%s\n",
                        prefix, bpf_cgroup_attach_type_to_string(p->attach_type), p->bpffs_path);

        if (c->socket_bind_allow) {
                fprintf(f, "%sSocketBindAllow:", prefix);
                LIST_FOREACH(socket_bind_items, bi, c->socket_bind_allow)
                        cgroup_context_dump_socket_bind_item(bi, f);
                fputc('\n', f);
        }

        if (c->socket_bind_deny) {
                fprintf(f, "%sSocketBindDeny:", prefix);
                LIST_FOREACH(socket_bind_items, bi, c->socket_bind_deny)
                        cgroup_context_dump_socket_bind_item(bi, f);
                fputc('\n', f);
        }

        if (c->restrict_network_interfaces) {
                char *iface;

                SET_FOREACH(iface, c->restrict_network_interfaces)
                        fprintf(f, "%sRestrictNetworkInterfaces: %s\n", prefix, iface);
        }
}

void cgroup_context_dump_socket_bind_item(const CGroupSocketBindItem *item, FILE *f) {
        const char *family, *colon1, *protocol = "", *colon2 = "";

        family = strempty(af_to_ipv4_ipv6(item->address_family));
        colon1 = isempty(family) ? "" : ":";

        if (item->ip_protocol != 0) {
                protocol = ip_protocol_to_tcp_udp(item->ip_protocol);
                colon2 = ":";
        }

        if (item->nr_ports == 0)
                fprintf(f, " %s%s%s%sany", family, colon1, protocol, colon2);
        else if (item->nr_ports == 1)
                fprintf(f, " %s%s%s%s%" PRIu16, family, colon1, protocol, colon2, item->port_min);
        else {
                uint16_t port_max = item->port_min + item->nr_ports - 1;

                fprintf(f, " %s%s%s%s%" PRIu16 "-%" PRIu16, family, colon1, protocol, colon2,
                        item->port_min, port_max);
        }
}

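/* Example output fragments produced above: an item with an IPv4 address family, TCP protocol, port_min 80 and
 * nr_ports 9 is rendered as " ipv4:tcp:80-88"; an item with nr_ports 0 and no family/protocol set is rendered
 * as " any". */
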
int cgroup_add_device_allow(CGroupContext *c, const char *dev, const char *mode) {
        _cleanup_free_ CGroupDeviceAllow *a = NULL;
        _cleanup_free_ char *d = NULL;

        assert(c);
        assert(dev);
        assert(isempty(mode) || in_charset(mode, "rwm"));

        a = new(CGroupDeviceAllow, 1);
        if (!a)
                return -ENOMEM;

        d = strdup(dev);
        if (!d)
                return -ENOMEM;

        *a = (CGroupDeviceAllow) {
                .path = TAKE_PTR(d),
                .r = isempty(mode) || strchr(mode, 'r'),
                .w = isempty(mode) || strchr(mode, 'w'),
                .m = isempty(mode) || strchr(mode, 'm'),
        };

        LIST_PREPEND(device_allow, c->device_allow, a);
        TAKE_PTR(a);

        return 0;
}

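/* Typical call, mirroring a DeviceAllow=/dev/null rwm unit file line: cgroup_add_device_allow(c, "/dev/null", "rwm").
 * Passing an empty mode grants all three access bits, since each initializer above falls back to true when
 * isempty(mode). */
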
int cgroup_add_bpf_foreign_program(CGroupContext *c, uint32_t attach_type, const char *bpffs_path) {
        CGroupBPFForeignProgram *p;
        _cleanup_free_ char *d = NULL;

        assert(c);
        assert(bpffs_path);

        if (!path_is_normalized(bpffs_path) || !path_is_absolute(bpffs_path))
                return log_error_errno(SYNTHETIC_ERRNO(EINVAL), "Path is not normalized: %m");

        d = strdup(bpffs_path);
        if (!d)
                return log_oom();

        p = new(CGroupBPFForeignProgram, 1);
        if (!p)
                return log_oom();

        *p = (CGroupBPFForeignProgram) {
                .attach_type = attach_type,
                .bpffs_path = TAKE_PTR(d),
        };

        LIST_PREPEND(programs, c->bpf_foreign_programs, TAKE_PTR(p));

        return 0;
}

#define UNIT_DEFINE_ANCESTOR_MEMORY_LOOKUP(entry)                       \
        uint64_t unit_get_ancestor_##entry(Unit *u) {                   \
                CGroupContext *c;                                       \
                                                                        \
                /* 1. Is entry set in this unit? If so, use that.       \
                 * 2. Is the default for this entry set in any          \
                 *    ancestor? If so, use that.                        \
                 * 3. Otherwise, return CGROUP_LIMIT_MIN. */            \
                                                                        \
                assert(u);                                              \
                                                                        \
                c = unit_get_cgroup_context(u);                         \
                if (c && c->entry##_set)                                \
                        return c->entry;                                \
                                                                        \
                while ((u = UNIT_GET_SLICE(u))) {                       \
                        c = unit_get_cgroup_context(u);                 \
                        if (c && c->default_##entry##_set)              \
                                return c->default_##entry;              \
                }                                                       \
                                                                        \
                /* We've reached the root, but nobody had default for   \
                 * this entry set, so set it to the kernel default. */  \
                return CGROUP_LIMIT_MIN;                                \
        }

UNIT_DEFINE_ANCESTOR_MEMORY_LOOKUP(memory_low);
UNIT_DEFINE_ANCESTOR_MEMORY_LOOKUP(memory_min);

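/* The two expansions above generate unit_get_ancestor_memory_low() and unit_get_ancestor_memory_min(), used
 * elsewhere in this file to resolve MemoryLow=/MemoryMin= against DefaultMemoryLow=/DefaultMemoryMin= settings
 * inherited from parent slices. */
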
static void unit_set_xattr_graceful(Unit *u, const char *cgroup_path, const char *name, const void *data, size_t size) {
        int r;

        assert(u);
        assert(name);

        if (!cgroup_path) {
                if (!u->cgroup_path)
                        return;

                cgroup_path = u->cgroup_path;
        }

        r = cg_set_xattr(SYSTEMD_CGROUP_CONTROLLER, cgroup_path, name, data, size, 0);
        if (r < 0)
                log_unit_debug_errno(u, r, "Failed to set '%s' xattr on control group %s, ignoring: %m", name, empty_to_root(cgroup_path));
}

static void unit_remove_xattr_graceful(Unit *u, const char *cgroup_path, const char *name) {
        int r;

        assert(u);
        assert(name);

        if (!cgroup_path) {
                if (!u->cgroup_path)
                        return;

                cgroup_path = u->cgroup_path;
        }

        r = cg_remove_xattr(SYSTEMD_CGROUP_CONTROLLER, cgroup_path, name);
        if (r < 0 && r != -ENODATA)
                log_unit_debug_errno(u, r, "Failed to remove '%s' xattr flag on control group %s, ignoring: %m", name, empty_to_root(cgroup_path));
}

void cgroup_oomd_xattr_apply(Unit *u, const char *cgroup_path) {
        CGroupContext *c;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return;

        if (c->moom_preference == MANAGED_OOM_PREFERENCE_OMIT)
                unit_set_xattr_graceful(u, cgroup_path, "user.oomd_omit", "1", 1);

        if (c->moom_preference == MANAGED_OOM_PREFERENCE_AVOID)
                unit_set_xattr_graceful(u, cgroup_path, "user.oomd_avoid", "1", 1);

        if (c->moom_preference != MANAGED_OOM_PREFERENCE_AVOID)
                unit_remove_xattr_graceful(u, cgroup_path, "user.oomd_avoid");

        if (c->moom_preference != MANAGED_OOM_PREFERENCE_OMIT)
                unit_remove_xattr_graceful(u, cgroup_path, "user.oomd_omit");
}

static void cgroup_xattr_apply(Unit *u) {
        bool b;

        assert(u);

        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        b = !sd_id128_is_null(u->invocation_id);
        FOREACH_STRING(xn, "trusted.invocation_id", "user.invocation_id") {
                if (b)
                        unit_set_xattr_graceful(u, NULL, xn, SD_ID128_TO_STRING(u->invocation_id), 32);
                else
                        unit_remove_xattr_graceful(u, NULL, xn);
        }

        /* Indicate on the cgroup whether delegation is on, via an xattr. This is best-effort, as old kernels
         * didn't support xattrs on cgroups at all. Later they got support for setting 'trusted.*' xattrs,
         * and even later 'user.*' xattrs. We started setting this field when 'trusted.*' was added, and
         * given this is now pretty much API, let's continue to support that. But also set 'user.*' as well,
         * since it is readable by any user, not just CAP_SYS_ADMIN. This hence comes with slightly weaker
         * security (as users who got delegated cgroups could turn it off if they like), but this shouldn't
         * be a big problem given this communicates delegation state to clients, but the manager never reads
         * it. */
        b = unit_cgroup_delegate(u);
        FOREACH_STRING(xn, "trusted.delegate", "user.delegate") {
                if (b)
                        unit_set_xattr_graceful(u, NULL, xn, "1", 1);
                else
                        unit_remove_xattr_graceful(u, NULL, xn);
        }

        cgroup_oomd_xattr_apply(u, u->cgroup_path);
}

static int lookup_block_device(const char *p, dev_t *ret) {
        dev_t rdev, dev = 0;
        mode_t mode;
        int r;

        assert(p);
        assert(ret);

        r = device_path_parse_major_minor(p, &mode, &rdev);
        if (r == -ENODEV) { /* not a parsable device node, need to go to disk */
                struct stat st;

                if (stat(p, &st) < 0)
                        return log_warning_errno(errno, "Couldn't stat device '%s': %m", p);

                mode = st.st_mode;
                rdev = st.st_rdev;
                dev = st.st_dev;
        } else if (r < 0)
                return log_warning_errno(r, "Failed to parse major/minor from path '%s': %m", p);

        if (S_ISCHR(mode))
                return log_warning_errno(SYNTHETIC_ERRNO(ENOTBLK),
                                         "Device node '%s' is a character device, but block device needed.", p);
        if (S_ISBLK(mode))
                *ret = rdev;
        else if (major(dev) != 0)
                *ret = dev; /* If this is not a device node then use the block device this file is stored on */
        else {
                /* If this is btrfs, getting the backing block device is a bit harder */
                r = btrfs_get_block_device(p, ret);
                if (r == -ENOTTY)
                        return log_warning_errno(SYNTHETIC_ERRNO(ENODEV),
                                                 "'%s' is not a block device node, and file system block device cannot be determined or is not local.", p);
                if (r < 0)
                        return log_warning_errno(r, "Failed to determine block device backing btrfs file system '%s': %m", p);
        }

        /* If this is a LUKS/DM device, recursively try to get the originating block device */
        while (block_get_originating(*ret, ret) > 0);

        /* If this is a partition, try to get the originating block device */
        (void) block_get_whole_disk(*ret, ret);

        return 0;
}

static bool cgroup_context_has_cpu_weight(CGroupContext *c) {
        return c->cpu_weight != CGROUP_WEIGHT_INVALID ||
                c->startup_cpu_weight != CGROUP_WEIGHT_INVALID;
}

static bool cgroup_context_has_cpu_shares(CGroupContext *c) {
        return c->cpu_shares != CGROUP_CPU_SHARES_INVALID ||
                c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID;
}

static bool cgroup_context_has_allowed_cpus(CGroupContext *c) {
        return c->cpuset_cpus.set || c->startup_cpuset_cpus.set;
}

static bool cgroup_context_has_allowed_mems(CGroupContext *c) {
        return c->cpuset_mems.set || c->startup_cpuset_mems.set;
}

static uint64_t cgroup_context_cpu_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING) &&
            c->startup_cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->startup_cpu_weight;
        else if (c->cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->cpu_weight;
        else
                return CGROUP_WEIGHT_DEFAULT;
}

static uint64_t cgroup_context_cpu_shares(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING) &&
            c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->startup_cpu_shares;
        else if (c->cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->cpu_shares;
        else
                return CGROUP_CPU_SHARES_DEFAULT;
}

static CPUSet *cgroup_context_allowed_cpus(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING) &&
            c->startup_cpuset_cpus.set)
                return &c->startup_cpuset_cpus;
        else
                return &c->cpuset_cpus;
}

static CPUSet *cgroup_context_allowed_mems(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING) &&
            c->startup_cpuset_mems.set)
                return &c->startup_cpuset_mems;
        else
                return &c->cpuset_mems;
}

usec_t cgroup_cpu_adjust_period(usec_t period, usec_t quota, usec_t resolution, usec_t max_period) {
        /* kernel uses a minimum resolution of 1ms, so both period and (quota * period)
         * need to be higher than that boundary. quota is specified in USecPerSec.
         * Additionally, period must be at most max_period. */
        assert(quota > 0);

        return MIN(MAX3(period, resolution, resolution * USEC_PER_SEC / quota), max_period);
}

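/* Worked example for the formula above: with CPUQuota=5% (quota = 50000 usec per second), resolution 1ms and a
 * requested 10ms period, the effective per-period quota would be 10ms * 5% = 0.5ms, below the kernel's 1ms
 * resolution. resolution * USEC_PER_SEC / quota = 1000 * 1000000 / 50000 = 20000, so the period is raised to
 * 20ms, where 5% again amounts to a full 1ms. */
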
static usec_t cgroup_cpu_adjust_period_and_log(Unit *u, usec_t period, usec_t quota) {
        usec_t new_period;

        if (quota == USEC_INFINITY)
                /* Always use default period for infinity quota. */
                return CGROUP_CPU_QUOTA_DEFAULT_PERIOD_USEC;

        if (period == USEC_INFINITY)
                /* Default period was requested. */
                period = CGROUP_CPU_QUOTA_DEFAULT_PERIOD_USEC;

        /* Clamp to interval [1ms, 1s] */
        new_period = cgroup_cpu_adjust_period(period, quota, USEC_PER_MSEC, USEC_PER_SEC);

        if (new_period != period) {
                log_unit_full(u, u->warned_clamping_cpu_quota_period ? LOG_DEBUG : LOG_WARNING,
                              "Clamping CPU interval for cpu.max: period is now %s",
                              FORMAT_TIMESPAN(new_period, 1));
                u->warned_clamping_cpu_quota_period = true;
        }

        return new_period;
}

static void cgroup_apply_unified_cpu_weight(Unit *u, uint64_t weight) {
        char buf[DECIMAL_STR_MAX(uint64_t) + 2];

        xsprintf(buf, "%" PRIu64 "\n", weight);
        (void) set_attribute_and_warn(u, "cpu", "cpu.weight", buf);
}

static void cgroup_apply_unified_cpu_quota(Unit *u, usec_t quota, usec_t period) {
        char buf[(DECIMAL_STR_MAX(usec_t) + 1) * 2 + 1];

        period = cgroup_cpu_adjust_period_and_log(u, period, quota);
        if (quota != USEC_INFINITY)
                xsprintf(buf, USEC_FMT " " USEC_FMT "\n",
                         MAX(quota * period / USEC_PER_SEC, USEC_PER_MSEC), period);
        else
                xsprintf(buf, "max " USEC_FMT "\n", period);
        (void) set_attribute_and_warn(u, "cpu", "cpu.max", buf);
}

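/* Resulting cpu.max contents, for illustration: CPUQuota=200% with the default 100ms period writes
 * "200000 100000\n" (2000000 * 100000 / 1000000 = 200000 usec of runtime per 100000 usec period), while an
 * unset quota writes "max 100000\n". */
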
static void cgroup_apply_legacy_cpu_shares(Unit *u, uint64_t shares) {
        char buf[DECIMAL_STR_MAX(uint64_t) + 2];

        xsprintf(buf, "%" PRIu64 "\n", shares);
        (void) set_attribute_and_warn(u, "cpu", "cpu.shares", buf);
}

static void cgroup_apply_legacy_cpu_quota(Unit *u, usec_t quota, usec_t period) {
        char buf[DECIMAL_STR_MAX(usec_t) + 2];

        period = cgroup_cpu_adjust_period_and_log(u, period, quota);

        xsprintf(buf, USEC_FMT "\n", period);
        (void) set_attribute_and_warn(u, "cpu", "cpu.cfs_period_us", buf);

        if (quota != USEC_INFINITY) {
                xsprintf(buf, USEC_FMT "\n", MAX(quota * period / USEC_PER_SEC, USEC_PER_MSEC));
                (void) set_attribute_and_warn(u, "cpu", "cpu.cfs_quota_us", buf);
        } else
                (void) set_attribute_and_warn(u, "cpu", "cpu.cfs_quota_us", "-1\n");
}

static uint64_t cgroup_cpu_shares_to_weight(uint64_t shares) {
        return CLAMP(shares * CGROUP_WEIGHT_DEFAULT / CGROUP_CPU_SHARES_DEFAULT,
                     CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}

static uint64_t cgroup_cpu_weight_to_shares(uint64_t weight) {
        return CLAMP(weight * CGROUP_CPU_SHARES_DEFAULT / CGROUP_WEIGHT_DEFAULT,
                     CGROUP_CPU_SHARES_MIN, CGROUP_CPU_SHARES_MAX);
}

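/* Both conversions scale around the defaults (CGROUP_CPU_SHARES_DEFAULT = 1024 maps to CGROUP_WEIGHT_DEFAULT
 * = 100), so e.g. CPUShares=2048 becomes CPUWeight=200 and CPUWeight=50 becomes CPUShares=512, clamped to the
 * valid range of the target attribute. */
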
static void cgroup_apply_unified_cpuset(Unit *u, const CPUSet *cpus, const char *name) {
        _cleanup_free_ char *buf = NULL;

        buf = cpu_set_to_range_string(cpus);
        if (!buf) {
                log_oom();
                return;
        }

        (void) set_attribute_and_warn(u, "cpuset", name, buf);
}

static bool cgroup_context_has_io_config(CGroupContext *c) {
        return c->io_accounting ||
                c->io_weight != CGROUP_WEIGHT_INVALID ||
                c->startup_io_weight != CGROUP_WEIGHT_INVALID ||
                c->io_device_weights ||
                c->io_device_latencies ||
                c->io_device_limits;
}

static bool cgroup_context_has_blockio_config(CGroupContext *c) {
        return c->blockio_accounting ||
                c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
                c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
                c->blockio_device_weights ||
                c->blockio_device_bandwidths;
}

static uint64_t cgroup_context_io_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING) &&
            c->startup_io_weight != CGROUP_WEIGHT_INVALID)
                return c->startup_io_weight;
        else if (c->io_weight != CGROUP_WEIGHT_INVALID)
                return c->io_weight;
        else
                return CGROUP_WEIGHT_DEFAULT;
}

static uint64_t cgroup_context_blkio_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING) &&
            c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
                return c->startup_blockio_weight;
        else if (c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
                return c->blockio_weight;
        else
                return CGROUP_BLKIO_WEIGHT_DEFAULT;
}

static uint64_t cgroup_weight_blkio_to_io(uint64_t blkio_weight) {
        return CLAMP(blkio_weight * CGROUP_WEIGHT_DEFAULT / CGROUP_BLKIO_WEIGHT_DEFAULT,
                     CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}

static uint64_t cgroup_weight_io_to_blkio(uint64_t io_weight) {
        return CLAMP(io_weight * CGROUP_BLKIO_WEIGHT_DEFAULT / CGROUP_WEIGHT_DEFAULT,
                     CGROUP_BLKIO_WEIGHT_MIN, CGROUP_BLKIO_WEIGHT_MAX);
}

static int set_bfq_weight(Unit *u, const char *controller, dev_t dev, uint64_t io_weight) {
        static const char * const prop_names[] = {
                "IOWeight",
                "BlockIOWeight",
                "IODeviceWeight",
                "BlockIODeviceWeight",
        };
        static bool warned = false;
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+STRLEN("\n")];
        const char *p;
        uint64_t bfq_weight;
        int r;

        /* FIXME: drop this function when distro kernels properly support BFQ through "io.weight"
         * See also: https://github.com/systemd/systemd/pull/13335 and
         * https://github.com/torvalds/linux/commit/65752aef0a407e1ef17ec78a7fc31ba4e0b360f9. */
        p = strjoina(controller, ".bfq.weight");
        /* Adjust to the kernel's range of 1..1000; the default is 100. */
        bfq_weight = BFQ_WEIGHT(io_weight);

        if (major(dev) > 0)
                xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), bfq_weight);
        else
                xsprintf(buf, "%" PRIu64 "\n", bfq_weight);

        r = cg_set_attribute(controller, u->cgroup_path, p, buf);

        /* FIXME: drop this when kernels prior to
         * 795fe54c2a82 ("bfq: Add per-device weight") v5.4
         * are not interesting anymore. Old kernels will fail with EINVAL, while new kernels won't return
         * EINVAL on properly formatted input by us. Treat EINVAL accordingly. */
        if (r == -EINVAL && major(dev) > 0) {
                if (!warned) {
                        log_unit_warning(u, "Kernel version does not accept per-device setting in %s.", p);
                        warned = true;
                }
                r = -EOPNOTSUPP; /* mask as unconfigured device */
        } else if (r >= 0 && io_weight != bfq_weight)
                log_unit_debug(u, "%s=%" PRIu64 " scaled to %s=%" PRIu64,
                               prop_names[2*(major(dev) > 0) + streq(controller, "blkio")],
                               io_weight, p, bfq_weight);

        return r;
}

static void cgroup_apply_io_device_weight(Unit *u, const char *dev_path, uint64_t io_weight) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r, r1, r2;

        if (lookup_block_device(dev_path, &dev) < 0)
                return;

        r1 = set_bfq_weight(u, "io", dev, io_weight);

        xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), io_weight);
        r2 = cg_set_attribute("io", u->cgroup_path, "io.weight", buf);

        /* Look at the configured device, when both fail, prefer io.weight errno. */
        r = r2 == -EOPNOTSUPP ? r1 : r2;

        if (r < 0)
                log_unit_full_errno(u, LOG_LEVEL_CGROUP_WRITE(r),
                                    r, "Failed to set 'io[.bfq].weight' attribute on '%s' to '%.*s': %m",
                                    empty_to_root(u->cgroup_path), (int) strcspn(buf, NEWLINE), buf);
}

static void cgroup_apply_blkio_device_weight(Unit *u, const char *dev_path, uint64_t blkio_weight) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), blkio_weight);
        (void) set_attribute_and_warn(u, "blkio", "blkio.weight_device", buf);
}

static void cgroup_apply_io_device_latency(Unit *u, const char *dev_path, usec_t target) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+7+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        if (target != USEC_INFINITY)
                xsprintf(buf, "%u:%u target=%" PRIu64 "\n", major(dev), minor(dev), target);
        else
                xsprintf(buf, "%u:%u target=max\n", major(dev), minor(dev));

        (void) set_attribute_and_warn(u, "io", "io.latency", buf);
}

static void cgroup_apply_io_device_limit(Unit *u, const char *dev_path, uint64_t *limits) {
        char limit_bufs[_CGROUP_IO_LIMIT_TYPE_MAX][DECIMAL_STR_MAX(uint64_t)],
             buf[DECIMAL_STR_MAX(dev_t)*2+2+(6+DECIMAL_STR_MAX(uint64_t)+1)*4];
        dev_t dev;

        if (lookup_block_device(dev_path, &dev) < 0)
                return;

        for (CGroupIOLimitType type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                if (limits[type] != cgroup_io_limit_defaults[type])
                        xsprintf(limit_bufs[type], "%" PRIu64, limits[type]);
                else
                        xsprintf(limit_bufs[type], "%s", limits[type] == CGROUP_LIMIT_MAX ? "max" : "0");

        xsprintf(buf, "%u:%u rbps=%s wbps=%s riops=%s wiops=%s\n", major(dev), minor(dev),
                 limit_bufs[CGROUP_IO_RBPS_MAX], limit_bufs[CGROUP_IO_WBPS_MAX],
                 limit_bufs[CGROUP_IO_RIOPS_MAX], limit_bufs[CGROUP_IO_WIOPS_MAX]);
        (void) set_attribute_and_warn(u, "io", "io.max", buf);
}

static void cgroup_apply_blkio_device_limit(Unit *u, const char *dev_path, uint64_t rbps, uint64_t wbps) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;

        if (lookup_block_device(dev_path, &dev) < 0)
                return;

        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), rbps);
        (void) set_attribute_and_warn(u, "blkio", "blkio.throttle.read_bps_device", buf);

        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), wbps);
        (void) set_attribute_and_warn(u, "blkio", "blkio.throttle.write_bps_device", buf);
}

static bool unit_has_unified_memory_config(Unit *u) {
        CGroupContext *c;

        assert(u);

        assert_se(c = unit_get_cgroup_context(u));

        return unit_get_ancestor_memory_min(u) > 0 || unit_get_ancestor_memory_low(u) > 0 ||
               c->memory_high != CGROUP_LIMIT_MAX || c->memory_max != CGROUP_LIMIT_MAX ||
               c->memory_swap_max != CGROUP_LIMIT_MAX;
}

static void cgroup_apply_unified_memory_limit(Unit *u, const char *file, uint64_t v) {
        char buf[DECIMAL_STR_MAX(uint64_t) + 1] = "max\n";

        if (v != CGROUP_LIMIT_MAX)
                xsprintf(buf, "%" PRIu64 "\n", v);

        (void) set_attribute_and_warn(u, "memory", file, buf);
}

static void cgroup_apply_firewall(Unit *u) {
        assert(u);

        /* Best-effort: let's apply IP firewalling and/or accounting if that's enabled */

        if (bpf_firewall_compile(u) < 0)
                return;

        (void) bpf_firewall_load_custom(u);
        (void) bpf_firewall_install(u);
}

static void cgroup_apply_socket_bind(Unit *u) {
        assert(u);

        (void) bpf_socket_bind_install(u);
}

static void cgroup_apply_restrict_network_interfaces(Unit *u) {
        assert(u);

        (void) restrict_network_interfaces_install(u);
}

static int cgroup_apply_devices(Unit *u) {
        _cleanup_(bpf_program_freep) BPFProgram *prog = NULL;
        const char *path;
        CGroupContext *c;
        CGroupDevicePolicy policy;
        int r;

        assert_se(c = unit_get_cgroup_context(u));
        assert_se(path = u->cgroup_path);

        policy = c->device_policy;

        if (cg_all_unified() > 0) {
                r = bpf_devices_cgroup_init(&prog, policy, c->device_allow);
                if (r < 0)
                        return log_unit_warning_errno(u, r, "Failed to initialize device control bpf program: %m");

        } else {
                /* Changing the devices list of a populated cgroup might result in EINVAL, hence ignore
                 * EINVAL here. */

                if (c->device_allow || policy != CGROUP_DEVICE_POLICY_AUTO)
                        r = cg_set_attribute("devices", path, "devices.deny", "a");
                else
                        r = cg_set_attribute("devices", path, "devices.allow", "a");
                if (r < 0)
                        log_unit_full_errno(u, IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES, -EPERM) ? LOG_DEBUG : LOG_WARNING, r,
                                            "Failed to reset devices.allow/devices.deny: %m");
        }

        bool allow_list_static = policy == CGROUP_DEVICE_POLICY_CLOSED ||
                (policy == CGROUP_DEVICE_POLICY_AUTO && c->device_allow);
        if (allow_list_static)
                (void) bpf_devices_allow_list_static(prog, path);

        bool any = allow_list_static;
        LIST_FOREACH(device_allow, a, c->device_allow) {
                char acc[4], *val;
                unsigned k = 0;

                if (a->r)
                        acc[k++] = 'r';
                if (a->w)
                        acc[k++] = 'w';
                if (a->m)
                        acc[k++] = 'm';

                if (k == 0)
                        continue;

                acc[k++] = 0;

                if (path_startswith(a->path, "/dev/"))
                        r = bpf_devices_allow_list_device(prog, path, a->path, acc);
                else if ((val = startswith(a->path, "block-")))
                        r = bpf_devices_allow_list_major(prog, path, val, 'b', acc);
                else if ((val = startswith(a->path, "char-")))
                        r = bpf_devices_allow_list_major(prog, path, val, 'c', acc);
                else {
                        log_unit_debug(u, "Ignoring device '%s' while writing cgroup attribute.", a->path);
                        continue;
                }

                if (r >= 0)
                        any = true;
        }

        if (c->device_allow && !any) {
                log_unit_warning_errno(u, SYNTHETIC_ERRNO(ENODEV), "No devices matched by device filter.");

                /* The kernel verifier would reject a program we would build with the normal intro and outro
                   but no allow-listing rules (outro would contain an unreachable instruction for successful
                   return). */
                policy = CGROUP_DEVICE_POLICY_STRICT;
        }

        r = bpf_devices_apply_policy(&prog, policy, any, path, &u->bpf_device_control_installed);
        if (r < 0) {
                static bool warned = false;

                log_full_errno(warned ? LOG_DEBUG : LOG_WARNING, r,
                               "Unit %s configures device ACL, but the local system doesn't seem to support the BPF-based device controller.\n"
                               "Proceeding WITHOUT applying ACL (all devices will be accessible)!\n"
                               "(This warning is only shown for the first loaded unit using device ACL.)", u->id);

                warned = true;
        }

        return r;
}

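/* The allow-list entries handled above come straight from DeviceAllow= lines: a "/dev/..." node path, or a
 * "block-<name>"/"char-<name>" major group (e.g. DeviceAllow=block-loop rw), each combined with an access
 * string assembled from the entry's r/w/m bits. */
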
static void set_io_weight(Unit *u, uint64_t weight) {
        char buf[STRLEN("default \n")+DECIMAL_STR_MAX(uint64_t)];

        assert(u);

        (void) set_bfq_weight(u, "io", makedev(0, 0), weight);

        xsprintf(buf, "default %" PRIu64 "\n", weight);
        (void) set_attribute_and_warn(u, "io", "io.weight", buf);
}

static void set_blkio_weight(Unit *u, uint64_t weight) {
        char buf[STRLEN("\n")+DECIMAL_STR_MAX(uint64_t)];

        assert(u);

        (void) set_bfq_weight(u, "blkio", makedev(0, 0), weight);

        xsprintf(buf, "%" PRIu64 "\n", weight);
        (void) set_attribute_and_warn(u, "blkio", "blkio.weight", buf);
}

static void cgroup_apply_bpf_foreign_program(Unit *u) {
        assert(u);

        (void) bpf_foreign_install(u);
}

static void cgroup_context_apply(
                Unit *u,
                CGroupMask apply_mask,
                ManagerState state) {

        const char *path;
        CGroupContext *c;
        bool is_host_root, is_local_root;
        int r;

        assert(u);

        /* Nothing to do? Exit early! */
        if (apply_mask == 0)
                return;

        /* Some cgroup attributes are not supported on the host root cgroup, hence silently ignore them here. And other
         * attributes should only be managed for cgroups further down the tree. */
        is_local_root = unit_has_name(u, SPECIAL_ROOT_SLICE);
        is_host_root = unit_has_host_root_cgroup(u);

        assert_se(c = unit_get_cgroup_context(u));
        assert_se(path = u->cgroup_path);

        if (is_local_root) /* Make sure we don't try to display messages with an empty path. */
                path = "/";

        /* We generally ignore errors caused by read-only mounted cgroup trees (assuming we are running in a container
         * then), and missing cgroups, i.e. EROFS and ENOENT. */

        /* In fully unified mode these attributes don't exist on the host cgroup root. On legacy the weights exist, but
         * setting the weight makes very little sense on the host root cgroup, as there are no other cgroups at this
         * level. The quota exists there too, but any attempt to write to it is refused with EINVAL. Inside of
         * containers we want to leave control of these to the container manager (and if cgroup v2 delegation is used
         * we couldn't even write to them if we wanted to). */
        if ((apply_mask & CGROUP_MASK_CPU) && !is_local_root) {

                if (cg_all_unified() > 0) {
                        uint64_t weight;

                        if (cgroup_context_has_cpu_weight(c))
                                weight = cgroup_context_cpu_weight(c, state);
                        else if (cgroup_context_has_cpu_shares(c)) {
                                uint64_t shares;

                                shares = cgroup_context_cpu_shares(c, state);
                                weight = cgroup_cpu_shares_to_weight(shares);

                                log_cgroup_compat(u, "Applying [Startup]CPUShares=%" PRIu64 " as [Startup]CPUWeight=%" PRIu64 " on %s",
                                                  shares, weight, path);
                        } else
                                weight = CGROUP_WEIGHT_DEFAULT;

                        cgroup_apply_unified_cpu_weight(u, weight);
                        cgroup_apply_unified_cpu_quota(u, c->cpu_quota_per_sec_usec, c->cpu_quota_period_usec);

                } else {
                        uint64_t shares;

                        if (cgroup_context_has_cpu_weight(c)) {
                                uint64_t weight;

                                weight = cgroup_context_cpu_weight(c, state);
                                shares = cgroup_cpu_weight_to_shares(weight);

                                log_cgroup_compat(u, "Applying [Startup]CPUWeight=%" PRIu64 " as [Startup]CPUShares=%" PRIu64 " on %s",
                                                  weight, shares, path);
                        } else if (cgroup_context_has_cpu_shares(c))
                                shares = cgroup_context_cpu_shares(c, state);
                        else
                                shares = CGROUP_CPU_SHARES_DEFAULT;

                        cgroup_apply_legacy_cpu_shares(u, shares);
                        cgroup_apply_legacy_cpu_quota(u, c->cpu_quota_per_sec_usec, c->cpu_quota_period_usec);
                }
        }

        if ((apply_mask & CGROUP_MASK_CPUSET) && !is_local_root) {
                cgroup_apply_unified_cpuset(u, cgroup_context_allowed_cpus(c, state), "cpuset.cpus");
                cgroup_apply_unified_cpuset(u, cgroup_context_allowed_mems(c, state), "cpuset.mems");
        }

        /* The 'io' controller attributes are not exported on the host's root cgroup (being a pure cgroup v2
         * controller), and in case of containers we want to leave control of these attributes to the container manager
         * (and we couldn't access that stuff anyway, even if we tried if proper delegation is used). */
        if ((apply_mask & CGROUP_MASK_IO) && !is_local_root) {
                bool has_io, has_blockio;
                uint64_t weight;

                has_io = cgroup_context_has_io_config(c);
                has_blockio = cgroup_context_has_blockio_config(c);

                if (has_io)
                        weight = cgroup_context_io_weight(c, state);
                else if (has_blockio) {
                        uint64_t blkio_weight;

                        blkio_weight = cgroup_context_blkio_weight(c, state);
                        weight = cgroup_weight_blkio_to_io(blkio_weight);

                        log_cgroup_compat(u, "Applying [Startup]BlockIOWeight=%" PRIu64 " as [Startup]IOWeight=%" PRIu64,
                                          blkio_weight, weight);
                } else
                        weight = CGROUP_WEIGHT_DEFAULT;

                set_io_weight(u, weight);

                if (has_io) {
                        LIST_FOREACH(device_weights, w, c->io_device_weights)
                                cgroup_apply_io_device_weight(u, w->path, w->weight);

                        LIST_FOREACH(device_limits, limit, c->io_device_limits)
                                cgroup_apply_io_device_limit(u, limit->path, limit->limits);

                        LIST_FOREACH(device_latencies, latency, c->io_device_latencies)
                                cgroup_apply_io_device_latency(u, latency->path, latency->target_usec);

                } else if (has_blockio) {
                        LIST_FOREACH(device_weights, w, c->blockio_device_weights) {
                                weight = cgroup_weight_blkio_to_io(w->weight);

                                log_cgroup_compat(u, "Applying BlockIODeviceWeight=%" PRIu64 " as IODeviceWeight=%" PRIu64 " for %s",
                                                  w->weight, weight, w->path);

                                cgroup_apply_io_device_weight(u, w->path, weight);
                        }

                        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                                uint64_t limits[_CGROUP_IO_LIMIT_TYPE_MAX];

                                for (CGroupIOLimitType type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                                        limits[type] = cgroup_io_limit_defaults[type];

                                limits[CGROUP_IO_RBPS_MAX] = b->rbps;
                                limits[CGROUP_IO_WBPS_MAX] = b->wbps;

                                log_cgroup_compat(u, "Applying BlockIO{Read|Write}Bandwidth=%" PRIu64 " %" PRIu64 " as IO{Read|Write}BandwidthMax= for %s",
                                                  b->rbps, b->wbps, b->path);

                                cgroup_apply_io_device_limit(u, b->path, limits);
                        }
                }
        }

        if (apply_mask & CGROUP_MASK_BLKIO) {
                bool has_io, has_blockio;

                has_io = cgroup_context_has_io_config(c);
                has_blockio = cgroup_context_has_blockio_config(c);

                /* Applying a 'weight' never makes sense for the host root cgroup, and for containers this should be
                 * left to our container manager, too. */
                if (!is_local_root) {
                        uint64_t weight;

                        if (has_io) {
                                uint64_t io_weight;

                                io_weight = cgroup_context_io_weight(c, state);
                                weight = cgroup_weight_io_to_blkio(cgroup_context_io_weight(c, state));

                                log_cgroup_compat(u, "Applying [Startup]IOWeight=%" PRIu64 " as [Startup]BlockIOWeight=%" PRIu64,
                                                  io_weight, weight);
                        } else if (has_blockio)
                                weight = cgroup_context_blkio_weight(c, state);
                        else
                                weight = CGROUP_BLKIO_WEIGHT_DEFAULT;

                        set_blkio_weight(u, weight);

                        if (has_io)
                                LIST_FOREACH(device_weights, w, c->io_device_weights) {
                                        weight = cgroup_weight_io_to_blkio(w->weight);

                                        log_cgroup_compat(u, "Applying IODeviceWeight=%" PRIu64 " as BlockIODeviceWeight=%" PRIu64 " for %s",
                                                          w->weight, weight, w->path);

                                        cgroup_apply_blkio_device_weight(u, w->path, weight);
                                }
                        else if (has_blockio)
                                LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                                        cgroup_apply_blkio_device_weight(u, w->path, w->weight);
                }

                /* The bandwidth limits are something that make sense to be applied to the host's root but not container
                 * roots, as there we want the container manager to handle it */
                if (is_host_root || !is_local_root) {
                        if (has_io)
                                LIST_FOREACH(device_limits, l, c->io_device_limits) {
                                        log_cgroup_compat(u, "Applying IO{Read|Write}Bandwidth=%" PRIu64 " %" PRIu64 " as BlockIO{Read|Write}BandwidthMax= for %s",
                                                          l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX], l->path);

                                        cgroup_apply_blkio_device_limit(u, l->path, l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX]);
                                }
                        else if (has_blockio)
                                LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths)
                                        cgroup_apply_blkio_device_limit(u, b->path, b->rbps, b->wbps);
                }
        }

        /* In unified mode 'memory' attributes do not exist on the root cgroup. In legacy mode 'memory.limit_in_bytes'
         * exists on the root cgroup, but any writes to it are refused with EINVAL. And if we run in a container we
         * want to leave control to the container manager (and if proper cgroup v2 delegation is used we couldn't even
         * write to this if we wanted to.) */
        if ((apply_mask & CGROUP_MASK_MEMORY) && !is_local_root) {

                if (cg_all_unified() > 0) {
                        uint64_t max, swap_max = CGROUP_LIMIT_MAX;

                        if (unit_has_unified_memory_config(u)) {
                                max = c->memory_max;
                                swap_max = c->memory_swap_max;
                        } else {
                                max = c->memory_limit;

                                if (max != CGROUP_LIMIT_MAX)
                                        log_cgroup_compat(u, "Applying MemoryLimit=%" PRIu64 " as MemoryMax=", max);
                        }

                        cgroup_apply_unified_memory_limit(u, "memory.min", unit_get_ancestor_memory_min(u));
                        cgroup_apply_unified_memory_limit(u, "memory.low", unit_get_ancestor_memory_low(u));
                        cgroup_apply_unified_memory_limit(u, "memory.high", c->memory_high);
                        cgroup_apply_unified_memory_limit(u, "memory.max", max);
                        cgroup_apply_unified_memory_limit(u, "memory.swap.max", swap_max);

                        (void) set_attribute_and_warn(u, "memory", "memory.oom.group", one_zero(c->memory_oom_group));

                } else {
                        char buf[DECIMAL_STR_MAX(uint64_t) + 1];
                        uint64_t val;

                        if (unit_has_unified_memory_config(u)) {
                                val = c->memory_max;
                                log_cgroup_compat(u, "Applying MemoryMax=%" PRIu64 " as MemoryLimit=", val);
                        } else
                                val = c->memory_limit;

                        if (val == CGROUP_LIMIT_MAX)
                                strncpy(buf, "-1\n", sizeof(buf));
                        else
                                xsprintf(buf, "%" PRIu64 "\n", val);

                        (void) set_attribute_and_warn(u, "memory", "memory.limit_in_bytes", buf);
                }
        }

        /* On cgroup v2 we can apply BPF everywhere. On cgroup v1 we apply it everywhere except for the root of
         * containers, where we leave this to the manager */
        if ((apply_mask & (CGROUP_MASK_DEVICES | CGROUP_MASK_BPF_DEVICES)) &&
            (is_host_root || cg_all_unified() > 0 || !is_local_root))
                (void) cgroup_apply_devices(u);

        if (apply_mask & CGROUP_MASK_PIDS) {

                if (is_host_root) {
                        /* So, the "pids" controller does not expose anything on the root cgroup, in order not to
                         * replicate knobs exposed elsewhere needlessly. We abstract this away here however, and when
                         * the knobs of the root cgroup are modified propagate this to the relevant sysctls. There's a
                         * non-obvious asymmetry however: unlike the cgroup properties we don't really want to take
                         * exclusive ownership of the sysctls, but we still want to honour things if the user sets
                         * limits. Hence we employ sort of a one-way strategy: when the user sets a bounded limit
                         * through us it counts. When the user afterwards unsets it again (i.e. sets it to unbounded)
                         * it also counts. But if the user never set a limit through us (i.e. we are the default of
                         * "unbounded") we leave things unmodified. For this we manage a global boolean that we turn on
                         * the first time we set a limit. Note that this boolean is flushed out on manager reload,
                         * which is desirable so that there's an official way to release control of the sysctl from
                         * systemd: set the limit to unbounded and reload. */

                        if (tasks_max_isset(&c->tasks_max)) {
                                u->manager->sysctl_pid_max_changed = true;
                                r = procfs_tasks_set_limit(tasks_max_resolve(&c->tasks_max));
                        } else if (u->manager->sysctl_pid_max_changed)
                                r = procfs_tasks_set_limit(TASKS_MAX);
                        else
                                r = 0;
                        if (r < 0)
                                log_unit_full_errno(u, LOG_LEVEL_CGROUP_WRITE(r), r,
                                                    "Failed to write to tasks limit sysctls: %m");
                }

                /* The attribute itself is not available on the host root cgroup, and in the container case we want to
                 * leave it for the container manager. */
                if (!is_local_root) {
                        if (tasks_max_isset(&c->tasks_max)) {
                                char buf[DECIMAL_STR_MAX(uint64_t) + 1];

                                xsprintf(buf, "%" PRIu64 "\n", tasks_max_resolve(&c->tasks_max));
                                (void) set_attribute_and_warn(u, "pids", "pids.max", buf);
                        } else
                                (void) set_attribute_and_warn(u, "pids", "pids.max", "max\n");
                }
        }

        if (apply_mask & CGROUP_MASK_BPF_FIREWALL)
                cgroup_apply_firewall(u);

        if (apply_mask & CGROUP_MASK_BPF_FOREIGN)
                cgroup_apply_bpf_foreign_program(u);

        if (apply_mask & CGROUP_MASK_BPF_SOCKET_BIND)
                cgroup_apply_socket_bind(u);

        if (apply_mask & CGROUP_MASK_BPF_RESTRICT_NETWORK_INTERFACES)
                cgroup_apply_restrict_network_interfaces(u);
}

static bool unit_get_needs_bpf_firewall(Unit *u) {
        CGroupContext *c;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        if (c->ip_accounting ||
            !set_isempty(c->ip_address_allow) ||
            !set_isempty(c->ip_address_deny) ||
            c->ip_filters_ingress ||
            c->ip_filters_egress)
                return true;

        /* If any parent slice has an IP access list defined, it applies too */
        for (Unit *p = UNIT_GET_SLICE(u); p; p = UNIT_GET_SLICE(p)) {
                c = unit_get_cgroup_context(p);
                if (!c)
                        return false;

                if (!set_isempty(c->ip_address_allow) ||
                    !set_isempty(c->ip_address_deny))
                        return true;
        }

        return false;
}

static bool unit_get_needs_bpf_foreign_program(Unit *u) {
        CGroupContext *c;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        return !LIST_IS_EMPTY(c->bpf_foreign_programs);
}

static bool unit_get_needs_socket_bind(Unit *u) {
        CGroupContext *c;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        return c->socket_bind_allow || c->socket_bind_deny;
}

static bool unit_get_needs_restrict_network_interfaces(Unit *u) {
        CGroupContext *c;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        return !set_isempty(c->restrict_network_interfaces);
}

static CGroupMask unit_get_cgroup_mask(Unit *u) {
        CGroupMask mask = 0;
        CGroupContext *c;

        assert(u);

        assert_se(c = unit_get_cgroup_context(u));

        /* Figure out which controllers we need, based on the cgroup context object */

        if (c->cpu_accounting)
                mask |= get_cpu_accounting_mask();

        if (cgroup_context_has_cpu_weight(c) ||
            cgroup_context_has_cpu_shares(c) ||
            c->cpu_quota_per_sec_usec != USEC_INFINITY)
                mask |= CGROUP_MASK_CPU;

        if (cgroup_context_has_allowed_cpus(c) || cgroup_context_has_allowed_mems(c))
                mask |= CGROUP_MASK_CPUSET;

        if (cgroup_context_has_io_config(c) || cgroup_context_has_blockio_config(c))
                mask |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;

        if (c->memory_accounting ||
            c->memory_limit != CGROUP_LIMIT_MAX ||
            unit_has_unified_memory_config(u))
                mask |= CGROUP_MASK_MEMORY;

        if (c->device_allow ||
            c->device_policy != CGROUP_DEVICE_POLICY_AUTO)
                mask |= CGROUP_MASK_DEVICES | CGROUP_MASK_BPF_DEVICES;

        if (c->tasks_accounting ||
            tasks_max_isset(&c->tasks_max))
                mask |= CGROUP_MASK_PIDS;

        return CGROUP_MASK_EXTEND_JOINED(mask);
}

static CGroupMask unit_get_bpf_mask(Unit *u) {
        CGroupMask mask = 0;

        /* Figure out which controllers we need, based on the cgroup context, possibly taking into account children
         * of the unit too. */

        if (unit_get_needs_bpf_firewall(u))
                mask |= CGROUP_MASK_BPF_FIREWALL;

        if (unit_get_needs_bpf_foreign_program(u))
                mask |= CGROUP_MASK_BPF_FOREIGN;

        if (unit_get_needs_socket_bind(u))
                mask |= CGROUP_MASK_BPF_SOCKET_BIND;

        if (unit_get_needs_restrict_network_interfaces(u))
                mask |= CGROUP_MASK_BPF_RESTRICT_NETWORK_INTERFACES;

        return mask;
}

CGroupMask unit_get_own_mask(Unit *u) {
        CGroupContext *c;

        /* Returns the mask of controllers the unit needs for itself. If a unit is not properly loaded, return an empty
         * mask, as we shouldn't reflect it in the cgroup hierarchy then. */

        if (u->load_state != UNIT_LOADED)
                return 0;

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        return unit_get_cgroup_mask(u) | unit_get_bpf_mask(u) | unit_get_delegate_mask(u);
}

CGroupMask unit_get_delegate_mask(Unit *u) {
        CGroupContext *c;

        /* If delegation is turned on, then turn on selected controllers, unless we are on the legacy hierarchy and the
         * process we fork into is known to drop privileges, and hence shouldn't get access to the controllers.
         *
         * Note that on the unified hierarchy it is safe to delegate controllers to unprivileged services. */

        if (!unit_cgroup_delegate(u))
                return 0;

        if (cg_all_unified() <= 0) {
                ExecContext *e;

                e = unit_get_exec_context(u);
                if (e && !exec_context_maintains_privileges(e))
                        return 0;
        }

        assert_se(c = unit_get_cgroup_context(u));
        return CGROUP_MASK_EXTEND_JOINED(c->delegate_controllers);
}

static CGroupMask unit_get_subtree_mask(Unit *u) {

        /* Returns the mask of this subtree, meaning of the group
         * itself and its children. */

        return unit_get_own_mask(u) | unit_get_members_mask(u);
}

CGroupMask unit_get_members_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's children require, merged */

        if (u->cgroup_members_mask_valid)
                return u->cgroup_members_mask; /* Use cached value if possible */

        u->cgroup_members_mask = 0;

        if (u->type == UNIT_SLICE) {
                Unit *member;

                UNIT_FOREACH_DEPENDENCY(member, u, UNIT_ATOM_SLICE_OF)
                        u->cgroup_members_mask |= unit_get_subtree_mask(member); /* note that this calls ourselves again, for the children */
        }

        u->cgroup_members_mask_valid = true;
        return u->cgroup_members_mask;
}

CGroupMask unit_get_siblings_mask(Unit *u) {
        Unit *slice;
        assert(u);

        /* Returns the mask of controllers all of the unit's siblings
         * require, i.e. the members mask of the unit's parent slice
         * if there is one. */

        slice = UNIT_GET_SLICE(u);
        if (slice)
                return unit_get_members_mask(slice);

        return unit_get_subtree_mask(u); /* we are the top-level slice */
}

static CGroupMask unit_get_disable_mask(Unit *u) {
        CGroupContext *c;

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        return c->disable_controllers;
}

CGroupMask unit_get_ancestor_disable_mask(Unit *u) {
        CGroupMask mask;
        Unit *slice;

        assert(u);

        mask = unit_get_disable_mask(u);

        /* Returns the mask of controllers which are marked as forcibly
         * disabled in any ancestor unit or the unit in question. */

        slice = UNIT_GET_SLICE(u);
        if (slice)
                mask |= unit_get_ancestor_disable_mask(slice);

        return mask;
}

CGroupMask unit_get_target_mask(Unit *u) {
        CGroupMask own_mask, mask;

        /* This returns the cgroup mask of all controllers to enable for a specific cgroup, i.e. everything
         * it needs itself, plus all that its children need, plus all that its siblings need. This is
         * primarily useful on the legacy cgroup hierarchy, where we need to duplicate each cgroup in each
         * hierarchy that shall be enabled for it. */

        own_mask = unit_get_own_mask(u);

        if (own_mask & CGROUP_MASK_BPF_FIREWALL & ~u->manager->cgroup_supported)
                emit_bpf_firewall_warning(u);

        mask = own_mask | unit_get_members_mask(u) | unit_get_siblings_mask(u);

        mask &= u->manager->cgroup_supported;
        mask &= ~unit_get_ancestor_disable_mask(u);

        return mask;
}

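/* Worked example (hypothetical masks, not from the original source): suppose the unit
 * itself needs CPU, its children need MEMORY, a sibling needs IO, the manager supports
 * CPU|MEMORY|IO|PIDS, and an ancestor slice sets DisableControllers=io. Then:
 *
 *     own      = CPU
 *     members  = MEMORY
 *     siblings = IO
 *     mask     = (CPU|MEMORY|IO) & (CPU|MEMORY|IO|PIDS) & ~IO = CPU|MEMORY
 *
 * i.e. an ancestor's disable mask always wins over anything the subtree asks for. */
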
CGroupMask unit_get_enable_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable
         * for the children of a specific cgroup. This is primarily
         * useful for the unified cgroup hierarchy, where each cgroup
         * controls which controllers are enabled for its children. */

        mask = unit_get_members_mask(u);
        mask &= u->manager->cgroup_supported;
        mask &= ~unit_get_ancestor_disable_mask(u);

        return mask;
}

void unit_invalidate_cgroup_members_masks(Unit *u) {
        Unit *slice;

        assert(u);

        /* Recurse invalidate the member masks cache all the way up the tree */
        u->cgroup_members_mask_valid = false;

        slice = UNIT_GET_SLICE(u);
        if (slice)
                unit_invalidate_cgroup_members_masks(slice);
}

const char *unit_get_realized_cgroup_path(Unit *u, CGroupMask mask) {

        /* Returns the realized cgroup path of the specified unit where all specified controllers are available. */

        while (u) {

                if (u->cgroup_path &&
                    u->cgroup_realized &&
                    FLAGS_SET(u->cgroup_realized_mask, mask))
                        return u->cgroup_path;

                u = UNIT_GET_SLICE(u);
        }

        return NULL;
}

static const char *migrate_callback(CGroupMask mask, void *userdata) {
        /* If not realized at all, migrate to root ("").
         * It may happen if we're upgrading from an older version that didn't clean up. */
        return strempty(unit_get_realized_cgroup_path(userdata, mask));
}

char *unit_default_cgroup_path(const Unit *u) {
        _cleanup_free_ char *escaped = NULL, *slice_path = NULL;
        Unit *slice;
        int r;

        assert(u);

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return strdup(u->manager->cgroup_root);

        slice = UNIT_GET_SLICE(u);
        if (slice && !unit_has_name(slice, SPECIAL_ROOT_SLICE)) {
                r = cg_slice_to_path(slice->id, &slice_path);
                if (r < 0)
                        return NULL;
        }

        escaped = cg_escape(u->id);
        if (!escaped)
                return NULL;

        return path_join(empty_to_root(u->manager->cgroup_root), slice_path, escaped);
}

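/* Example (hypothetical unit names): for a unit "foo.service" in "bar.slice" with an
 * empty cgroup_root this returns "/bar.slice/foo.service". cg_slice_to_path() expands
 * nested slice names (e.g. "bar-baz.slice" becomes "bar.slice/bar-baz.slice"), and
 * cg_escape() neutralizes unit names that would collide with kernel-reserved cgroupfs
 * file names. */
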
int unit_set_cgroup_path(Unit *u, const char *path) {
        _cleanup_free_ char *p = NULL;
        int r;

        assert(u);

        if (streq_ptr(u->cgroup_path, path))
                return 0;

        if (path) {
                p = strdup(path);
                if (!p)
                        return -ENOMEM;

                r = hashmap_put(u->manager->cgroup_unit, p, u);
                if (r < 0)
                        return r;
        }

        unit_release_cgroup(u);
        u->cgroup_path = TAKE_PTR(p);

        return 1;
}

int unit_watch_cgroup(Unit *u) {
        _cleanup_free_ char *events = NULL;
        int r;

        assert(u);

        /* Watches the "cgroup.events" attribute of this unit's cgroup for "empty" events, but only if
         * cgroupv2 is available. */

        if (!u->cgroup_path)
                return 0;

        if (u->cgroup_control_inotify_wd >= 0)
                return 0;

        /* Only applies to the unified hierarchy */
        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return log_error_errno(r, "Failed to determine whether the name=systemd hierarchy is unified: %m");
        if (r == 0)
                return 0;

        /* No point in watching the top-level slice, it's never going to run empty. */
        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        r = hashmap_ensure_allocated(&u->manager->cgroup_control_inotify_wd_unit, &trivial_hash_ops);
        if (r < 0)
                return log_oom();

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events", &events);
        if (r < 0)
                return log_oom();

        u->cgroup_control_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, events, IN_MODIFY);
        if (u->cgroup_control_inotify_wd < 0) {

                if (errno == ENOENT) /* If the directory is already gone we don't need to track it, so this
                                      * is not an error */
                        return 0;

                return log_unit_error_errno(u, errno, "Failed to add control inotify watch descriptor for control group %s: %m", empty_to_root(u->cgroup_path));
        }

        r = hashmap_put(u->manager->cgroup_control_inotify_wd_unit, INT_TO_PTR(u->cgroup_control_inotify_wd), u);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to add control inotify watch descriptor for control group %s to hash map: %m", empty_to_root(u->cgroup_path));

        return 0;
}

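/* For reference, a cgroupv2 "cgroup.events" file looks like this (sample contents):
 *
 *     populated 1
 *     frozen 0
 *
 * The IN_MODIFY watch registered above fires whenever the kernel rewrites this file,
 * and unit_check_cgroup_events() then re-reads both keys. */
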
int unit_watch_cgroup_memory(Unit *u) {
        _cleanup_free_ char *events = NULL;
        CGroupContext *c;
        int r;

        assert(u);

        /* Watches the "memory.events" attribute of this unit's cgroup for "oom_kill" events, but only if
         * cgroupv2 is available. */

        if (!u->cgroup_path)
                return 0;

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        /* The "memory.events" attribute is only available if the memory controller is on. Let's hence tie
         * this to memory accounting, in a way watching for OOM kills is a form of memory accounting after
         * all. */
        if (!c->memory_accounting)
                return 0;

        /* Don't watch inner nodes, as the kernel doesn't report oom_kill events recursively currently, and
         * we also don't want to generate a log message for each parent cgroup of a process. */
        if (u->type == UNIT_SLICE)
                return 0;

        if (u->cgroup_memory_inotify_wd >= 0)
                return 0;

        /* Only applies to the unified hierarchy */
        r = cg_all_unified();
        if (r < 0)
                return log_error_errno(r, "Failed to determine whether the memory controller is unified: %m");
        if (r == 0)
                return 0;

        r = hashmap_ensure_allocated(&u->manager->cgroup_memory_inotify_wd_unit, &trivial_hash_ops);
        if (r < 0)
                return log_oom();

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "memory.events", &events);
        if (r < 0)
                return log_oom();

        u->cgroup_memory_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, events, IN_MODIFY);
        if (u->cgroup_memory_inotify_wd < 0) {

                if (errno == ENOENT) /* If the directory is already gone we don't need to track it, so this
                                      * is not an error */
                        return 0;

                return log_unit_error_errno(u, errno, "Failed to add memory inotify watch descriptor for control group %s: %m", empty_to_root(u->cgroup_path));
        }

        r = hashmap_put(u->manager->cgroup_memory_inotify_wd_unit, INT_TO_PTR(u->cgroup_memory_inotify_wd), u);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to add memory inotify watch descriptor for control group %s to hash map: %m", empty_to_root(u->cgroup_path));

        return 0;
}

int unit_pick_cgroup_path(Unit *u) {
        _cleanup_free_ char *path = NULL;
        int r;

        assert(u);

        if (u->cgroup_path)
                return 0;

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EINVAL;

        path = unit_default_cgroup_path(u);
        if (!path)
                return log_oom();

        r = unit_set_cgroup_path(u, path);
        if (r == -EEXIST)
                return log_unit_error_errno(u, r, "Control group %s exists already.", empty_to_root(path));
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to set unit's control group path to %s: %m", empty_to_root(path));

        return 0;
}

static int unit_update_cgroup(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask,
                ManagerState state) {

        bool created, is_root_slice;
        CGroupMask migrate_mask = 0;
        _cleanup_free_ char *cgroup_full_path = NULL;
        int r;

        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* Figure out our cgroup path */
        r = unit_pick_cgroup_path(u);
        if (r < 0)
                return r;

        /* First, create our own group */
        r = cg_create_everywhere(u->manager->cgroup_supported, target_mask, u->cgroup_path);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to create cgroup %s: %m", empty_to_root(u->cgroup_path));
        created = r;

        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0) {
                uint64_t cgroup_id = 0;

                r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, NULL, &cgroup_full_path);
                if (r >= 0) {
                        r = cg_path_get_cgroupid(cgroup_full_path, &cgroup_id);
                        if (r < 0)
                                log_unit_full_errno(u, ERRNO_IS_NOT_SUPPORTED(r) ? LOG_DEBUG : LOG_WARNING, r,
                                                    "Failed to get cgroup ID of cgroup %s, ignoring: %m", cgroup_full_path);
                } else
                        log_unit_warning_errno(u, r, "Failed to get full cgroup path on cgroup %s, ignoring: %m", empty_to_root(u->cgroup_path));

                u->cgroup_id = cgroup_id;
        }

        /* Start watching it */
        (void) unit_watch_cgroup(u);
        (void) unit_watch_cgroup_memory(u);

        /* For v2 we preserve enabled controllers in delegated units, adjust others,
         * for v1 we figure out which controller hierarchies need migration. */
        if (created || !u->cgroup_realized || !unit_cgroup_delegate(u)) {
                CGroupMask result_mask = 0;

                /* Enable all controllers we need */
                r = cg_enable_everywhere(u->manager->cgroup_supported, enable_mask, u->cgroup_path, &result_mask);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to enable/disable controllers on cgroup %s, ignoring: %m", empty_to_root(u->cgroup_path));

                /* Remember what's actually enabled now */
                u->cgroup_enabled_mask = result_mask;

                migrate_mask = u->cgroup_realized_mask ^ target_mask;
        }

        /* Keep track that this is now realized */
        u->cgroup_realized = true;
        u->cgroup_realized_mask = target_mask;

        /* Migrate processes in controller hierarchies both downwards (enabling) and upwards (disabling).
         *
         * Unnecessary controller cgroups are trimmed (after emptied by upward migration).
         * We perform migration also with whole slices for cases when users don't care about leaf
         * granularity. Since delegated_mask is a subset of target mask, we won't trim the slice subtree
         * containing delegated units. */
        if (cg_all_unified() == 0) {
                r = cg_migrate_v1_controllers(u->manager->cgroup_supported, migrate_mask, u->cgroup_path, migrate_callback, u);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to migrate controller cgroups from %s, ignoring: %m", empty_to_root(u->cgroup_path));

                is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);
                r = cg_trim_v1_controllers(u->manager->cgroup_supported, ~target_mask, u->cgroup_path, !is_root_slice);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to delete controller cgroups %s, ignoring: %m", empty_to_root(u->cgroup_path));
        }

        /* Set attributes */
        cgroup_context_apply(u, target_mask, state);
        cgroup_xattr_apply(u);

        return 0;
}

static int unit_attach_pid_to_cgroup_via_bus(Unit *u, pid_t pid, const char *suffix_path) {
        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
        const char *pp;
        int r;

        assert(u);

        if (MANAGER_IS_SYSTEM(u->manager))
                return -EINVAL;

        if (!u->manager->system_bus)
                return -EIO;

        if (!u->cgroup_path)
                return -EINVAL;

        /* Determine this unit's cgroup path relative to our cgroup root */
        pp = path_startswith(u->cgroup_path, u->manager->cgroup_root);
        if (!pp)
                return -EINVAL;

        pp = strjoina("/", pp, suffix_path);

        r = sd_bus_call_method(u->manager->system_bus,
                               "org.freedesktop.systemd1",
                               "/org/freedesktop/systemd1",
                               "org.freedesktop.systemd1.Manager",
                               "AttachProcessesToUnit",
                               &error, NULL,
                               "ssau",
                               NULL /* empty unit name means client's unit, i.e. us */, pp, 1, (uint32_t) pid);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to attach unit process " PID_FMT " via the bus: %s", pid, bus_error_message(&error, r));

        return 0;
}

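/* For illustration (hypothetical PID and subcgroup, not from the original source), the
 * bus call above corresponds to a manual invocation along these lines, where the "ssau"
 * signature is: unit name, subcgroup path, array of PIDs (count followed by elements):
 *
 *     busctl call org.freedesktop.systemd1 /org/freedesktop/systemd1 \
 *         org.freedesktop.systemd1.Manager AttachProcessesToUnit \
 *         ssau "" "/payload" 1 4711
 *
 * An empty unit name means "the unit the calling process itself belongs to". */
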
int unit_attach_pids_to_cgroup(Unit *u, Set *pids, const char *suffix_path) {
        CGroupMask delegated_mask;
        const char *p;
        void *pidp;
        int ret, r;

        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EINVAL;

        if (set_isempty(pids))
                return 0;

        /* Load any custom firewall BPF programs here once to test if they are existing and actually loadable.
         * Fail here early since later errors in the call chain unit_realize_cgroup to cgroup_context_apply are ignored. */
        r = bpf_firewall_load_custom(u);
        if (r < 0)
                return r;

        r = unit_realize_cgroup(u);
        if (r < 0)
                return r;

        if (isempty(suffix_path))
                p = u->cgroup_path;
        else
                p = prefix_roota(u->cgroup_path, suffix_path);

        delegated_mask = unit_get_delegate_mask(u);

        ret = 0;
        SET_FOREACH(pidp, pids) {
                pid_t pid = PTR_TO_PID(pidp);

                /* First, attach the PID to the main cgroup hierarchy */
                r = cg_attach(SYSTEMD_CGROUP_CONTROLLER, p, pid);
                if (r < 0) {
                        bool again = MANAGER_IS_USER(u->manager) && ERRNO_IS_PRIVILEGE(r);

                        log_unit_full_errno(u, again ? LOG_DEBUG : LOG_INFO, r,
                                            "Couldn't move process "PID_FMT" to%s requested cgroup '%s': %m",
                                            pid, again ? " directly" : "", empty_to_root(p));

                        if (again) {
                                int z;

                                /* If we are in a user instance, and we can't move the process ourselves due
                                 * to permission problems, let's ask the system instance about it instead.
                                 * Since it's more privileged it might be able to move the process across the
                                 * leaves of a subtree whose top node is not owned by us. */

                                z = unit_attach_pid_to_cgroup_via_bus(u, pid, suffix_path);
                                if (z < 0)
                                        log_unit_info_errno(u, z, "Couldn't move process "PID_FMT" to requested cgroup '%s' (directly or via the system bus): %m", pid, empty_to_root(p));
                                else {
                                        if (ret >= 0)
                                                ret++; /* Count successful additions */
                                        continue; /* When the bus thing worked via the bus we are fully done for this PID. */
                                }
                        }

                        if (ret >= 0)
                                ret = r; /* Remember first error */

                        continue;
                } else if (ret >= 0)
                        ret++; /* Count successful additions */

                r = cg_all_unified();
                if (r < 0)
                        return r;
                if (r > 0)
                        continue;

                /* In the legacy hierarchy, attach the process to the requested cgroup if possible, and if not to the
                 * innermost realized one */

                for (CGroupController c = 0; c < _CGROUP_CONTROLLER_MAX; c++) {
                        CGroupMask bit = CGROUP_CONTROLLER_TO_MASK(c);
                        const char *realized;

                        if (!(u->manager->cgroup_supported & bit))
                                continue;

                        /* If this controller is delegated and realized, honour the caller's request for the cgroup suffix. */
                        if (delegated_mask & u->cgroup_realized_mask & bit) {
                                r = cg_attach(cgroup_controller_to_string(c), p, pid);
                                if (r >= 0)
                                        continue; /* Success! */

                                log_unit_debug_errno(u, r, "Failed to attach PID " PID_FMT " to requested cgroup %s in controller %s, falling back to unit's cgroup: %m",
                                                     pid, empty_to_root(p), cgroup_controller_to_string(c));
                        }

                        /* So this controller is either not delegated or not realized, or something else weird happened. In
                         * that case let's attach the PID at least to the closest cgroup up the tree that is
                         * realized for this controller. */
                        realized = unit_get_realized_cgroup_path(u, bit);
                        if (!realized)
                                continue; /* Not even realized in the root slice? Then let's not bother */

                        r = cg_attach(cgroup_controller_to_string(c), realized, pid);
                        if (r < 0)
                                log_unit_debug_errno(u, r, "Failed to attach PID " PID_FMT " to realized cgroup %s in controller %s, ignoring: %m",
                                                     pid, realized, cgroup_controller_to_string(c));
                }
        }

        return ret;
}

static bool unit_has_mask_realized(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask) {

        assert(u);

        /* Returns true if this unit is fully realized. We check four things:
         *
         * 1. Whether the cgroup was created at all
         * 2. Whether the cgroup was created in all the hierarchies we need it to be created in (in case of cgroup v1)
         * 3. Whether the cgroup has all the right controllers enabled (in case of cgroup v2)
         * 4. Whether the invalidation mask is currently zero
         *
         * If you wonder why we mask the target realization and enable mask with CGROUP_MASK_V1/CGROUP_MASK_V2: note
         * that there are three sets of bitmasks: CGROUP_MASK_V1 (for real cgroup v1 controllers), CGROUP_MASK_V2 (for
         * real cgroup v2 controllers) and CGROUP_MASK_BPF (for BPF-based pseudo-controllers). Now, cgroup_realized_mask
         * only matters for cgroup v1 controllers, and cgroup_enabled_mask is only used for cgroup v2, and if they
         * differ in the others, we don't really care. (After all, the cgroup_enabled_mask tracks which controllers are
         * enabled through cgroup.subtree_control, and since the BPF pseudo-controllers don't show up there, they
         * simply don't matter.) */

        return u->cgroup_realized &&
                ((u->cgroup_realized_mask ^ target_mask) & CGROUP_MASK_V1) == 0 &&
                ((u->cgroup_enabled_mask ^ enable_mask) & CGROUP_MASK_V2) == 0 &&
                u->cgroup_invalidated_mask == 0;
}

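/* Worked example (hypothetical masks, not from the original source): with
 * cgroup_realized_mask = CPU|MEMORY and target_mask = CPU|MEMORY|PIDS, the XOR yields
 * PIDS, which is non-zero within CGROUP_MASK_V1, so the unit is not fully realized and
 * needs to be re-realized. The same comparison is made against cgroup_enabled_mask for
 * the v2 bits, while the BPF pseudo-controller bits are masked out entirely. */
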
static bool unit_has_mask_disables_realized(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask) {

        assert(u);

        /* Returns true if all controllers which should be disabled are indeed disabled.
         *
         * Unlike unit_has_mask_realized, we don't care what was enabled, only that anything we want to remove is
         * already removed. */

        return !u->cgroup_realized ||
                (FLAGS_SET(u->cgroup_realized_mask, target_mask & CGROUP_MASK_V1) &&
                 FLAGS_SET(u->cgroup_enabled_mask, enable_mask & CGROUP_MASK_V2));
}

static bool unit_has_mask_enables_realized(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask) {

        assert(u);

        /* Returns true if all controllers which should be enabled are indeed enabled.
         *
         * Unlike unit_has_mask_realized, we don't care about the controllers that are not present, only that anything
         * we want to add is already added. */

        return u->cgroup_realized &&
                ((u->cgroup_realized_mask | target_mask) & CGROUP_MASK_V1) == (u->cgroup_realized_mask & CGROUP_MASK_V1) &&
                ((u->cgroup_enabled_mask | enable_mask) & CGROUP_MASK_V2) == (u->cgroup_enabled_mask & CGROUP_MASK_V2);
}

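/* Note on the bit arithmetic above: "(a | b) == a" is the classic test for "b is a
 * subset of a". The checks therefore reduce to: every v1 bit in target_mask is already
 * realized, and every v2 bit in enable_mask is already enabled. Bits that would need to
 * be *removed* are deliberately ignored here, in contrast to unit_has_mask_realized(). */
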
static void unit_add_to_cgroup_realize_queue(Unit *u) {
        assert(u);

        if (u->in_cgroup_realize_queue)
                return;

        LIST_APPEND(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
        u->in_cgroup_realize_queue = true;
}

static void unit_remove_from_cgroup_realize_queue(Unit *u) {
        assert(u);

        if (!u->in_cgroup_realize_queue)
                return;

        LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
        u->in_cgroup_realize_queue = false;
}

/* Controllers can only be enabled breadth-first, from the root of the
 * hierarchy downwards to the unit in question. */
static int unit_realize_cgroup_now_enable(Unit *u, ManagerState state) {
        CGroupMask target_mask, enable_mask, new_target_mask, new_enable_mask;
        Unit *slice;
        int r;

        assert(u);

        /* First go deal with this unit's parent, or we won't be able to enable
         * any new controllers at this layer. */
        slice = UNIT_GET_SLICE(u);
        if (slice) {
                r = unit_realize_cgroup_now_enable(slice, state);
                if (r < 0)
                        return r;
        }

        target_mask = unit_get_target_mask(u);
        enable_mask = unit_get_enable_mask(u);

        /* We can only enable in this direction, don't try to disable anything. */
        if (unit_has_mask_enables_realized(u, target_mask, enable_mask))
                return 0;

        new_target_mask = u->cgroup_realized_mask | target_mask;
        new_enable_mask = u->cgroup_enabled_mask | enable_mask;

        return unit_update_cgroup(u, new_target_mask, new_enable_mask, state);
}

/* Controllers can only be disabled depth-first, from the leaves of the
 * hierarchy upwards to the unit in question. */
static int unit_realize_cgroup_now_disable(Unit *u, ManagerState state) {
        Unit *m;

        assert(u);

        if (u->type != UNIT_SLICE)
                return 0;

        UNIT_FOREACH_DEPENDENCY(m, u, UNIT_ATOM_SLICE_OF) {
                CGroupMask target_mask, enable_mask, new_target_mask, new_enable_mask;
                int r;

                /* The cgroup for this unit might not actually be fully realised yet, in which case it isn't
                 * holding any controllers open anyway. */
                if (!m->cgroup_realized)
                        continue;

                /* We must disable those below us first in order to release the controller. */
                if (m->type == UNIT_SLICE)
                        (void) unit_realize_cgroup_now_disable(m, state);

                target_mask = unit_get_target_mask(m);
                enable_mask = unit_get_enable_mask(m);

                /* We can only disable in this direction, don't try to enable anything. */
                if (unit_has_mask_disables_realized(m, target_mask, enable_mask))
                        continue;

                new_target_mask = m->cgroup_realized_mask & target_mask;
                new_enable_mask = m->cgroup_enabled_mask & enable_mask;

                r = unit_update_cgroup(m, new_target_mask, new_enable_mask, state);
                if (r < 0)
                        return r;
        }

        return 0;
}

/* Check if necessary controllers and attributes for a unit are in place.
 *
 * - If so, do nothing.
 * - If not, create paths, move processes over, and set attributes.
 *
 * Controllers can only be *enabled* in a breadth-first way, and *disabled* in
 * a depth-first way. As such the process looks like this:
 *
 * Suppose we have a cgroup hierarchy where slice "a" (a child of the root,
 * with a sibling "b") contains the cgroups "c" and "d", and "d" in turn
 * contains "j" and "k". Now:
 *
 * 1. We want to realise cgroup "d" now.
 * 2. cgroup "a" has DisableControllers=cpu in the associated unit.
 * 3. cgroup "k" just started requesting the memory controller.
 *
 * To make this work we must do the following in order:
 *
 * 1. Disable CPU controller in k, j
 * 2. Disable CPU controller in d
 * 3. Enable memory controller in root
 * 4. Enable memory controller in a
 * 5. Enable memory controller in d
 * 6. Enable memory controller in k
 *
 * Notice that we need to touch j in one direction, but not the other. We also
 * don't go beyond d when disabling -- it's up to "a" to get realized if it
 * wants to disable further. The basic rules are therefore:
 *
 * - If you're disabling something, you need to realise all of the cgroups from
 *   your recursive descendants to the root. This starts from the leaves.
 * - If you're enabling something, you need to realise from the root cgroup
 *   downwards, but you don't need to iterate your recursive descendants.
 *
 * Returns 0 on success and < 0 on failure. */
static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
        CGroupMask target_mask, enable_mask;
        Unit *slice;
        int r;

        assert(u);

        unit_remove_from_cgroup_realize_queue(u);

        target_mask = unit_get_target_mask(u);
        enable_mask = unit_get_enable_mask(u);

        if (unit_has_mask_realized(u, target_mask, enable_mask))
                return 0;

        /* Disable controllers below us, if there are any */
        r = unit_realize_cgroup_now_disable(u, state);
        if (r < 0)
                return r;

        /* Enable controllers above us, if there are any */
        slice = UNIT_GET_SLICE(u);
        if (slice) {
                r = unit_realize_cgroup_now_enable(slice, state);
                if (r < 0)
                        return r;
        }

        /* Now actually deal with the cgroup we were trying to realise and set attributes */
        r = unit_update_cgroup(u, target_mask, enable_mask, state);
        if (r < 0)
                return r;

        /* Now, reset the invalidation mask */
        u->cgroup_invalidated_mask = 0;
        return 0;
}

unsigned manager_dispatch_cgroup_realize_queue(Manager *m) {
        ManagerState state;
        unsigned n = 0;
        Unit *i;
        int r;

        assert(m);

        state = manager_state(m);

        while ((i = m->cgroup_realize_queue)) {
                assert(i->in_cgroup_realize_queue);

                if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(i))) {
                        /* Maybe things changed, and the unit is not actually active anymore? */
                        unit_remove_from_cgroup_realize_queue(i);
                        continue;
                }

                r = unit_realize_cgroup_now(i, state);
                if (r < 0)
                        log_warning_errno(r, "Failed to realize cgroups for queued unit %s, ignoring: %m", i->id);

                n++;
        }

        return n;
}

void unit_add_family_to_cgroup_realize_queue(Unit *u) {
        assert(u);
        assert(u->type == UNIT_SLICE);

        /* Family of a unit is defined as (immediate) children of the unit and immediate children of all
         * its ancestors.
         *
         * Ideally we would enqueue ancestor path only (bottom up). However, on cgroup-v1 scheduling becomes
         * very weird if two units that own processes reside in the same slice, but one is realized in the
         * "cpu" hierarchy and one is not (for example because one has CPUWeight= set and the other does
         * not), because that means individual processes need to be scheduled against whole cgroups. Let's
         * avoid this asymmetry by always ensuring that siblings of a unit are always realized in their v1
         * controller hierarchies too (if unit requires the controller to be realized).
         *
         * The function must invalidate cgroup_members_mask of all ancestors in order to calculate up to date
         * masks. */

        do {
                Unit *m;

                /* Children of u likely changed when we're called */
                u->cgroup_members_mask_valid = false;

                UNIT_FOREACH_DEPENDENCY(m, u, UNIT_ATOM_SLICE_OF) {

                        /* No point in doing cgroup application for units without active processes. */
                        if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(m)))
                                continue;

                        /* We only enqueue siblings if they were realized once at least, in the main
                         * hierarchy. */
                        if (!m->cgroup_realized)
                                continue;

                        /* If the unit doesn't need any new controllers and has current ones
                         * realized, it doesn't need any changes. */
                        if (unit_has_mask_realized(m,
                                                   unit_get_target_mask(m),
                                                   unit_get_enable_mask(m)))
                                continue;

                        unit_add_to_cgroup_realize_queue(m);
                }

                /* Parent comes after children */
                unit_add_to_cgroup_realize_queue(u);

                u = UNIT_GET_SLICE(u);
        } while (u);
}

int unit_realize_cgroup(Unit *u) {
        Unit *slice;

        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* So, here's the deal: when realizing the cgroups for this unit, we need to first create all
         * parents, but there's more actually: for the weight-based controllers we also need to make sure
         * that all our siblings (i.e. units that are in the same slice as we are) have cgroups, too. On the
         * other hand, when a controller is removed from realized set, it may become unnecessary in siblings
         * and ancestors and they should be (de)realized too.
         *
         * This call will defer work on the siblings and derealized ancestors to the next event loop
         * iteration and synchronously creates the parent cgroups (unit_realize_cgroup_now). */

        slice = UNIT_GET_SLICE(u);
        if (slice)
                unit_add_family_to_cgroup_realize_queue(slice);

        /* And realize this one now (and apply the values) */
        return unit_realize_cgroup_now(u, manager_state(u->manager));
}

void unit_release_cgroup(Unit *u) {
        assert(u);

        /* Forgets all cgroup details for this cgroup — but does *not* destroy the cgroup. This is hence OK to call
         * when we close down everything for reexecution, where we really want to leave the cgroup in place. */

        if (u->cgroup_path) {
                (void) hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
                u->cgroup_path = mfree(u->cgroup_path);
        }

        if (u->cgroup_control_inotify_wd >= 0) {
                if (inotify_rm_watch(u->manager->cgroup_inotify_fd, u->cgroup_control_inotify_wd) < 0)
                        log_unit_debug_errno(u, errno, "Failed to remove cgroup control inotify watch %i for %s, ignoring: %m", u->cgroup_control_inotify_wd, u->id);

                (void) hashmap_remove(u->manager->cgroup_control_inotify_wd_unit, INT_TO_PTR(u->cgroup_control_inotify_wd));
                u->cgroup_control_inotify_wd = -1;
        }

        if (u->cgroup_memory_inotify_wd >= 0) {
                if (inotify_rm_watch(u->manager->cgroup_inotify_fd, u->cgroup_memory_inotify_wd) < 0)
                        log_unit_debug_errno(u, errno, "Failed to remove cgroup memory inotify watch %i for %s, ignoring: %m", u->cgroup_memory_inotify_wd, u->id);

                (void) hashmap_remove(u->manager->cgroup_memory_inotify_wd_unit, INT_TO_PTR(u->cgroup_memory_inotify_wd));
                u->cgroup_memory_inotify_wd = -1;
        }
}

bool unit_maybe_release_cgroup(Unit *u) {
        int r;

        assert(u);

        if (!u->cgroup_path)
                return true;

        /* Don't release the cgroup if there are still processes under it. If we get notified later when all the
         * processes exit (e.g. the processes were in D-state and exited after the unit was marked as failed)
         * we need the cgroup paths to continue to be tracked by the manager so they can be looked up and cleaned
         * up later. */
        r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
        if (r < 0)
                log_unit_debug_errno(u, r, "Error checking if the cgroup is recursively empty, ignoring: %m");
        else if (r == 1) {
                unit_release_cgroup(u);
                return true;
        }

        return false;
}

void unit_prune_cgroup(Unit *u) {
        bool is_root_slice;
        int r;

        assert(u);

        /* Removes the cgroup, if empty and possible, and stops watching it. */

        if (!u->cgroup_path)
                return;

        (void) unit_get_cpu_usage(u, NULL); /* Cache the last CPU usage value before we destroy the cgroup */

#if BPF_FRAMEWORK
        (void) lsm_bpf_cleanup(u); /* Remove cgroup from the global LSM BPF map */
#endif

        is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);

        r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !is_root_slice);
        if (r < 0)
                /* One reason we could have failed here is, that the cgroup still contains a process.
                 * However, if the cgroup becomes removable at a later time, it might be removed when
                 * the containing slice is stopped. So even if we failed now, this unit shouldn't assume
                 * that the cgroup is still realized the next time it is started. Do not return early
                 * on error, continue cleanup. */
                log_unit_full_errno(u, r == -EBUSY ? LOG_DEBUG : LOG_WARNING, r, "Failed to destroy cgroup %s, ignoring: %m", empty_to_root(u->cgroup_path));

        if (is_root_slice)
                return;

        if (!unit_maybe_release_cgroup(u)) /* Returns true if the cgroup was released */
                return;

        u->cgroup_realized = false;
        u->cgroup_realized_mask = 0;
        u->cgroup_enabled_mask = 0;

        u->bpf_device_control_installed = bpf_program_free(u->bpf_device_control_installed);
}

int unit_search_main_pid(Unit *u, pid_t *ret) {
        _cleanup_fclose_ FILE *f = NULL;
        pid_t pid = 0, npid;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENXIO;

        r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, &f);
        if (r < 0)
                return r;

        while (cg_read_pid(f, &npid) > 0) {

                if (npid == pid)
                        continue;

                if (pid_is_my_child(npid) == 0)
                        continue;

                if (pid != 0)
                        /* Dang, there's more than one daemonized PID
                           in this group, so we don't know what process
                           is the main process. */
                        return -ENODATA;

                pid = npid;
        }

        *ret = pid;
        return 0;
}

static int unit_watch_pids_in_path(Unit *u, const char *path) {
        _cleanup_closedir_ DIR *d = NULL;
        _cleanup_fclose_ FILE *f = NULL;
        int ret = 0, r;

        assert(u);
        assert(path);

        r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f);
        if (r < 0)
                ret = r;
        else {
                pid_t pid;

                while ((r = cg_read_pid(f, &pid)) > 0) {
                        r = unit_watch_pid(u, pid, false);
                        if (r < 0 && ret >= 0)
                                ret = r;
                }

                if (r < 0 && ret >= 0)
                        ret = r;
        }

        r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d);
        if (r < 0) {
                if (ret >= 0)
                        ret = r;
        } else {
                char *fn;

                while ((r = cg_read_subgroup(d, &fn)) > 0) {
                        _cleanup_free_ char *p = NULL;

                        p = path_join(empty_to_root(path), fn);
                        free(fn);

                        if (!p)
                                return -ENOMEM;

                        r = unit_watch_pids_in_path(u, p);
                        if (r < 0 && ret >= 0)
                                ret = r;
                }

                if (r < 0 && ret >= 0)
                        ret = r;
        }

        return ret;
}

int unit_synthesize_cgroup_empty_event(Unit *u) {
        int r;

        assert(u);

        /* Enqueue a synthetic cgroup empty event if this unit doesn't watch any PIDs anymore. This is compatibility
         * support for non-unified systems where notifications aren't reliable, and hence need to take whatever we can
         * get as notification source as soon as we stopped having any useful PIDs to watch for. */

        if (!u->cgroup_path)
                return -ENOENT;

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return r;
        if (r > 0) /* On unified we have reliable notifications, and don't need this */
                return 0;

        if (!set_isempty(u->pids))
                return 0;

        unit_add_to_cgroup_empty_queue(u);
        return 0;
}

int unit_watch_all_pids(Unit *u) {
        int r;

        assert(u);

        /* Adds all PIDs from our cgroup to the set of PIDs we
         * watch. This is a fallback logic for cases where we do not
         * get reliable cgroup empty notifications: we try to use
         * SIGCHLD as replacement. */

        if (!u->cgroup_path)
                return -ENOENT;

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return r;
        if (r > 0) /* On unified we can use proper notifications */
                return 0;

        return unit_watch_pids_in_path(u, u->cgroup_path);
}

static int on_cgroup_empty_event(sd_event_source *s, void *userdata) {
        Manager *m = userdata;
        Unit *u;
        int r;

        assert(s);
        assert(m);

        u = m->cgroup_empty_queue;
        if (!u)
                return 0;

        assert(u->in_cgroup_empty_queue);
        u->in_cgroup_empty_queue = false;
        LIST_REMOVE(cgroup_empty_queue, m->cgroup_empty_queue, u);

        if (m->cgroup_empty_queue) {
                /* More stuff queued, let's make sure we remain enabled */
                r = sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);
                if (r < 0)
                        log_debug_errno(r, "Failed to reenable cgroup empty event source, ignoring: %m");
        }

        /* Update state based on OOM kills before we notify about cgroup empty event */
        (void) unit_check_oom(u);
        (void) unit_check_oomd_kill(u);

        unit_add_to_gc_queue(u);

        if (UNIT_VTABLE(u)->notify_cgroup_empty)
                UNIT_VTABLE(u)->notify_cgroup_empty(u);

        return 0;
}

void unit_add_to_cgroup_empty_queue(Unit *u) {
        int r;

        assert(u);

        /* Note that there are four different ways how cgroup empty events reach us:
         *
         * 1. On the unified hierarchy we get an inotify event on the cgroup
         *
         * 2. On the legacy hierarchy, when running in system mode, we get a datagram on the cgroup agent socket
         *
         * 3. On the legacy hierarchy, when running in user mode, we get a D-Bus signal on the system bus
         *
         * 4. On the legacy hierarchy, in service units we start watching all processes of the cgroup for SIGCHLD as
         *    soon as we get one SIGCHLD, to deal with unreliable cgroup notifications.
         *
         * Regardless which way we got the notification, we'll verify it here, and then add it to a separate
         * queue. This queue will be dispatched at a lower priority than the SIGCHLD handler, so that we always use
         * SIGCHLD if we can get it first, and only use the cgroup empty notifications if there's no SIGCHLD pending
         * (which might happen if the cgroup doesn't contain processes that are our own child, which is typically the
         * case for scope units). */

        if (u->in_cgroup_empty_queue)
                return;

        /* Let's verify that the cgroup is really empty */
        if (!u->cgroup_path)
                return;

        r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
        if (r < 0) {
                log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", empty_to_root(u->cgroup_path));
                return;
        }
        if (r == 0)
                return;

        LIST_PREPEND(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
        u->in_cgroup_empty_queue = true;

        /* Trigger the defer event */
        r = sd_event_source_set_enabled(u->manager->cgroup_empty_event_source, SD_EVENT_ONESHOT);
        if (r < 0)
                log_debug_errno(r, "Failed to enable cgroup empty event source: %m");
}

static void unit_remove_from_cgroup_empty_queue(Unit *u) {
        assert(u);

        if (!u->in_cgroup_empty_queue)
                return;

        LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
        u->in_cgroup_empty_queue = false;
}

int unit_check_oomd_kill(Unit *u) {
        _cleanup_free_ char *value = NULL;
        bool increased;
        uint64_t n = 0;
        int r;

        if (!u->cgroup_path)
                return 0;

        r = cg_all_unified();
        if (r < 0)
                return log_unit_debug_errno(u, r, "Couldn't determine whether we are in all unified mode: %m");
        else if (r == 0)
                return 0;

        r = cg_get_xattr_malloc(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "user.oomd_ooms", &value);
        if (r < 0 && r != -ENODATA)
                return r;

        if (!isempty(value)) {
                r = safe_atou64(value, &n);
                if (r < 0)
                        return r;
        }

        increased = n > u->managed_oom_kill_last;
        u->managed_oom_kill_last = n;

        if (!increased)
                return 0;

        n = 0;
        value = mfree(value);
        r = cg_get_xattr_malloc(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "user.oomd_kill", &value);
        if (r >= 0 && !isempty(value))
                (void) safe_atou64(value, &n);

        if (n > 0)
                log_unit_struct(u, LOG_NOTICE,
                                "MESSAGE_ID=" SD_MESSAGE_UNIT_OOMD_KILL_STR,
                                LOG_UNIT_INVOCATION_ID(u),
                                LOG_UNIT_MESSAGE(u, "systemd-oomd killed %"PRIu64" process(es) in this unit.", n),
                                "N_PROCESSES=%" PRIu64, n);
        else
                log_unit_struct(u, LOG_NOTICE,
                                "MESSAGE_ID=" SD_MESSAGE_UNIT_OOMD_KILL_STR,
                                LOG_UNIT_INVOCATION_ID(u),
                                LOG_UNIT_MESSAGE(u, "systemd-oomd killed some process(es) in this unit."));

        unit_notify_cgroup_oom(u, /* ManagedOOM= */ true);

        return 1;
}

int unit_check_oom(Unit *u) {
        _cleanup_free_ char *oom_kill = NULL;
        bool increased;
        uint64_t c;
        int r;

        if (!u->cgroup_path)
                return 0;

        r = cg_get_keyed_attribute("memory", u->cgroup_path, "memory.events", STRV_MAKE("oom_kill"), &oom_kill);
        if (IN_SET(r, -ENOENT, -ENXIO)) /* Handle gracefully if cgroup or oom_kill attribute don't exist */
                c = 0;
        else if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to read oom_kill field of memory.events cgroup attribute: %m");
        else {
                r = safe_atou64(oom_kill, &c);
                if (r < 0)
                        return log_unit_debug_errno(u, r, "Failed to parse oom_kill field: %m");
        }

        increased = c > u->oom_kill_last;
        u->oom_kill_last = c;

        if (!increased)
                return 0;

        log_unit_struct(u, LOG_NOTICE,
                        "MESSAGE_ID=" SD_MESSAGE_UNIT_OUT_OF_MEMORY_STR,
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "A process of this unit has been killed by the OOM killer."));

        unit_notify_cgroup_oom(u, /* ManagedOOM= */ false);

        return 1;
}

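/* For reference, the "memory.events" file read above looks like this (sample contents):
 *
 *     low 0
 *     high 0
 *     max 7
 *     oom 1
 *     oom_kill 1
 *
 * Only the "oom_kill" counter is tracked here; since it is monotonic, a new kill is
 * detected by comparing against the previously seen value rather than the absolute
 * count. */
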
static int on_cgroup_oom_event(sd_event_source *s, void *userdata) {
        Manager *m = userdata;
        Unit *u;
        int r;

        assert(s);
        assert(m);

        u = m->cgroup_oom_queue;
        if (!u)
                return 0;

        assert(u->in_cgroup_oom_queue);
        u->in_cgroup_oom_queue = false;
        LIST_REMOVE(cgroup_oom_queue, m->cgroup_oom_queue, u);

        if (m->cgroup_oom_queue) {
                /* More stuff queued, let's make sure we remain enabled */
                r = sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);
                if (r < 0)
                        log_debug_errno(r, "Failed to reenable cgroup oom event source, ignoring: %m");
        }

        (void) unit_check_oom(u);
        return 0;
}

static void unit_add_to_cgroup_oom_queue(Unit *u) {
        int r;

        assert(u);

        if (u->in_cgroup_oom_queue)
                return;
        if (!u->cgroup_path)
                return;

        LIST_PREPEND(cgroup_oom_queue, u->manager->cgroup_oom_queue, u);
        u->in_cgroup_oom_queue = true;

        /* Trigger the defer event */
        if (!u->manager->cgroup_oom_event_source) {
                _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;

                r = sd_event_add_defer(u->manager->event, &s, on_cgroup_oom_event, u->manager);
                if (r < 0) {
                        log_error_errno(r, "Failed to create cgroup oom event source: %m");
                        return;
                }

                r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_NORMAL-8);
                if (r < 0) {
                        log_error_errno(r, "Failed to set priority of cgroup oom event source: %m");
                        return;
                }

                (void) sd_event_source_set_description(s, "cgroup-oom");
                u->manager->cgroup_oom_event_source = TAKE_PTR(s);
        }

        r = sd_event_source_set_enabled(u->manager->cgroup_oom_event_source, SD_EVENT_ONESHOT);
        if (r < 0)
                log_error_errno(r, "Failed to enable cgroup oom event source: %m");
}

static int unit_check_cgroup_events(Unit *u) {
        char *values[2] = {};
        int r;

        assert(u);

        if (!u->cgroup_path)
                return 0;

        r = cg_get_keyed_attribute_graceful(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events",
                                            STRV_MAKE("populated", "frozen"), values);
        if (r < 0)
                return r;

        /* The cgroup.events notifications can be merged together so act as we saw the given state for the
         * first time. The functions we call to handle given state are idempotent, which makes them
         * effectively remember the previous state. */
        if (values[0]) {
                if (streq(values[0], "1"))
                        unit_remove_from_cgroup_empty_queue(u);
                else
                        unit_add_to_cgroup_empty_queue(u);
        }

        /* Disregard freezer state changes due to operations not initiated by us */
        if (values[1] && IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_THAWING)) {
                if (streq(values[1], "0"))
                        unit_thawed(u);
                else
                        unit_frozen(u);
        }

        free(values[0]);
        free(values[1]);

        return 0;
}

static int on_cgroup_inotify_event(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *m = userdata;

        assert(s);
        assert(fd >= 0);
        assert(m);

        for (;;) {
                union inotify_event_buffer buffer;
                ssize_t l;

                l = read(fd, &buffer, sizeof(buffer));
                if (l < 0) {
                        if (ERRNO_IS_TRANSIENT(errno))
                                return 0;

                        return log_error_errno(errno, "Failed to read control group inotify events: %m");
                }

                FOREACH_INOTIFY_EVENT_WARN(e, buffer, l) {
                        Unit *u;

                        if (e->wd < 0)
                                /* Queue overflow has no watch descriptor */
                                continue;

                        if (e->mask & IN_IGNORED)
                                /* The watch was just removed */
                                continue;

                        /* Note that inotify might deliver events for a watch even after it was removed,
                         * because it was queued before the removal. Let's ignore this here safely. */

                        u = hashmap_get(m->cgroup_control_inotify_wd_unit, INT_TO_PTR(e->wd));
                        if (u)
                                unit_check_cgroup_events(u);

                        u = hashmap_get(m->cgroup_memory_inotify_wd_unit, INT_TO_PTR(e->wd));
                        if (u)
                                unit_add_to_cgroup_oom_queue(u);
                }
        }
}

static int cg_bpf_mask_supported(CGroupMask *ret) {
        CGroupMask mask = 0;
        int r;

        /* BPF-based firewall */
        r = bpf_firewall_supported();
        if (r < 0)
                return r;
        if (r > 0)
                mask |= CGROUP_MASK_BPF_FIREWALL;

        /* BPF-based device access control */
        r = bpf_devices_supported();
        if (r < 0)
                return r;
        if (r > 0)
                mask |= CGROUP_MASK_BPF_DEVICES;

        /* BPF pinned prog */
        r = bpf_foreign_supported();
        if (r < 0)
                return r;
        if (r > 0)
                mask |= CGROUP_MASK_BPF_FOREIGN;

        /* BPF-based bind{4|6} hooks */
        r = bpf_socket_bind_supported();
        if (r < 0)
                return r;
        if (r > 0)
                mask |= CGROUP_MASK_BPF_SOCKET_BIND;

        /* BPF-based cgroup_skb/{egress|ingress} hooks */
        r = restrict_network_interfaces_supported();
        if (r < 0)
                return r;
        if (r > 0)
                mask |= CGROUP_MASK_BPF_RESTRICT_NETWORK_INTERFACES;

        *ret = mask;
        return 0;
}

int manager_setup_cgroup(Manager *m) {
        _cleanup_free_ char *path = NULL;
        const char *scope_path;
        int r, all_unified;
        CGroupMask mask;
        char *e;

        assert(m);

        /* 1. Determine hierarchy */
        m->cgroup_root = mfree(m->cgroup_root);
        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root);
        if (r < 0)
                return log_error_errno(r, "Cannot determine cgroup we are running in: %m");

        /* Chop off the init scope, if we are already located in it */
        e = endswith(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);

        /* LEGACY: Also chop off the system slice if we are in
         * it. This is to support live upgrades from older systemd
         * versions where PID 1 was moved there. Also see
         * cg_get_root_path(). */
        if (!e && MANAGER_IS_SYSTEM(m)) {
                e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE);
                if (!e)
                        e = endswith(m->cgroup_root, "/system"); /* even more legacy */
        }
        if (e)
                *e = 0;

        /* And make sure to store away the root value without trailing slash, even for the root dir, so that we can
         * easily prepend it everywhere. */
        delete_trailing_chars(m->cgroup_root, "/");

        /* 2. Show data */
        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
        if (r < 0)
                return log_error_errno(r, "Cannot find cgroup mount point: %m");

        r = cg_unified();
        if (r < 0)
                return log_error_errno(r, "Couldn't determine if we are running in the unified hierarchy: %m");

        all_unified = cg_all_unified();
        if (all_unified < 0)
                return log_error_errno(all_unified, "Couldn't determine whether we are in all unified mode: %m");
        if (all_unified > 0)
                log_debug("Unified cgroup hierarchy is located at %s.", path);
        else {
                r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
                if (r < 0)
                        return log_error_errno(r, "Failed to determine whether systemd's own controller is in unified mode: %m");
                if (r > 0)
                        log_debug("Unified cgroup hierarchy is located at %s. Controllers are on legacy hierarchies.", path);
                else
                        log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER_LEGACY ". File system hierarchy is at %s.", path);
        }

        /* 3. Allocate cgroup empty defer event source */
        m->cgroup_empty_event_source = sd_event_source_disable_unref(m->cgroup_empty_event_source);
        r = sd_event_add_defer(m->event, &m->cgroup_empty_event_source, on_cgroup_empty_event, m);
        if (r < 0)
                return log_error_errno(r, "Failed to create cgroup empty event source: %m");

        /* Schedule cgroup empty checks early, but after having processed service notification messages or
         * SIGCHLD signals, so that a cgroup running empty is always just the last safety net of
         * notification, and we collected the metadata the notification and SIGCHLD stuff offers first. */
        r = sd_event_source_set_priority(m->cgroup_empty_event_source, SD_EVENT_PRIORITY_NORMAL-5);
        if (r < 0)
                return log_error_errno(r, "Failed to set priority of cgroup empty event source: %m");

        r = sd_event_source_set_enabled(m->cgroup_empty_event_source, SD_EVENT_OFF);
        if (r < 0)
                return log_error_errno(r, "Failed to disable cgroup empty event source: %m");

        (void) sd_event_source_set_description(m->cgroup_empty_event_source, "cgroup-empty");

        /* 4. Install notifier inotify object, or agent */
        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0) {

                /* In the unified hierarchy we can get cgroup empty notifications via inotify. */

                m->cgroup_inotify_event_source = sd_event_source_disable_unref(m->cgroup_inotify_event_source);
                safe_close(m->cgroup_inotify_fd);

                m->cgroup_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
                if (m->cgroup_inotify_fd < 0)
                        return log_error_errno(errno, "Failed to create control group inotify object: %m");

                r = sd_event_add_io(m->event, &m->cgroup_inotify_event_source, m->cgroup_inotify_fd, EPOLLIN, on_cgroup_inotify_event, m);
                if (r < 0)
                        return log_error_errno(r, "Failed to watch control group inotify object: %m");

                /* Process cgroup empty notifications early. Note that when this event is dispatched it'll
                 * just add the unit to a cgroup empty queue, hence let's run earlier than that. Also see
                 * handling of cgroup agent notifications, for the classic cgroup hierarchy support. */
                r = sd_event_source_set_priority(m->cgroup_inotify_event_source, SD_EVENT_PRIORITY_NORMAL-9);
                if (r < 0)
                        return log_error_errno(r, "Failed to set priority of inotify event source: %m");

                (void) sd_event_source_set_description(m->cgroup_inotify_event_source, "cgroup-inotify");

        } else if (MANAGER_IS_SYSTEM(m) && manager_owns_host_root_cgroup(m) && !MANAGER_IS_TEST_RUN(m)) {

                /* On the legacy hierarchy we only get notifications via cgroup agents. (Which isn't really reliable,
                 * since it does not generate events when control groups with children run empty.) */

                r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, SYSTEMD_CGROUPS_AGENT_PATH);
                if (r < 0)
                        log_warning_errno(r, "Failed to install release agent, ignoring: %m");
                else if (r > 0)
                        log_debug("Installed release agent.");
                else
                        log_debug("Release agent already installed.");
        }

        /* 5. Make sure we are in the special "init.scope" unit in the root slice. */
        scope_path = strjoina(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
        r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
        if (r >= 0) {
                /* Also, move all other userspace processes remaining in the root cgroup into that scope. */
                r = cg_migrate(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
                if (r < 0)
                        log_warning_errno(r, "Couldn't move remaining userspace processes, ignoring: %m");

                /* 6. And pin it, so that it cannot be unmounted */
                safe_close(m->pin_cgroupfs_fd);
                m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
                if (m->pin_cgroupfs_fd < 0)
                        return log_error_errno(errno, "Failed to open pin file: %m");

        } else if (!MANAGER_IS_TEST_RUN(m))
                return log_error_errno(r, "Failed to create %s control group: %m", scope_path);

        /* 7. Always enable hierarchical support if it exists... */
        if (!all_unified && !MANAGER_IS_TEST_RUN(m))
                (void) cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");

        /* 8. Figure out which controllers are supported */
        r = cg_mask_supported_subtree(m->cgroup_root, &m->cgroup_supported);
        if (r < 0)
                return log_error_errno(r, "Failed to determine supported controllers: %m");

        /* 9. Figure out which bpf-based pseudo-controllers are supported */
        r = cg_bpf_mask_supported(&mask);
        if (r < 0)
                return log_error_errno(r, "Failed to determine supported bpf-based pseudo-controllers: %m");
        m->cgroup_supported |= mask;

        /* 10. Log which controllers are supported */
        for (CGroupController c = 0; c < _CGROUP_CONTROLLER_MAX; c++)
                log_debug("Controller '%s' supported: %s", cgroup_controller_to_string(c),
                          yes_no(m->cgroup_supported & CGROUP_CONTROLLER_TO_MASK(c)));

        return 0;
}

void manager_shutdown_cgroup(Manager *m, bool delete) {
        assert(m);

        /* We can't really delete the group, since we are in it. But
         * let's trim it. */
        if (delete && m->cgroup_root && !FLAGS_SET(m->test_run_flags, MANAGER_TEST_RUN_MINIMAL))
                (void) cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);

        m->cgroup_empty_event_source = sd_event_source_disable_unref(m->cgroup_empty_event_source);

        m->cgroup_control_inotify_wd_unit = hashmap_free(m->cgroup_control_inotify_wd_unit);
        m->cgroup_memory_inotify_wd_unit = hashmap_free(m->cgroup_memory_inotify_wd_unit);

        m->cgroup_inotify_event_source = sd_event_source_disable_unref(m->cgroup_inotify_event_source);
        m->cgroup_inotify_fd = safe_close(m->cgroup_inotify_fd);

        m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd);

        m->cgroup_root = mfree(m->cgroup_root);
}

Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) {
        char *p;
        Unit *u;

        assert(m);
        assert(cgroup);

        u = hashmap_get(m->cgroup_unit, cgroup);
        if (u)
                return u;

        p = strdupa_safe(cgroup);
        for (;;) {
                char *e;

                e = strrchr(p, '/');
                if (!e || e == p)
                        return hashmap_get(m->cgroup_unit, SPECIAL_ROOT_SLICE);

                *e = 0;

                u = hashmap_get(m->cgroup_unit, p);
                if (u)
                        return u;
        }
}

Unit *manager_get_unit_by_pid_cgroup(Manager *m, pid_t pid) {
        _cleanup_free_ char *cgroup = NULL;

        assert(m);

        if (!pid_is_valid(pid))
                return NULL;

        if (cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup) < 0)
                return NULL;

        return manager_get_unit_by_cgroup(m, cgroup);
}

Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {
        Unit *u, **array;

        assert(m);

        /* Note that a process might be owned by multiple units, we return only one here, which is good enough for most
         * cases, though not strictly correct. We prefer the one reported by cgroup membership, as that's the most
         * relevant one as children of the process will be assigned to that one, too, before all else. */

        if (!pid_is_valid(pid))
                return NULL;

        if (pid == getpid_cached())
                return hashmap_get(m->units, SPECIAL_INIT_SCOPE);

        u = manager_get_unit_by_pid_cgroup(m, pid);
        if (u)
                return u;

        u = hashmap_get(m->watch_pids, PID_TO_PTR(pid));
        if (u)
                return u;

        array = hashmap_get(m->watch_pids, PID_TO_PTR(-pid));
        if (array)
                return array[0];

        return NULL;
}

int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
        Unit *u;

        assert(m);
        assert(cgroup);

        /* Called on the legacy hierarchy whenever we get an explicit cgroup notification from the cgroup agent process
         * or from the --system instance */

        log_debug("Got cgroup empty notification for: %s", cgroup);

        u = manager_get_unit_by_cgroup(m, cgroup);
        if (!u)
                return 0;

        unit_add_to_cgroup_empty_queue(u);
        return 1;
}

int unit_get_memory_available(Unit *u, uint64_t *ret) {
        uint64_t unit_current, available = UINT64_MAX;
        CGroupContext *unit_context;
        const char *memory_file;
        int r;

        assert(u);
        assert(ret);

        /* If data from cgroups can be accessed, try to find out how much more memory a unit can
         * claim before hitting the configured cgroup limits (if any). Consider both MemoryHigh
         * and MemoryMax, and also any slice the unit might be nested below. */

        if (!UNIT_CGROUP_BOOL(u, memory_accounting))
                return -ENODATA;

        if (!u->cgroup_path)
                return -ENODATA;

        /* The root cgroup doesn't expose this information */
        if (unit_has_host_root_cgroup(u))
                return -ENODATA;

        if ((u->cgroup_realized_mask & CGROUP_MASK_MEMORY) == 0)
                return -ENODATA;

        r = cg_all_unified();
        if (r < 0)
                return r;
        memory_file = r > 0 ? "memory.current" : "memory.usage_in_bytes";

        r = cg_get_attribute_as_uint64("memory", u->cgroup_path, memory_file, &unit_current);
        if (r < 0)
                return r;

        assert_se(unit_context = unit_get_cgroup_context(u));

        if (unit_context->memory_max != UINT64_MAX || unit_context->memory_high != UINT64_MAX)
                available = LESS_BY(MIN(unit_context->memory_max, unit_context->memory_high), unit_current);

        for (Unit *slice = UNIT_GET_SLICE(u); slice; slice = UNIT_GET_SLICE(slice)) {
                uint64_t slice_current, slice_available = UINT64_MAX;
                CGroupContext *slice_context;

                /* No point in continuing if we can't go any lower */
                if (available == 0)
                        break;

                if (!slice->cgroup_path)
                        continue;

                slice_context = unit_get_cgroup_context(slice);
                if (!slice_context)
                        continue;

                if (slice_context->memory_max == UINT64_MAX && slice_context->memory_high == UINT64_MAX)
                        continue;

                r = cg_get_attribute_as_uint64("memory", slice->cgroup_path, memory_file, &slice_current);
                if (r < 0)
                        continue;

                slice_available = LESS_BY(MIN(slice_context->memory_max, slice_context->memory_high), slice_current);
                available = MIN(slice_available, available);
        }

        *ret = available;

        return 0;
}

int unit_get_memory_current(Unit *u, uint64_t *ret) {
        int r;

        assert(u);
        assert(ret);

        if (!UNIT_CGROUP_BOOL(u, memory_accounting))
                return -ENODATA;

        if (!u->cgroup_path)
                return -ENODATA;

        /* The root cgroup doesn't expose this information, let's get it from /proc instead */
        if (unit_has_host_root_cgroup(u))
                return procfs_memory_get_used(ret);

        if ((u->cgroup_realized_mask & CGROUP_MASK_MEMORY) == 0)
                return -ENODATA;

        r = cg_all_unified();
        if (r < 0)
                return r;

        return cg_get_attribute_as_uint64("memory", u->cgroup_path, r > 0 ? "memory.current" : "memory.usage_in_bytes", ret);
}

int unit_get_tasks_current(Unit *u, uint64_t *ret) {
        assert(u);
        assert(ret);

        if (!UNIT_CGROUP_BOOL(u, tasks_accounting))
                return -ENODATA;

        if (!u->cgroup_path)
                return -ENODATA;

        /* The root cgroup doesn't expose this information, let's get it from /proc instead */
        if (unit_has_host_root_cgroup(u))
                return procfs_tasks_get_current(ret);

        if ((u->cgroup_realized_mask & CGROUP_MASK_PIDS) == 0)
                return -ENODATA;

        return cg_get_attribute_as_uint64("pids", u->cgroup_path, "pids.current", ret);
}

static int unit_get_cpu_usage_raw(Unit *u, nsec_t *ret) {
        nsec_t ns;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENODATA;

        /* The root cgroup doesn't expose this information, let's get it from /proc instead */
        if (unit_has_host_root_cgroup(u))
                return procfs_cpu_get_usage(ret);

        /* Requisite controllers for CPU accounting are not enabled */
        if ((get_cpu_accounting_mask() & ~u->cgroup_realized_mask) != 0)
                return -ENODATA;

        r = cg_all_unified();
        if (r < 0)
                return r;
        if (r > 0) {
                _cleanup_free_ char *val = NULL;
                uint64_t us;

                r = cg_get_keyed_attribute("cpu", u->cgroup_path, "cpu.stat", STRV_MAKE("usage_usec"), &val);
                if (IN_SET(r, -ENOENT, -ENXIO))
                        return -ENODATA;
                if (r < 0)
                        return r;

                r = safe_atou64(val, &us);
                if (r < 0)
                        return r;

                ns = us * NSEC_PER_USEC;
        } else
                return cg_get_attribute_as_uint64("cpuacct", u->cgroup_path, "cpuacct.usage", ret);

        *ret = ns;
        return 0;
}

int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
        nsec_t ns;
        int r;

        assert(u);

        /* Retrieve the current CPU usage counter. This will subtract the CPU counter taken when the unit was
         * started. If the cgroup has been removed already, returns the last cached value. To cache the value, simply
         * call this function with a NULL return value. */

        if (!UNIT_CGROUP_BOOL(u, cpu_accounting))
                return -ENODATA;

        r = unit_get_cpu_usage_raw(u, &ns);
        if (r == -ENODATA && u->cpu_usage_last != NSEC_INFINITY) {
                /* If we can't get the CPU usage anymore (because the cgroup was already removed, for example), use our
                 * cached value. */

                if (ret)
                        *ret = u->cpu_usage_last;
                return 0;
        }
        if (r < 0)
                return r;

        if (ns > u->cpu_usage_base)
                ns -= u->cpu_usage_base;
        else
                ns = 0;

        u->cpu_usage_last = ns;
        if (ret)
                *ret = ns;

        return 0;
}

int unit_get_ip_accounting(
                Unit *u,
                CGroupIPAccountingMetric metric,
                uint64_t *ret) {

        uint64_t value;
        int fd, r;

        assert(u);
        assert(metric >= 0);
        assert(metric < _CGROUP_IP_ACCOUNTING_METRIC_MAX);
        assert(ret);

        if (!UNIT_CGROUP_BOOL(u, ip_accounting))
                return -ENODATA;

        fd = IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_INGRESS_PACKETS) ?
                u->ip_accounting_ingress_map_fd :
                u->ip_accounting_egress_map_fd;
        if (fd < 0)
                return -ENODATA;

        if (IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
                r = bpf_firewall_read_accounting(fd, &value, NULL);
        else
                r = bpf_firewall_read_accounting(fd, NULL, &value);
        if (r < 0)
                return r;

        /* Add in additional metrics from a previous runtime. Note that when reexecing/reloading the daemon we compile
         * all BPF programs and maps anew, but serialize the old counters. When deserializing we store them in the
         * ip_accounting_extra[] field, and add them in here transparently. */

        *ret = value + u->ip_accounting_extra[metric];

        return r;
}

static int unit_get_io_accounting_raw(Unit *u, uint64_t ret[static _CGROUP_IO_ACCOUNTING_METRIC_MAX]) {
        static const char *const field_names[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
                [CGROUP_IO_READ_BYTES]       = "rbytes=",
                [CGROUP_IO_WRITE_BYTES]      = "wbytes=",
                [CGROUP_IO_READ_OPERATIONS]  = "rios=",
                [CGROUP_IO_WRITE_OPERATIONS] = "wios=",
        };
        uint64_t acc[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {};
        _cleanup_free_ char *path = NULL;
        _cleanup_fclose_ FILE *f = NULL;
        int r;

        assert(u);

        if (!u->cgroup_path)
                return -ENODATA;

        if (unit_has_host_root_cgroup(u))
                return -ENODATA; /* TODO: return useful data for the top-level cgroup */

        r = cg_all_unified();
        if (r < 0)
                return r;
        if (r == 0) /* TODO: support cgroupv1 */
                return -ENODATA;

        if (!FLAGS_SET(u->cgroup_realized_mask, CGROUP_MASK_IO))
                return -ENODATA;

        r = cg_get_path("io", u->cgroup_path, "io.stat", &path);
        if (r < 0)
                return r;

        f = fopen(path, "re");
        if (!f)
                return -errno;

        for (;;) {
                _cleanup_free_ char *line = NULL;
                const char *p;

                r = read_line(f, LONG_LINE_MAX, &line);
                if (r < 0)
                        return r;
                if (r == 0)
                        break;

                p = line;
                p += strcspn(p, WHITESPACE); /* Skip over device major/minor */
                p += strspn(p, WHITESPACE);  /* Skip over following whitespace */

                for (;;) {
                        _cleanup_free_ char *word = NULL;

                        r = extract_first_word(&p, &word, NULL, EXTRACT_RETAIN_ESCAPE);
                        if (r < 0)
                                return r;
                        if (r == 0)
                                break;

                        for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++) {
                                const char *x;

                                x = startswith(word, field_names[i]);
                                if (x) {
                                        uint64_t w;

                                        r = safe_atou64(x, &w);
                                        if (r < 0)
                                                return r;

                                        /* Sum up the stats of all devices */
                                        acc[i] += w;
                                        break;
                                }
                        }
                }
        }

        memcpy(ret, acc, sizeof(acc));
        return 0;
}

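/* For reference, a cgroupv2 "io.stat" line as parsed above looks like this (sample
 * contents):
 *
 *     8:0 rbytes=1459200 wbytes=314773504 rios=192 wios=353 dbytes=0 dios=0
 *
 * The major:minor prefix is skipped, each "key=value" word is matched against
 * field_names[], and the values are summed across all device lines. */
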
int unit_get_io_accounting(
                Unit *u,
                CGroupIOAccountingMetric metric,
                bool allow_cache,
                uint64_t *ret) {

        uint64_t raw[_CGROUP_IO_ACCOUNTING_METRIC_MAX];
        int r;

        /* Retrieve an IO accounting parameter. This will subtract the counter taken when the unit was started. */

        if (!UNIT_CGROUP_BOOL(u, io_accounting))
                return -ENODATA;

        if (allow_cache && u->io_accounting_last[metric] != UINT64_MAX)
                goto done;

        r = unit_get_io_accounting_raw(u, raw);
        if (r == -ENODATA && u->io_accounting_last[metric] != UINT64_MAX)
                goto done;
        if (r < 0)
                return r;

        for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++) {
                /* Saturated subtraction */
                if (raw[i] > u->io_accounting_base[i])
                        u->io_accounting_last[i] = raw[i] - u->io_accounting_base[i];
                else
                        u->io_accounting_last[i] = 0;
        }

done:
        *ret = u->io_accounting_last[metric];

        return 0;
}

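/* Illustrative only (not part of the original source): the saturated subtraction above guarantees the
 * reported delta never underflows, e.g. when the base was sampled after the raw counter. A minimal caller
 * sketch, assuming a valid Unit pointer "u": */
#if 0
static void show_write_bytes_example(Unit *u) {
        uint64_t b;

        /* allow_cache=true would return the previously computed value without re-reading io.stat */
        if (unit_get_io_accounting(u, CGROUP_IO_WRITE_BYTES, /* allow_cache= */ false, &b) >= 0)
                log_unit_debug(u, "Wrote %" PRIu64 " bytes since the unit started.", b);
}
#endif
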
int unit_reset_cpu_accounting(Unit *u) {
        int r;

        assert(u);

        u->cpu_usage_last = NSEC_INFINITY;

        r = unit_get_cpu_usage_raw(u, &u->cpu_usage_base);
        if (r < 0) {
                u->cpu_usage_base = 0;
                return r;
        }

        return 0;
}

int unit_reset_ip_accounting(Unit *u) {
        int r = 0, q = 0;

        assert(u);

        if (u->ip_accounting_ingress_map_fd >= 0)
                r = bpf_firewall_reset_accounting(u->ip_accounting_ingress_map_fd);

        if (u->ip_accounting_egress_map_fd >= 0)
                q = bpf_firewall_reset_accounting(u->ip_accounting_egress_map_fd);

        zero(u->ip_accounting_extra);

        return r < 0 ? r : q;
}

int unit_reset_io_accounting(Unit *u) {
        int r;

        assert(u);

        for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++)
                u->io_accounting_last[i] = UINT64_MAX;

        r = unit_get_io_accounting_raw(u, u->io_accounting_base);
        if (r < 0) {
                zero(u->io_accounting_base);
                return r;
        }

        return 0;
}

int unit_reset_accounting(Unit *u) {
        int r, q, v;

        assert(u);

        r = unit_reset_cpu_accounting(u);
        q = unit_reset_io_accounting(u);
        v = unit_reset_ip_accounting(u);

        return r < 0 ? r : q < 0 ? q : v;
}

void unit_invalidate_cgroup(Unit *u, CGroupMask m) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return;

        if (m == 0)
                return;

        /* Always invalidate compat pairs together */
        if (m & (CGROUP_MASK_IO | CGROUP_MASK_BLKIO))
                m |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;

        if (m & (CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT))
                m |= CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT;

        if (FLAGS_SET(u->cgroup_invalidated_mask, m)) /* NOP? */
                return;

        u->cgroup_invalidated_mask |= m;
        unit_add_to_cgroup_realize_queue(u);
}

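/* Illustrative only (not part of the original source): because of the compat pairing above, invalidating
 * just the v2 "io" controller also invalidates the legacy "blkio" settings (and vice versa), so a single
 * call like the following marks both for re-realization:
 *
 *     unit_invalidate_cgroup(u, CGROUP_MASK_IO);
 */
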
void unit_invalidate_cgroup_bpf(Unit *u) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return;

        if (u->cgroup_invalidated_mask & CGROUP_MASK_BPF_FIREWALL) /* NOP? */
                return;

        u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
        unit_add_to_cgroup_realize_queue(u);

        /* If we are a slice unit, we also need to compile a new BPF program for all our children, as the IP access
         * list of our children includes our own. */
        if (u->type == UNIT_SLICE) {
                Unit *member;

                UNIT_FOREACH_DEPENDENCY(member, u, UNIT_ATOM_SLICE_OF)
                        unit_invalidate_cgroup_bpf(member);
        }
}

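/* Illustrative only (not part of the original source): the recursion above means that invalidating e.g.
 * system.slice also re-queues every unit placed in it (and, transitively, units in nested slices), since
 * each child's effective IP access list is the concatenation of its own rules and those of its ancestors. */
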
void unit_cgroup_catchup(Unit *u) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return;

        /* We dropped the inotify watch during reexec/reload, so we need to
         * check these as they may have changed.
         * Note that (currently) the kernel doesn't actually update cgroup
         * file modification times, so we can't just serialize and then check
         * the mtime for file(s) we are interested in. */
        (void) unit_check_cgroup_events(u);
        unit_add_to_cgroup_oom_queue(u);
}

bool unit_cgroup_delegate(Unit *u) {
        CGroupContext *c;

        assert(u);

        if (!UNIT_VTABLE(u)->can_delegate)
                return false;

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        return c->delegate;
}

void manager_invalidate_startup_units(Manager *m) {
        Unit *u;

        assert(m);

        SET_FOREACH(u, m->startup_units)
                unit_invalidate_cgroup(u, CGROUP_MASK_CPU|CGROUP_MASK_IO|CGROUP_MASK_BLKIO|CGROUP_MASK_CPUSET);
}

static int unit_get_nice(Unit *u) {
        ExecContext *ec;

        ec = unit_get_exec_context(u);
        return ec ? ec->nice : 0;
}

static uint64_t unit_get_cpu_weight(Unit *u) {
        ManagerState state = manager_state(u->manager);
        CGroupContext *cc;

        cc = unit_get_cgroup_context(u);
        return cc ? cgroup_context_cpu_weight(cc, state) : CGROUP_WEIGHT_DEFAULT;
}

int compare_job_priority(const void *a, const void *b) {
        const Job *x = a, *y = b;
        int nice_x, nice_y;
        uint64_t weight_x, weight_y;
        int ret;

        if ((ret = CMP(x->unit->type, y->unit->type)) != 0)
                return -ret;

        weight_x = unit_get_cpu_weight(x->unit);
        weight_y = unit_get_cpu_weight(y->unit);

        /* Higher CPU weight means higher priority */
        if ((ret = CMP(weight_x, weight_y)) != 0)
                return -ret;

        nice_x = unit_get_nice(x->unit);
        nice_y = unit_get_nice(y->unit);

        /* Lower nice level means higher priority */
        if ((ret = CMP(nice_x, nice_y)) != 0)
                return ret;

        /* Use the unit ID as the final tie-breaker */
        return strcmp(x->unit->id, y->unit->id);
}

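/* Illustrative only (not part of the original source): compare_job_priority() has the classic qsort()
 * comparator signature, so a caller could order a batch of jobs highest-priority-first, assuming a
 * populated Job array "jobs" of length "n": */
#if 0
static void sort_jobs_example(Job *jobs, size_t n) {
        qsort(jobs, n, sizeof(Job), compare_job_priority);
}
#endif
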
int unit_cgroup_freezer_action(Unit *u, FreezerAction action) {
        _cleanup_free_ char *path = NULL;
        FreezerState target, kernel = _FREEZER_STATE_INVALID;
        int r;

        assert(u);
        assert(IN_SET(action, FREEZER_FREEZE, FREEZER_THAW));

        if (!cg_freezer_supported())
                return 0;

        if (!u->cgroup_realized)
                return -EBUSY;

        target = action == FREEZER_FREEZE ? FREEZER_FROZEN : FREEZER_RUNNING;

        r = unit_freezer_state_kernel(u, &kernel);
        if (r < 0)
                log_unit_debug_errno(u, r, "Failed to obtain cgroup freezer state: %m");

        if (target == kernel) {
                u->freezer_state = target;
                return 0;
        }

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.freeze", &path);
        if (r < 0)
                return r;

        log_unit_debug(u, "%s unit.", action == FREEZER_FREEZE ? "Freezing" : "Thawing");

        if (action == FREEZER_FREEZE)
                u->freezer_state = FREEZER_FREEZING;
        else
                u->freezer_state = FREEZER_THAWING;

        r = write_string_file(path, one_zero(action == FREEZER_FREEZE), WRITE_STRING_FILE_DISABLE_BUFFER);
        if (r < 0)
                return r;

        return 0;
}

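/* Illustrative only (not part of the original source): the kernel interface written above is the cgroup v2
 * "cgroup.freeze" attribute, where "1" freezes the whole subtree and "0" thaws it, e.g. from a shell:
 *
 *     echo 1 > /sys/fs/cgroup/system.slice/foo.service/cgroup.freeze
 *
 * The freeze is asynchronous; completion is signalled via the "frozen" key in cgroup.events, which is why
 * the unit is left in FREEZER_FREEZING/FREEZER_THAWING here rather than the target state. */
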
int unit_get_cpuset(Unit *u, CPUSet *cpus, const char *name) {
        _cleanup_free_ char *v = NULL;
        int r;

        assert(u);
        assert(cpus);

        if (!u->cgroup_path)
                return -ENODATA;

        if ((u->cgroup_realized_mask & CGROUP_MASK_CPUSET) == 0)
                return -ENODATA;

        r = cg_all_unified();
        if (r < 0)
                return r;
        if (r == 0)
                return -ENODATA;

        r = cg_get_attribute("cpuset", u->cgroup_path, name, &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        return parse_cpu_set_full(v, cpus, false, NULL, NULL, 0, NULL);
}

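/* Illustrative only (not part of the original source): a sketch of querying the effective CPU set of a
 * unit, assuming a valid Unit pointer "u"; "cpuset.cpus.effective" is a standard cgroup v2 attribute name. */
#if 0
static void show_effective_cpus_example(Unit *u) {
        _cleanup_(cpu_set_reset) CPUSet cpus = {};

        if (unit_get_cpuset(u, &cpus, "cpuset.cpus.effective") >= 0)
                log_unit_debug(u, "Unit may run on %u CPU(s).",
                               (unsigned) CPU_COUNT_S(cpus.allocated, cpus.set));
}
#endif
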
static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
        [CGROUP_DEVICE_POLICY_AUTO]   = "auto",
        [CGROUP_DEVICE_POLICY_CLOSED] = "closed",
        [CGROUP_DEVICE_POLICY_STRICT] = "strict",
};

DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);

static const char* const freezer_action_table[_FREEZER_ACTION_MAX] = {
        [FREEZER_FREEZE] = "freeze",
        [FREEZER_THAW]   = "thaw",
};

DEFINE_STRING_TABLE_LOOKUP(freezer_action, FreezerAction);
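
/* Illustrative only (not part of the original source): DEFINE_STRING_TABLE_LOOKUP() generates the usual
 * pair of converters from the table above, here freezer_action_to_string() and freezer_action_from_string(): */
#if 0
static void string_table_example(void) {
        assert_se(streq(freezer_action_to_string(FREEZER_FREEZE), "freeze"));
        assert_se(freezer_action_from_string("thaw") == FREEZER_THAW);
}
#endif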