[thirdparty/systemd.git] / src / core / cgroup.c
1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
2
3 #include <fcntl.h>
4
5 #include "sd-messages.h"
6
7 #include "af-list.h"
8 #include "alloc-util.h"
9 #include "blockdev-util.h"
10 #include "bpf-devices.h"
11 #include "bpf-firewall.h"
12 #include "bpf-foreign.h"
13 #include "bpf-socket-bind.h"
14 #include "btrfs-util.h"
15 #include "bus-error.h"
16 #include "bus-locator.h"
17 #include "cgroup-setup.h"
18 #include "cgroup-util.h"
19 #include "cgroup.h"
20 #include "devnum-util.h"
21 #include "fd-util.h"
22 #include "fileio.h"
23 #include "in-addr-prefix-util.h"
24 #include "inotify-util.h"
25 #include "io-util.h"
26 #include "ip-protocol-list.h"
27 #include "limits-util.h"
28 #include "nulstr-util.h"
29 #include "parse-util.h"
30 #include "path-util.h"
31 #include "percent-util.h"
32 #include "process-util.h"
33 #include "procfs-util.h"
34 #include "restrict-ifaces.h"
35 #include "special.h"
36 #include "stdio-util.h"
37 #include "string-table.h"
38 #include "string-util.h"
39 #include "virt.h"
40
41 #if BPF_FRAMEWORK
42 #include "bpf-dlopen.h"
43 #include "bpf-link.h"
44 #include "bpf/restrict_fs/restrict-fs-skel.h"
45 #endif
46
47 #define CGROUP_CPU_QUOTA_DEFAULT_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)
48
49 /* Returns the log level to use when cgroup attribute writes fail. When an attribute is missing or we have access
50 * problems we downgrade to LOG_DEBUG. This is supposed to be nice to container managers and kernels which want to mask
51 * out specific attributes from us. */
52 #define LOG_LEVEL_CGROUP_WRITE(r) (IN_SET(abs(r), ENOENT, EROFS, EACCES, EPERM) ? LOG_DEBUG : LOG_WARNING)
53
54 uint64_t tasks_max_resolve(const TasksMax *tasks_max) {
55 if (tasks_max->scale == 0)
56 return tasks_max->value;
57
58 return system_tasks_max_scale(tasks_max->value, tasks_max->scale);
59 }
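/* Illustrative sketch (hypothetical helper, not part of upstream cgroup.c): tasks_max_resolve()
 * returns the stored value directly when scale is 0 (an absolute TasksMax=), and otherwise
 * scales it against the system-wide task limit. E.g. a ratio stored as value=40, scale=100
 * against a limit of 32768 resolves to 40 * 32768 / 100 = 13107. */
static uint64_t example_resolve_ratio(uint64_t system_max) {
        const TasksMax m = { .value = 40, .scale = 100 };
        return m.value * system_max / m.scale;
}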
60
61 bool manager_owns_host_root_cgroup(Manager *m) {
62 assert(m);
63
64 /* Returns true if we are managing the root cgroup. Note that it isn't sufficient to just check whether the
65 * group root path equals "/" since that will also be the case if CLONE_NEWCGROUP is in the mix. Since there
66 * appears to be no nice way to detect whether we are in a CLONE_NEWCGROUP namespace, we instead just check if
67 * we run in any kind of container virtualization. */
68
69 if (MANAGER_IS_USER(m))
70 return false;
71
72 if (detect_container() > 0)
73 return false;
74
75 return empty_or_root(m->cgroup_root);
76 }
77
78 bool unit_has_startup_cgroup_constraints(Unit *u) {
79 assert(u);
80
81 /* Returns true if this unit has any directives which apply during
82 * startup/shutdown phases. */
83
84 CGroupContext *c;
85
86 c = unit_get_cgroup_context(u);
87 if (!c)
88 return false;
89
90 return c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID ||
91 c->startup_io_weight != CGROUP_WEIGHT_INVALID ||
92 c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
93 c->startup_cpuset_cpus.set ||
94 c->startup_cpuset_mems.set ||
95 c->startup_memory_high_set ||
96 c->startup_memory_max_set ||
97 c->startup_memory_swap_max_set ||
98 c->startup_memory_zswap_max_set ||
99 c->startup_memory_low_set;
100 }
101
102 bool unit_has_host_root_cgroup(Unit *u) {
103 assert(u);
104
105 /* Returns whether this unit manages the root cgroup. This will return true if this unit is the root slice and
106 * the manager manages the root cgroup. */
107
108 if (!manager_owns_host_root_cgroup(u->manager))
109 return false;
110
111 return unit_has_name(u, SPECIAL_ROOT_SLICE);
112 }
113
114 static int set_attribute_and_warn(Unit *u, const char *controller, const char *attribute, const char *value) {
115 int r;
116
117 r = cg_set_attribute(controller, u->cgroup_path, attribute, value);
118 if (r < 0)
119 log_unit_full_errno(u, LOG_LEVEL_CGROUP_WRITE(r), r, "Failed to set '%s' attribute on '%s' to '%.*s': %m",
120 strna(attribute), empty_to_root(u->cgroup_path), (int) strcspn(value, NEWLINE), value);
121
122 return r;
123 }
124
125 static void cgroup_compat_warn(void) {
126 static bool cgroup_compat_warned = false;
127
128 if (cgroup_compat_warned)
129 return;
130
131 log_warning("cgroup compatibility translation between legacy and unified hierarchy settings activated. "
132 "See cgroup-compat debug messages for details.");
133
134 cgroup_compat_warned = true;
135 }
136
137 #define log_cgroup_compat(unit, fmt, ...) do { \
138 cgroup_compat_warn(); \
139 log_unit_debug(unit, "cgroup-compat: " fmt, ##__VA_ARGS__); \
140 } while (false)
141
142 void cgroup_context_init(CGroupContext *c) {
143 assert(c);
144
145 /* Initialize everything to the kernel defaults. */
146
147 *c = (CGroupContext) {
148 .cpu_weight = CGROUP_WEIGHT_INVALID,
149 .startup_cpu_weight = CGROUP_WEIGHT_INVALID,
150 .cpu_quota_per_sec_usec = USEC_INFINITY,
151 .cpu_quota_period_usec = USEC_INFINITY,
152
153 .cpu_shares = CGROUP_CPU_SHARES_INVALID,
154 .startup_cpu_shares = CGROUP_CPU_SHARES_INVALID,
155
156 .memory_high = CGROUP_LIMIT_MAX,
157 .startup_memory_high = CGROUP_LIMIT_MAX,
158 .memory_max = CGROUP_LIMIT_MAX,
159 .startup_memory_max = CGROUP_LIMIT_MAX,
160 .memory_swap_max = CGROUP_LIMIT_MAX,
161 .startup_memory_swap_max = CGROUP_LIMIT_MAX,
162 .memory_zswap_max = CGROUP_LIMIT_MAX,
163 .startup_memory_zswap_max = CGROUP_LIMIT_MAX,
164
165 .memory_limit = CGROUP_LIMIT_MAX,
166
167 .io_weight = CGROUP_WEIGHT_INVALID,
168 .startup_io_weight = CGROUP_WEIGHT_INVALID,
169
170 .blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID,
171 .startup_blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID,
172
173 .tasks_max = TASKS_MAX_UNSET,
174
175 .moom_swap = MANAGED_OOM_AUTO,
176 .moom_mem_pressure = MANAGED_OOM_AUTO,
177 .moom_preference = MANAGED_OOM_PREFERENCE_NONE,
178
179 .memory_pressure_watch = _CGROUP_PRESSURE_WATCH_INVALID,
180 .memory_pressure_threshold_usec = USEC_INFINITY,
181 };
182 }
183
184 void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
185 assert(c);
186 assert(a);
187
188 LIST_REMOVE(device_allow, c->device_allow, a);
189 free(a->path);
190 free(a);
191 }
192
193 void cgroup_context_free_io_device_weight(CGroupContext *c, CGroupIODeviceWeight *w) {
194 assert(c);
195 assert(w);
196
197 LIST_REMOVE(device_weights, c->io_device_weights, w);
198 free(w->path);
199 free(w);
200 }
201
202 void cgroup_context_free_io_device_latency(CGroupContext *c, CGroupIODeviceLatency *l) {
203 assert(c);
204 assert(l);
205
206 LIST_REMOVE(device_latencies, c->io_device_latencies, l);
207 free(l->path);
208 free(l);
209 }
210
211 void cgroup_context_free_io_device_limit(CGroupContext *c, CGroupIODeviceLimit *l) {
212 assert(c);
213 assert(l);
214
215 LIST_REMOVE(device_limits, c->io_device_limits, l);
216 free(l->path);
217 free(l);
218 }
219
220 void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w) {
221 assert(c);
222 assert(w);
223
224 LIST_REMOVE(device_weights, c->blockio_device_weights, w);
225 free(w->path);
226 free(w);
227 }
228
229 void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b) {
230 assert(c);
231 assert(b);
232
233 LIST_REMOVE(device_bandwidths, c->blockio_device_bandwidths, b);
234 free(b->path);
235 free(b);
236 }
237
238 void cgroup_context_remove_bpf_foreign_program(CGroupContext *c, CGroupBPFForeignProgram *p) {
239 assert(c);
240 assert(p);
241
242 LIST_REMOVE(programs, c->bpf_foreign_programs, p);
243 free(p->bpffs_path);
244 free(p);
245 }
246
247 void cgroup_context_remove_socket_bind(CGroupSocketBindItem **head) {
248 assert(head);
249
250 while (*head) {
251 CGroupSocketBindItem *h = *head;
252 LIST_REMOVE(socket_bind_items, *head, h);
253 free(h);
254 }
255 }
256
257 void cgroup_context_done(CGroupContext *c) {
258 assert(c);
259
260 while (c->io_device_weights)
261 cgroup_context_free_io_device_weight(c, c->io_device_weights);
262
263 while (c->io_device_latencies)
264 cgroup_context_free_io_device_latency(c, c->io_device_latencies);
265
266 while (c->io_device_limits)
267 cgroup_context_free_io_device_limit(c, c->io_device_limits);
268
269 while (c->blockio_device_weights)
270 cgroup_context_free_blockio_device_weight(c, c->blockio_device_weights);
271
272 while (c->blockio_device_bandwidths)
273 cgroup_context_free_blockio_device_bandwidth(c, c->blockio_device_bandwidths);
274
275 while (c->device_allow)
276 cgroup_context_free_device_allow(c, c->device_allow);
277
278 cgroup_context_remove_socket_bind(&c->socket_bind_allow);
279 cgroup_context_remove_socket_bind(&c->socket_bind_deny);
280
281 c->ip_address_allow = set_free(c->ip_address_allow);
282 c->ip_address_deny = set_free(c->ip_address_deny);
283
284 c->ip_filters_ingress = strv_free(c->ip_filters_ingress);
285 c->ip_filters_egress = strv_free(c->ip_filters_egress);
286
287 while (c->bpf_foreign_programs)
288 cgroup_context_remove_bpf_foreign_program(c, c->bpf_foreign_programs);
289
290 c->restrict_network_interfaces = set_free(c->restrict_network_interfaces);
291
292 cpu_set_reset(&c->cpuset_cpus);
293 cpu_set_reset(&c->startup_cpuset_cpus);
294 cpu_set_reset(&c->cpuset_mems);
295 cpu_set_reset(&c->startup_cpuset_mems);
296
297 c->delegate_subgroup = mfree(c->delegate_subgroup);
298 }
299
300 static int unit_get_kernel_memory_limit(Unit *u, const char *file, uint64_t *ret) {
301 assert(u);
302
303 if (!u->cgroup_realized)
304 return -EOWNERDEAD;
305
306 return cg_get_attribute_as_uint64("memory", u->cgroup_path, file, ret);
307 }
308
309 static int unit_compare_memory_limit(Unit *u, const char *property_name, uint64_t *ret_unit_value, uint64_t *ret_kernel_value) {
310 CGroupContext *c;
311 CGroupMask m;
312 const char *file;
313 uint64_t unit_value;
314 int r;
315
316 /* Compare kernel memcg configuration against our internal systemd state. Unsupported (and will
317 * return -ENODATA) on cgroup v1.
318 *
319 * Returns:
320 *
321 * <0: On error.
322 * 0: If the kernel memory setting doesn't match our configuration.
323 * >0: If the kernel memory setting matches our configuration.
324 *
325 * The following values are only guaranteed to be populated on return >=0:
326 *
327 * - ret_unit_value will contain our internal expected value for the unit, page-aligned.
328 * - ret_kernel_value will contain the actual value presented by the kernel. */
329
330 assert(u);
331
332 r = cg_all_unified();
333 if (r < 0)
334 return log_debug_errno(r, "Failed to determine cgroup hierarchy version: %m");
335
336 /* Unsupported on v1.
337 *
338 * We don't return ENOENT, since that could actually mask a genuine problem where somebody else has
339 * silently masked the controller. */
340 if (r == 0)
341 return -ENODATA;
342
343 /* The root slice doesn't have any controller files, so we can't compare anything. */
344 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
345 return -ENODATA;
346
347 /* It's possible to have MemoryFoo set without systemd wanting to have the memory controller enabled,
348 * for example, in the case of DisableControllers= or cgroup_disable on the kernel command line. To
349 * avoid specious errors in these scenarios, check that we even expect the memory controller to be
350 * enabled at all. */
351 m = unit_get_target_mask(u);
352 if (!FLAGS_SET(m, CGROUP_MASK_MEMORY))
353 return -ENODATA;
354
355 assert_se(c = unit_get_cgroup_context(u));
356
357 bool startup = u->manager && IN_SET(manager_state(u->manager), MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING);
358
359 if (streq(property_name, "MemoryLow")) {
360 unit_value = unit_get_ancestor_memory_low(u);
361 file = "memory.low";
362 } else if (startup && streq(property_name, "StartupMemoryLow")) {
363 unit_value = unit_get_ancestor_startup_memory_low(u);
364 file = "memory.low";
365 } else if (streq(property_name, "MemoryMin")) {
366 unit_value = unit_get_ancestor_memory_min(u);
367 file = "memory.min";
368 } else if (streq(property_name, "MemoryHigh")) {
369 unit_value = c->memory_high;
370 file = "memory.high";
371 } else if (startup && streq(property_name, "StartupMemoryHigh")) {
372 unit_value = c->startup_memory_high;
373 file = "memory.high";
374 } else if (streq(property_name, "MemoryMax")) {
375 unit_value = c->memory_max;
376 file = "memory.max";
377 } else if (startup && streq(property_name, "StartupMemoryMax")) {
378 unit_value = c->startup_memory_max;
379 file = "memory.max";
380 } else if (streq(property_name, "MemorySwapMax")) {
381 unit_value = c->memory_swap_max;
382 file = "memory.swap.max";
383 } else if (startup && streq(property_name, "StartupMemorySwapMax")) {
384 unit_value = c->startup_memory_swap_max;
385 file = "memory.swap.max";
386 } else if (streq(property_name, "MemoryZSwapMax")) {
387 unit_value = c->memory_zswap_max;
388 file = "memory.zswap.max";
389 } else if (startup && streq(property_name, "StartupMemoryZSwapMax")) {
390 unit_value = c->startup_memory_zswap_max;
391 file = "memory.zswap.max";
392 } else
393 return -EINVAL;
394
395 r = unit_get_kernel_memory_limit(u, file, ret_kernel_value);
396 if (r < 0)
397 return log_unit_debug_errno(u, r, "Failed to parse %s: %m", file);
398
399 /* It's intended (soon) in a future kernel to not expose cgroup memory limits rounded to page
400 * boundaries, but instead separate the user-exposed limit, which is whatever userspace told us, from
401 * our internal page-counting. To support those future kernels, just check the value itself first
402 * without any page-alignment. */
403 if (*ret_kernel_value == unit_value) {
404 *ret_unit_value = unit_value;
405 return 1;
406 }
407
408 /* The current kernel behaviour, by comparison, is that even if you write a particular number of
409 * bytes into a cgroup memory file, it always returns that number page-aligned down (since the kernel
410 * internally stores cgroup limits in pages). As such, so long as it aligns properly, everything is
411 * cricket. */
412 if (unit_value != CGROUP_LIMIT_MAX)
413 unit_value = PAGE_ALIGN_DOWN(unit_value);
414
415 *ret_unit_value = unit_value;
416
417 return *ret_kernel_value == *ret_unit_value;
418 }
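/* Worked example (illustrative, assuming 4096-byte pages): if MemoryMax=10000001 is configured,
 * current kernels report memory.max rounded down to a page boundary, i.e.
 * PAGE_ALIGN_DOWN(10000001) = 2441 * 4096 = 9998336. The comparison above therefore aligns our
 * own value the same way before declaring a mismatch. */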
419
420 #define FORMAT_CGROUP_DIFF_MAX 128
421
422 static char *format_cgroup_memory_limit_comparison(char *buf, size_t l, Unit *u, const char *property_name) {
423 uint64_t kval, sval;
424 int r;
425
426 assert(u);
427 assert(buf);
428 assert(l > 0);
429
430 r = unit_compare_memory_limit(u, property_name, &sval, &kval);
431
432 /* memory.swap.max is special in that it relies on CONFIG_MEMCG_SWAP (and the default swapaccount=1).
433 * In the absence of reliably being able to detect whether memcg swap support is available or not,
434 * only complain if the error is not ENOENT. This is similarly the case for memory.zswap.max relying
435 * on CONFIG_ZSWAP. */
436 if (r > 0 || IN_SET(r, -ENODATA, -EOWNERDEAD) ||
437 (r == -ENOENT && STR_IN_SET(property_name,
438 "MemorySwapMax",
439 "StartupMemorySwapMax",
440 "MemoryZSwapMax",
441 "StartupMemoryZSwapMax")))
442 buf[0] = 0;
443 else if (r < 0) {
444 errno = -r;
445 (void) snprintf(buf, l, " (error getting kernel value: %m)");
446 } else
447 (void) snprintf(buf, l, " (different value in kernel: %" PRIu64 ")", kval);
448
449 return buf;
450 }
451
452 void cgroup_context_dump(Unit *u, FILE* f, const char *prefix) {
453 _cleanup_free_ char *disable_controllers_str = NULL, *delegate_controllers_str = NULL, *cpuset_cpus = NULL, *cpuset_mems = NULL, *startup_cpuset_cpus = NULL, *startup_cpuset_mems = NULL;
454 CGroupContext *c;
455 struct in_addr_prefix *iaai;
456
457 char cda[FORMAT_CGROUP_DIFF_MAX];
458 char cdb[FORMAT_CGROUP_DIFF_MAX];
459 char cdc[FORMAT_CGROUP_DIFF_MAX];
460 char cdd[FORMAT_CGROUP_DIFF_MAX];
461 char cde[FORMAT_CGROUP_DIFF_MAX];
462 char cdf[FORMAT_CGROUP_DIFF_MAX];
463 char cdg[FORMAT_CGROUP_DIFF_MAX];
464 char cdh[FORMAT_CGROUP_DIFF_MAX];
465 char cdi[FORMAT_CGROUP_DIFF_MAX];
466 char cdj[FORMAT_CGROUP_DIFF_MAX];
467 char cdk[FORMAT_CGROUP_DIFF_MAX];
468
469 assert(u);
470 assert(f);
471
472 assert_se(c = unit_get_cgroup_context(u));
473
474 prefix = strempty(prefix);
475
476 (void) cg_mask_to_string(c->disable_controllers, &disable_controllers_str);
477 (void) cg_mask_to_string(c->delegate_controllers, &delegate_controllers_str);
478
479 /* "Delegate=" means "yes, but no controllers". Show this as "(none)". */
480 const char *delegate_str = delegate_controllers_str ?: c->delegate ? "(none)" : "no";
481
482 cpuset_cpus = cpu_set_to_range_string(&c->cpuset_cpus);
483 startup_cpuset_cpus = cpu_set_to_range_string(&c->startup_cpuset_cpus);
484 cpuset_mems = cpu_set_to_range_string(&c->cpuset_mems);
485 startup_cpuset_mems = cpu_set_to_range_string(&c->startup_cpuset_mems);
486
487 fprintf(f,
488 "%sCPUAccounting: %s\n"
489 "%sIOAccounting: %s\n"
490 "%sBlockIOAccounting: %s\n"
491 "%sMemoryAccounting: %s\n"
492 "%sTasksAccounting: %s\n"
493 "%sIPAccounting: %s\n"
494 "%sCPUWeight: %" PRIu64 "\n"
495 "%sStartupCPUWeight: %" PRIu64 "\n"
496 "%sCPUShares: %" PRIu64 "\n"
497 "%sStartupCPUShares: %" PRIu64 "\n"
498 "%sCPUQuotaPerSecSec: %s\n"
499 "%sCPUQuotaPeriodSec: %s\n"
500 "%sAllowedCPUs: %s\n"
501 "%sStartupAllowedCPUs: %s\n"
502 "%sAllowedMemoryNodes: %s\n"
503 "%sStartupAllowedMemoryNodes: %s\n"
504 "%sIOWeight: %" PRIu64 "\n"
505 "%sStartupIOWeight: %" PRIu64 "\n"
506 "%sBlockIOWeight: %" PRIu64 "\n"
507 "%sStartupBlockIOWeight: %" PRIu64 "\n"
508 "%sDefaultMemoryMin: %" PRIu64 "\n"
509 "%sDefaultMemoryLow: %" PRIu64 "\n"
510 "%sMemoryMin: %" PRIu64 "%s\n"
511 "%sMemoryLow: %" PRIu64 "%s\n"
512 "%sStartupMemoryLow: %" PRIu64 "%s\n"
513 "%sMemoryHigh: %" PRIu64 "%s\n"
514 "%sStartupMemoryHigh: %" PRIu64 "%s\n"
515 "%sMemoryMax: %" PRIu64 "%s\n"
516 "%sStartupMemoryMax: %" PRIu64 "%s\n"
517 "%sMemorySwapMax: %" PRIu64 "%s\n"
518 "%sStartupMemorySwapMax: %" PRIu64 "%s\n"
519 "%sMemoryZSwapMax: %" PRIu64 "%s\n"
520 "%sStartupMemoryZSwapMax: %" PRIu64 "%s\n"
521 "%sMemoryLimit: %" PRIu64 "\n"
522 "%sTasksMax: %" PRIu64 "\n"
523 "%sDevicePolicy: %s\n"
524 "%sDisableControllers: %s\n"
525 "%sDelegate: %s\n"
526 "%sManagedOOMSwap: %s\n"
527 "%sManagedOOMMemoryPressure: %s\n"
528 "%sManagedOOMMemoryPressureLimit: " PERMYRIAD_AS_PERCENT_FORMAT_STR "\n"
529 "%sManagedOOMPreference: %s\n"
530 "%sMemoryPressureWatch: %s\n",
531 prefix, yes_no(c->cpu_accounting),
532 prefix, yes_no(c->io_accounting),
533 prefix, yes_no(c->blockio_accounting),
534 prefix, yes_no(c->memory_accounting),
535 prefix, yes_no(c->tasks_accounting),
536 prefix, yes_no(c->ip_accounting),
537 prefix, c->cpu_weight,
538 prefix, c->startup_cpu_weight,
539 prefix, c->cpu_shares,
540 prefix, c->startup_cpu_shares,
541 prefix, FORMAT_TIMESPAN(c->cpu_quota_per_sec_usec, 1),
542 prefix, FORMAT_TIMESPAN(c->cpu_quota_period_usec, 1),
543 prefix, strempty(cpuset_cpus),
544 prefix, strempty(startup_cpuset_cpus),
545 prefix, strempty(cpuset_mems),
546 prefix, strempty(startup_cpuset_mems),
547 prefix, c->io_weight,
548 prefix, c->startup_io_weight,
549 prefix, c->blockio_weight,
550 prefix, c->startup_blockio_weight,
551 prefix, c->default_memory_min,
552 prefix, c->default_memory_low,
553 prefix, c->memory_min, format_cgroup_memory_limit_comparison(cda, sizeof(cda), u, "MemoryMin"),
554 prefix, c->memory_low, format_cgroup_memory_limit_comparison(cdb, sizeof(cdb), u, "MemoryLow"),
555 prefix, c->startup_memory_low, format_cgroup_memory_limit_comparison(cdc, sizeof(cdc), u, "StartupMemoryLow"),
556 prefix, c->memory_high, format_cgroup_memory_limit_comparison(cdd, sizeof(cdd), u, "MemoryHigh"),
557 prefix, c->startup_memory_high, format_cgroup_memory_limit_comparison(cde, sizeof(cde), u, "StartupMemoryHigh"),
558 prefix, c->memory_max, format_cgroup_memory_limit_comparison(cdf, sizeof(cdf), u, "MemoryMax"),
559 prefix, c->startup_memory_max, format_cgroup_memory_limit_comparison(cdg, sizeof(cdg), u, "StartupMemoryMax"),
560 prefix, c->memory_swap_max, format_cgroup_memory_limit_comparison(cdh, sizeof(cdh), u, "MemorySwapMax"),
561 prefix, c->startup_memory_swap_max, format_cgroup_memory_limit_comparison(cdi, sizeof(cdi), u, "StartupMemorySwapMax"),
562 prefix, c->memory_zswap_max, format_cgroup_memory_limit_comparison(cdj, sizeof(cdj), u, "MemoryZSwapMax"),
563 prefix, c->startup_memory_zswap_max, format_cgroup_memory_limit_comparison(cdk, sizeof(cdk), u, "StartupMemoryZSwapMax"),
564 prefix, c->memory_limit,
565 prefix, tasks_max_resolve(&c->tasks_max),
566 prefix, cgroup_device_policy_to_string(c->device_policy),
567 prefix, strempty(disable_controllers_str),
568 prefix, delegate_str,
569 prefix, managed_oom_mode_to_string(c->moom_swap),
570 prefix, managed_oom_mode_to_string(c->moom_mem_pressure),
571 prefix, PERMYRIAD_AS_PERCENT_FORMAT_VAL(UINT32_SCALE_TO_PERMYRIAD(c->moom_mem_pressure_limit)),
572 prefix, managed_oom_preference_to_string(c->moom_preference),
573 prefix, cgroup_pressure_watch_to_string(c->memory_pressure_watch));
574
575 if (c->delegate_subgroup)
576 fprintf(f, "%sDelegateSubgroup: %s\n",
577 prefix, c->delegate_subgroup);
578
579 if (c->memory_pressure_threshold_usec != USEC_INFINITY)
580 fprintf(f, "%sMemoryPressureThresholdSec: %s\n",
581 prefix, FORMAT_TIMESPAN(c->memory_pressure_threshold_usec, 1));
582
583 LIST_FOREACH(device_allow, a, c->device_allow)
584 fprintf(f,
585 "%sDeviceAllow: %s %s%s%s\n",
586 prefix,
587 a->path,
588 a->r ? "r" : "", a->w ? "w" : "", a->m ? "m" : "");
589
590 LIST_FOREACH(device_weights, iw, c->io_device_weights)
591 fprintf(f,
592 "%sIODeviceWeight: %s %" PRIu64 "\n",
593 prefix,
594 iw->path,
595 iw->weight);
596
597 LIST_FOREACH(device_latencies, l, c->io_device_latencies)
598 fprintf(f,
599 "%sIODeviceLatencyTargetSec: %s %s\n",
600 prefix,
601 l->path,
602 FORMAT_TIMESPAN(l->target_usec, 1));
603
604 LIST_FOREACH(device_limits, il, c->io_device_limits)
605 for (CGroupIOLimitType type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
606 if (il->limits[type] != cgroup_io_limit_defaults[type])
607 fprintf(f,
608 "%s%s: %s %s\n",
609 prefix,
610 cgroup_io_limit_type_to_string(type),
611 il->path,
612 FORMAT_BYTES(il->limits[type]));
613
614 LIST_FOREACH(device_weights, w, c->blockio_device_weights)
615 fprintf(f,
616 "%sBlockIODeviceWeight: %s %" PRIu64,
617 prefix,
618 w->path,
619 w->weight);
620
621 LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
622 if (b->rbps != CGROUP_LIMIT_MAX)
623 fprintf(f,
624 "%sBlockIOReadBandwidth: %s %s\n",
625 prefix,
626 b->path,
627 FORMAT_BYTES(b->rbps));
628 if (b->wbps != CGROUP_LIMIT_MAX)
629 fprintf(f,
630 "%sBlockIOWriteBandwidth: %s %s\n",
631 prefix,
632 b->path,
633 FORMAT_BYTES(b->wbps));
634 }
635
636 SET_FOREACH(iaai, c->ip_address_allow)
637 fprintf(f, "%sIPAddressAllow: %s\n", prefix,
638 IN_ADDR_PREFIX_TO_STRING(iaai->family, &iaai->address, iaai->prefixlen));
639 SET_FOREACH(iaai, c->ip_address_deny)
640 fprintf(f, "%sIPAddressDeny: %s\n", prefix,
641 IN_ADDR_PREFIX_TO_STRING(iaai->family, &iaai->address, iaai->prefixlen));
642
643 STRV_FOREACH(path, c->ip_filters_ingress)
644 fprintf(f, "%sIPIngressFilterPath: %s\n", prefix, *path);
645 STRV_FOREACH(path, c->ip_filters_egress)
646 fprintf(f, "%sIPEgressFilterPath: %s\n", prefix, *path);
647
648 LIST_FOREACH(programs, p, c->bpf_foreign_programs)
649 fprintf(f, "%sBPFProgram: %s:%s",
650 prefix, bpf_cgroup_attach_type_to_string(p->attach_type), p->bpffs_path);
651
652 if (c->socket_bind_allow) {
653 fprintf(f, "%sSocketBindAllow:", prefix);
654 LIST_FOREACH(socket_bind_items, bi, c->socket_bind_allow)
655 cgroup_context_dump_socket_bind_item(bi, f);
656 fputc('\n', f);
657 }
658
659 if (c->socket_bind_deny) {
660 fprintf(f, "%sSocketBindDeny:", prefix);
661 LIST_FOREACH(socket_bind_items, bi, c->socket_bind_deny)
662 cgroup_context_dump_socket_bind_item(bi, f);
663 fputc('\n', f);
664 }
665
666 if (c->restrict_network_interfaces) {
667 char *iface;
668 SET_FOREACH(iface, c->restrict_network_interfaces)
669 fprintf(f, "%sRestrictNetworkInterfaces: %s\n", prefix, iface);
670 }
671 }
672
673 void cgroup_context_dump_socket_bind_item(const CGroupSocketBindItem *item, FILE *f) {
674 const char *family, *colon1, *protocol = "", *colon2 = "";
675
676 family = strempty(af_to_ipv4_ipv6(item->address_family));
677 colon1 = isempty(family) ? "" : ":";
678
679 if (item->ip_protocol != 0) {
680 protocol = ip_protocol_to_tcp_udp(item->ip_protocol);
681 colon2 = ":";
682 }
683
684 if (item->nr_ports == 0)
685 fprintf(f, " %s%s%s%sany", family, colon1, protocol, colon2);
686 else if (item->nr_ports == 1)
687 fprintf(f, " %s%s%s%s%" PRIu16, family, colon1, protocol, colon2, item->port_min);
688 else {
689 uint16_t port_max = item->port_min + item->nr_ports - 1;
690 fprintf(f, " %s%s%s%s%" PRIu16 "-%" PRIu16, family, colon1, protocol, colon2,
691 item->port_min, port_max);
692 }
693 }
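/* Example output (illustrative; the exact family string comes from af_to_ipv4_ipv6()): an item
 * with AF_INET, IPPROTO_TCP, port_min=80, nr_ports=11 is dumped as something like
 * " ipv4:tcp:80-90", while nr_ports=0 yields " ipv4:tcp:any". */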
694
695 int cgroup_add_device_allow(CGroupContext *c, const char *dev, const char *mode) {
696 _cleanup_free_ CGroupDeviceAllow *a = NULL;
697 _cleanup_free_ char *d = NULL;
698
699 assert(c);
700 assert(dev);
701 assert(isempty(mode) || in_charset(mode, "rwm"));
702
703 a = new(CGroupDeviceAllow, 1);
704 if (!a)
705 return -ENOMEM;
706
707 d = strdup(dev);
708 if (!d)
709 return -ENOMEM;
710
711 *a = (CGroupDeviceAllow) {
712 .path = TAKE_PTR(d),
713 .r = isempty(mode) || strchr(mode, 'r'),
714 .w = isempty(mode) || strchr(mode, 'w'),
715 .m = isempty(mode) || strchr(mode, 'm'),
716 };
717
718 LIST_PREPEND(device_allow, c->device_allow, a);
719 TAKE_PTR(a);
720
721 return 0;
722 }
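/* Illustrative usage sketch (hypothetical helper, not part of upstream): how a parsed
 * "DeviceAllow=/dev/null rw" directive could be recorded in a CGroupContext. An empty
 * mode string would grant "rwm". */
static int example_allow_dev_null(CGroupContext *c) {
        return cgroup_add_device_allow(c, "/dev/null", "rw");
}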
723
724 int cgroup_add_bpf_foreign_program(CGroupContext *c, uint32_t attach_type, const char *bpffs_path) {
725 CGroupBPFForeignProgram *p;
726 _cleanup_free_ char *d = NULL;
727
728 assert(c);
729 assert(bpffs_path);
730
731 if (!path_is_normalized(bpffs_path) || !path_is_absolute(bpffs_path))
732 return log_error_errno(SYNTHETIC_ERRNO(EINVAL), "Path is not normalized: %m");
733
734 d = strdup(bpffs_path);
735 if (!d)
736 return log_oom();
737
738 p = new(CGroupBPFForeignProgram, 1);
739 if (!p)
740 return log_oom();
741
742 *p = (CGroupBPFForeignProgram) {
743 .attach_type = attach_type,
744 .bpffs_path = TAKE_PTR(d),
745 };
746
747 LIST_PREPEND(programs, c->bpf_foreign_programs, TAKE_PTR(p));
748
749 return 0;
750 }
751
752 #define UNIT_DEFINE_ANCESTOR_MEMORY_LOOKUP(entry) \
753 uint64_t unit_get_ancestor_##entry(Unit *u) { \
754 CGroupContext *c; \
755 \
756 /* 1. Is entry set in this unit? If so, use that. \
757 * 2. Is the default for this entry set in any \
758 * ancestor? If so, use that. \
759 * 3. Otherwise, return CGROUP_LIMIT_MIN. */ \
760 \
761 assert(u); \
762 \
763 c = unit_get_cgroup_context(u); \
764 if (c && c->entry##_set) \
765 return c->entry; \
766 \
767 while ((u = UNIT_GET_SLICE(u))) { \
768 c = unit_get_cgroup_context(u); \
769 if (c && c->default_##entry##_set) \
770 return c->default_##entry; \
771 } \
772 \
773 /* We've reached the root, but nobody had default for \
774 * this entry set, so set it to the kernel default. */ \
775 return CGROUP_LIMIT_MIN; \
776 }
777
778 UNIT_DEFINE_ANCESTOR_MEMORY_LOOKUP(memory_low);
779 UNIT_DEFINE_ANCESTOR_MEMORY_LOOKUP(startup_memory_low);
780 UNIT_DEFINE_ANCESTOR_MEMORY_LOOKUP(memory_min);
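/* Worked example (illustrative, hypothetical unit names): for foo.service without MemoryLow= of
 * its own, placed in bar.slice with DefaultMemoryLow=256M, unit_get_ancestor_memory_low()
 * returns 268435456; if neither the unit nor any ancestor sets anything, it returns
 * CGROUP_LIMIT_MIN, i.e. 0, the kernel default. */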
781
782 static void unit_set_xattr_graceful(Unit *u, const char *cgroup_path, const char *name, const void *data, size_t size) {
783 int r;
784
785 assert(u);
786 assert(name);
787
788 if (!cgroup_path) {
789 if (!u->cgroup_path)
790 return;
791
792 cgroup_path = u->cgroup_path;
793 }
794
795 r = cg_set_xattr(SYSTEMD_CGROUP_CONTROLLER, cgroup_path, name, data, size, 0);
796 if (r < 0)
797 log_unit_debug_errno(u, r, "Failed to set '%s' xattr on control group %s, ignoring: %m", name, empty_to_root(cgroup_path));
798 }
799
800 static void unit_remove_xattr_graceful(Unit *u, const char *cgroup_path, const char *name) {
801 int r;
802
803 assert(u);
804 assert(name);
805
806 if (!cgroup_path) {
807 if (!u->cgroup_path)
808 return;
809
810 cgroup_path = u->cgroup_path;
811 }
812
813 r = cg_remove_xattr(SYSTEMD_CGROUP_CONTROLLER, cgroup_path, name);
814 if (r < 0 && !ERRNO_IS_XATTR_ABSENT(r))
815 log_unit_debug_errno(u, r, "Failed to remove '%s' xattr flag on control group %s, ignoring: %m", name, empty_to_root(cgroup_path));
816 }
817
818 void cgroup_oomd_xattr_apply(Unit *u, const char *cgroup_path) {
819 CGroupContext *c;
820
821 assert(u);
822
823 c = unit_get_cgroup_context(u);
824 if (!c)
825 return;
826
827 if (c->moom_preference == MANAGED_OOM_PREFERENCE_OMIT)
828 unit_set_xattr_graceful(u, cgroup_path, "user.oomd_omit", "1", 1);
829
830 if (c->moom_preference == MANAGED_OOM_PREFERENCE_AVOID)
831 unit_set_xattr_graceful(u, cgroup_path, "user.oomd_avoid", "1", 1);
832
833 if (c->moom_preference != MANAGED_OOM_PREFERENCE_AVOID)
834 unit_remove_xattr_graceful(u, cgroup_path, "user.oomd_avoid");
835
836 if (c->moom_preference != MANAGED_OOM_PREFERENCE_OMIT)
837 unit_remove_xattr_graceful(u, cgroup_path, "user.oomd_omit");
838 }
839
840 int cgroup_log_xattr_apply(Unit *u, const char *cgroup_path) {
841 ExecContext *c;
842 size_t len, allowed_patterns_len, denied_patterns_len;
843 _cleanup_free_ char *patterns = NULL, *allowed_patterns = NULL, *denied_patterns = NULL;
844 char *last;
845 int r;
846
847 assert(u);
848
849 c = unit_get_exec_context(u);
850 if (!c)
851 /* Some unit types have a cgroup context but no exec context, so we do not log
852 * any error here to avoid confusion. */
853 return 0;
854
855 if (set_isempty(c->log_filter_allowed_patterns) && set_isempty(c->log_filter_denied_patterns)) {
856 unit_remove_xattr_graceful(u, cgroup_path, "user.journald_log_filter_patterns");
857 return 0;
858 }
859
860 r = set_make_nulstr(c->log_filter_allowed_patterns, &allowed_patterns, &allowed_patterns_len);
861 if (r < 0)
862 return log_debug_errno(r, "Failed to make nulstr from set: %m");
863
864 r = set_make_nulstr(c->log_filter_denied_patterns, &denied_patterns, &denied_patterns_len);
865 if (r < 0)
866 return log_debug_errno(r, "Failed to make nulstr from set: %m");
867
868 /* Use nul character separated strings without trailing nul */
869 allowed_patterns_len = LESS_BY(allowed_patterns_len, 1u);
870 denied_patterns_len = LESS_BY(denied_patterns_len, 1u);
871
872 len = allowed_patterns_len + 1 + denied_patterns_len;
873 patterns = new(char, len);
874 if (!patterns)
875 return log_oom_debug();
876
877 last = mempcpy_safe(patterns, allowed_patterns, allowed_patterns_len);
878 *(last++) = '\xff';
879 memcpy_safe(last, denied_patterns, denied_patterns_len);
880
881 unit_set_xattr_graceful(u, cgroup_path, "user.journald_log_filter_patterns", patterns, len);
882
883 return 0;
884 }
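/* Worked example (illustrative, hypothetical pattern sets; set iteration order is not
 * guaranteed): with allowed patterns { "foo", "bar" } and denied patterns { "baz" }, the xattr
 * payload is the 11 bytes "foo\0bar" + '\xff' + "baz" -- NUL-separated entries within each half,
 * the two halves joined by a single 0xff byte, and no trailing NUL. */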
885
886 static void cgroup_xattr_apply(Unit *u) {
887 bool b;
888
889 assert(u);
890
891 /* The 'user.*' xattrs can be set from a user manager. */
892 cgroup_oomd_xattr_apply(u, u->cgroup_path);
893 cgroup_log_xattr_apply(u, u->cgroup_path);
894
895 if (!MANAGER_IS_SYSTEM(u->manager))
896 return;
897
898 b = !sd_id128_is_null(u->invocation_id);
899 FOREACH_STRING(xn, "trusted.invocation_id", "user.invocation_id") {
900 if (b)
901 unit_set_xattr_graceful(u, NULL, xn, SD_ID128_TO_STRING(u->invocation_id), 32);
902 else
903 unit_remove_xattr_graceful(u, NULL, xn);
904 }
905
906 /* Indicate on the cgroup whether delegation is on, via an xattr. This is best-effort, as old kernels
907 * didn't support xattrs on cgroups at all. Later they got support for setting 'trusted.*' xattrs,
908 * and even later 'user.*' xattrs. We started setting this field when 'trusted.*' was added, and
909 * given this is now pretty much API, let's continue to support that. But also set 'user.*' as well,
910 * since it is readable by any user, not just CAP_SYS_ADMIN. This hence comes with slightly weaker
911 * security (as users who got delegated cgroups could turn it off if they like), but this shouldn't
912 * be a big problem given this communicates delegation state to clients, but the manager never reads
913 * it. */
914 b = unit_cgroup_delegate(u);
915 FOREACH_STRING(xn, "trusted.delegate", "user.delegate") {
916 if (b)
917 unit_set_xattr_graceful(u, NULL, xn, "1", 1);
918 else
919 unit_remove_xattr_graceful(u, NULL, xn);
920 }
921 }
922
923 static int lookup_block_device(const char *p, dev_t *ret) {
924 dev_t rdev, dev = 0;
925 mode_t mode;
926 int r;
927
928 assert(p);
929 assert(ret);
930
931 r = device_path_parse_major_minor(p, &mode, &rdev);
932 if (r == -ENODEV) { /* not a parsable device node, need to go to disk */
933 struct stat st;
934
935 if (stat(p, &st) < 0)
936 return log_warning_errno(errno, "Couldn't stat device '%s': %m", p);
937
938 mode = st.st_mode;
939 rdev = st.st_rdev;
940 dev = st.st_dev;
941 } else if (r < 0)
942 return log_warning_errno(r, "Failed to parse major/minor from path '%s': %m", p);
943
944 if (S_ISCHR(mode))
945 return log_warning_errno(SYNTHETIC_ERRNO(ENOTBLK),
946 "Device node '%s' is a character device, but block device needed.", p);
947 if (S_ISBLK(mode))
948 *ret = rdev;
949 else if (major(dev) != 0)
950 *ret = dev; /* If this is not a device node then use the block device this file is stored on */
951 else {
952 /* If this is btrfs, getting the backing block device is a bit harder */
953 r = btrfs_get_block_device(p, ret);
954 if (r == -ENOTTY)
955 return log_warning_errno(SYNTHETIC_ERRNO(ENODEV),
956 "'%s' is not a block device node, and file system block device cannot be determined or is not local.", p);
957 if (r < 0)
958 return log_warning_errno(r, "Failed to determine block device backing btrfs file system '%s': %m", p);
959 }
960
961 /* If this is a LUKS/DM device, recursively try to get the originating block device */
962 while (block_get_originating(*ret, ret) > 0);
963
964 /* If this is a partition, try to get the originating block device */
965 (void) block_get_whole_disk(*ret, ret);
966 return 0;
967 }
968
969 static bool cgroup_context_has_cpu_weight(CGroupContext *c) {
970 return c->cpu_weight != CGROUP_WEIGHT_INVALID ||
971 c->startup_cpu_weight != CGROUP_WEIGHT_INVALID;
972 }
973
974 static bool cgroup_context_has_cpu_shares(CGroupContext *c) {
975 return c->cpu_shares != CGROUP_CPU_SHARES_INVALID ||
976 c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID;
977 }
978
979 static bool cgroup_context_has_allowed_cpus(CGroupContext *c) {
980 return c->cpuset_cpus.set || c->startup_cpuset_cpus.set;
981 }
982
983 static bool cgroup_context_has_allowed_mems(CGroupContext *c) {
984 return c->cpuset_mems.set || c->startup_cpuset_mems.set;
985 }
986
987 static uint64_t cgroup_context_cpu_weight(CGroupContext *c, ManagerState state) {
988 if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING) &&
989 c->startup_cpu_weight != CGROUP_WEIGHT_INVALID)
990 return c->startup_cpu_weight;
991 else if (c->cpu_weight != CGROUP_WEIGHT_INVALID)
992 return c->cpu_weight;
993 else
994 return CGROUP_WEIGHT_DEFAULT;
995 }
996
997 static uint64_t cgroup_context_cpu_shares(CGroupContext *c, ManagerState state) {
998 if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING) &&
999 c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID)
1000 return c->startup_cpu_shares;
1001 else if (c->cpu_shares != CGROUP_CPU_SHARES_INVALID)
1002 return c->cpu_shares;
1003 else
1004 return CGROUP_CPU_SHARES_DEFAULT;
1005 }
1006
1007 static CPUSet *cgroup_context_allowed_cpus(CGroupContext *c, ManagerState state) {
1008 if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING) &&
1009 c->startup_cpuset_cpus.set)
1010 return &c->startup_cpuset_cpus;
1011 else
1012 return &c->cpuset_cpus;
1013 }
1014
1015 static CPUSet *cgroup_context_allowed_mems(CGroupContext *c, ManagerState state) {
1016 if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING) &&
1017 c->startup_cpuset_mems.set)
1018 return &c->startup_cpuset_mems;
1019 else
1020 return &c->cpuset_mems;
1021 }
1022
1023 usec_t cgroup_cpu_adjust_period(usec_t period, usec_t quota, usec_t resolution, usec_t max_period) {
1024 /* kernel uses a minimum resolution of 1ms, so both period and (quota * period)
1025 * need to be higher than that boundary. quota is specified in USecPerSec.
1026 * Additionally, period must be at most max_period. */
1027 assert(quota > 0);
1028
1029 return MIN(MAX3(period, resolution, resolution * USEC_PER_SEC / quota), max_period);
1030 }
1031
1032 static usec_t cgroup_cpu_adjust_period_and_log(Unit *u, usec_t period, usec_t quota) {
1033 usec_t new_period;
1034
1035 if (quota == USEC_INFINITY)
1036 /* Always use default period for infinity quota. */
1037 return CGROUP_CPU_QUOTA_DEFAULT_PERIOD_USEC;
1038
1039 if (period == USEC_INFINITY)
1040 /* Default period was requested. */
1041 period = CGROUP_CPU_QUOTA_DEFAULT_PERIOD_USEC;
1042
1043 /* Clamp to interval [1ms, 1s] */
1044 new_period = cgroup_cpu_adjust_period(period, quota, USEC_PER_MSEC, USEC_PER_SEC);
1045
1046 if (new_period != period) {
1047 log_unit_full(u, u->warned_clamping_cpu_quota_period ? LOG_DEBUG : LOG_WARNING,
1048 "Clamping CPU interval for cpu.max: period is now %s",
1049 FORMAT_TIMESPAN(new_period, 1));
1050 u->warned_clamping_cpu_quota_period = true;
1051 }
1052
1053 return new_period;
1054 }
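/* Worked example (illustrative): CPUQuota=0.5% gives quota = 5000 usec per second. With the
 * default 100ms period, quota * period / USEC_PER_SEC would be only 500us, below the kernel's
 * 1ms resolution, so cgroup_cpu_adjust_period() raises the period to
 * resolution * USEC_PER_SEC / quota = 1000 * 1000000 / 5000 = 200000us = 200ms (still below the
 * 1s cap), and the clamping is logged once per unit. */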
1055
1056 static void cgroup_apply_unified_cpu_weight(Unit *u, uint64_t weight) {
1057 char buf[DECIMAL_STR_MAX(uint64_t) + 2];
1058
1059 if (weight == CGROUP_WEIGHT_IDLE)
1060 return;
1061 xsprintf(buf, "%" PRIu64 "\n", weight);
1062 (void) set_attribute_and_warn(u, "cpu", "cpu.weight", buf);
1063 }
1064
1065 static void cgroup_apply_unified_cpu_idle(Unit *u, uint64_t weight) {
1066 int r;
1067 bool is_idle;
1068 const char *idle_val;
1069
1070 is_idle = weight == CGROUP_WEIGHT_IDLE;
1071 idle_val = one_zero(is_idle);
1072 r = cg_set_attribute("cpu", u->cgroup_path, "cpu.idle", idle_val);
1073 if (r < 0 && (r != -ENOENT || is_idle))
1074 log_unit_full_errno(u, LOG_LEVEL_CGROUP_WRITE(r), r, "Failed to set '%s' attribute on '%s' to '%s': %m",
1075 "cpu.idle", empty_to_root(u->cgroup_path), idle_val);
1076 }
1077
1078 static void cgroup_apply_unified_cpu_quota(Unit *u, usec_t quota, usec_t period) {
1079 char buf[(DECIMAL_STR_MAX(usec_t) + 1) * 2 + 1];
1080
1081 period = cgroup_cpu_adjust_period_and_log(u, period, quota);
1082 if (quota != USEC_INFINITY)
1083 xsprintf(buf, USEC_FMT " " USEC_FMT "\n",
1084 MAX(quota * period / USEC_PER_SEC, USEC_PER_MSEC), period);
1085 else
1086 xsprintf(buf, "max " USEC_FMT "\n", period);
1087 (void) set_attribute_and_warn(u, "cpu", "cpu.max", buf);
1088 }
1089
1090 static void cgroup_apply_legacy_cpu_shares(Unit *u, uint64_t shares) {
1091 char buf[DECIMAL_STR_MAX(uint64_t) + 2];
1092
1093 xsprintf(buf, "%" PRIu64 "\n", shares);
1094 (void) set_attribute_and_warn(u, "cpu", "cpu.shares", buf);
1095 }
1096
1097 static void cgroup_apply_legacy_cpu_quota(Unit *u, usec_t quota, usec_t period) {
1098 char buf[DECIMAL_STR_MAX(usec_t) + 2];
1099
1100 period = cgroup_cpu_adjust_period_and_log(u, period, quota);
1101
1102 xsprintf(buf, USEC_FMT "\n", period);
1103 (void) set_attribute_and_warn(u, "cpu", "cpu.cfs_period_us", buf);
1104
1105 if (quota != USEC_INFINITY) {
1106 xsprintf(buf, USEC_FMT "\n", MAX(quota * period / USEC_PER_SEC, USEC_PER_MSEC));
1107 (void) set_attribute_and_warn(u, "cpu", "cpu.cfs_quota_us", buf);
1108 } else
1109 (void) set_attribute_and_warn(u, "cpu", "cpu.cfs_quota_us", "-1\n");
1110 }
1111
1112 static uint64_t cgroup_cpu_shares_to_weight(uint64_t shares) {
1113 return CLAMP(shares * CGROUP_WEIGHT_DEFAULT / CGROUP_CPU_SHARES_DEFAULT,
1114 CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
1115 }
1116
1117 static uint64_t cgroup_cpu_weight_to_shares(uint64_t weight) {
1118 /* we don't support idle in cgroupv1 */
1119 if (weight == CGROUP_WEIGHT_IDLE)
1120 return CGROUP_CPU_SHARES_MIN;
1121
1122 return CLAMP(weight * CGROUP_CPU_SHARES_DEFAULT / CGROUP_WEIGHT_DEFAULT,
1123 CGROUP_CPU_SHARES_MIN, CGROUP_CPU_SHARES_MAX);
1124 }
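/* Worked example (illustrative, assuming the usual defaults CGROUP_WEIGHT_DEFAULT=100 and
 * CGROUP_CPU_SHARES_DEFAULT=1024): CPUShares=2048 maps to a cpu.weight of
 * 2048 * 100 / 1024 = 200, and CPUWeight=200 maps back to cpu.shares of 200 * 1024 / 100 = 2048,
 * each clamped to the respective controller's valid range. */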
1125
1126 static void cgroup_apply_unified_cpuset(Unit *u, const CPUSet *cpus, const char *name) {
1127 _cleanup_free_ char *buf = NULL;
1128
1129 buf = cpu_set_to_range_string(cpus);
1130 if (!buf) {
1131 log_oom();
1132 return;
1133 }
1134
1135 (void) set_attribute_and_warn(u, "cpuset", name, buf);
1136 }
1137
1138 static bool cgroup_context_has_io_config(CGroupContext *c) {
1139 return c->io_accounting ||
1140 c->io_weight != CGROUP_WEIGHT_INVALID ||
1141 c->startup_io_weight != CGROUP_WEIGHT_INVALID ||
1142 c->io_device_weights ||
1143 c->io_device_latencies ||
1144 c->io_device_limits;
1145 }
1146
1147 static bool cgroup_context_has_blockio_config(CGroupContext *c) {
1148 return c->blockio_accounting ||
1149 c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
1150 c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
1151 c->blockio_device_weights ||
1152 c->blockio_device_bandwidths;
1153 }
1154
1155 static uint64_t cgroup_context_io_weight(CGroupContext *c, ManagerState state) {
1156 if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING) &&
1157 c->startup_io_weight != CGROUP_WEIGHT_INVALID)
1158 return c->startup_io_weight;
1159 if (c->io_weight != CGROUP_WEIGHT_INVALID)
1160 return c->io_weight;
1161 return CGROUP_WEIGHT_DEFAULT;
1162 }
1163
1164 static uint64_t cgroup_context_blkio_weight(CGroupContext *c, ManagerState state) {
1165 if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING) &&
1166 c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
1167 return c->startup_blockio_weight;
1168 if (c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
1169 return c->blockio_weight;
1170 return CGROUP_BLKIO_WEIGHT_DEFAULT;
1171 }
1172
1173 static uint64_t cgroup_weight_blkio_to_io(uint64_t blkio_weight) {
1174 return CLAMP(blkio_weight * CGROUP_WEIGHT_DEFAULT / CGROUP_BLKIO_WEIGHT_DEFAULT,
1175 CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
1176 }
1177
1178 static uint64_t cgroup_weight_io_to_blkio(uint64_t io_weight) {
1179 return CLAMP(io_weight * CGROUP_BLKIO_WEIGHT_DEFAULT / CGROUP_WEIGHT_DEFAULT,
1180 CGROUP_BLKIO_WEIGHT_MIN, CGROUP_BLKIO_WEIGHT_MAX);
1181 }
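/* Worked example (illustrative, assuming CGROUP_BLKIO_WEIGHT_DEFAULT=500 and
 * CGROUP_WEIGHT_DEFAULT=100): BlockIOWeight=1000 translates to an io.weight of
 * 1000 * 100 / 500 = 200, and IOWeight=200 translates back to a blkio.weight of
 * 200 * 500 / 100 = 1000, clamped to the valid range of each controller. */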
1182
1183 static int set_bfq_weight(Unit *u, const char *controller, dev_t dev, uint64_t io_weight) {
1184 static const char * const prop_names[] = {
1185 "IOWeight",
1186 "BlockIOWeight",
1187 "IODeviceWeight",
1188 "BlockIODeviceWeight",
1189 };
1190 static bool warned = false;
1191 char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+STRLEN("\n")];
1192 const char *p;
1193 uint64_t bfq_weight;
1194 int r;
1195
1196 /* FIXME: drop this function when distro kernels properly support BFQ through "io.weight"
1197 * See also: https://github.com/systemd/systemd/pull/13335 and
1198 * https://github.com/torvalds/linux/commit/65752aef0a407e1ef17ec78a7fc31ba4e0b360f9. */
1199 p = strjoina(controller, ".bfq.weight");
1200 /* Adjust to the kernel range of 1..1000; the default is 100. */
1201 bfq_weight = BFQ_WEIGHT(io_weight);
1202
1203 if (major(dev) > 0)
1204 xsprintf(buf, DEVNUM_FORMAT_STR " %" PRIu64 "\n", DEVNUM_FORMAT_VAL(dev), bfq_weight);
1205 else
1206 xsprintf(buf, "%" PRIu64 "\n", bfq_weight);
1207
1208 r = cg_set_attribute(controller, u->cgroup_path, p, buf);
1209
1210 /* FIXME: drop this when kernels prior to
1211 * 795fe54c2a82 ("bfq: Add per-device weight") v5.4
1212 * are not interesting anymore. Old kernels will fail with EINVAL, while new kernels won't return
1213 * EINVAL on properly formatted input by us. Treat EINVAL accordingly. */
1214 if (r == -EINVAL && major(dev) > 0) {
1215 if (!warned) {
1216 log_unit_warning(u, "Kernel version does not accept per-device setting in %s.", p);
1217 warned = true;
1218 }
1219 r = -EOPNOTSUPP; /* mask as unconfigured device */
1220 } else if (r >= 0 && io_weight != bfq_weight)
1221 log_unit_debug(u, "%s=%" PRIu64 " scaled to %s=%" PRIu64,
1222 prop_names[2*(major(dev) > 0) + streq(controller, "blkio")],
1223 io_weight, p, bfq_weight);
1224 return r;
1225 }
1226
1227 static void cgroup_apply_io_device_weight(Unit *u, const char *dev_path, uint64_t io_weight) {
1228 char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
1229 dev_t dev;
1230 int r, r1, r2;
1231
1232 if (lookup_block_device(dev_path, &dev) < 0)
1233 return;
1234
1235 r1 = set_bfq_weight(u, "io", dev, io_weight);
1236
1237 xsprintf(buf, DEVNUM_FORMAT_STR " %" PRIu64 "\n", DEVNUM_FORMAT_VAL(dev), io_weight);
1238 r2 = cg_set_attribute("io", u->cgroup_path, "io.weight", buf);
1239
1240 /* Look at the configured device, when both fail, prefer io.weight errno. */
1241 r = r2 == -EOPNOTSUPP ? r1 : r2;
1242
1243 if (r < 0)
1244 log_unit_full_errno(u, LOG_LEVEL_CGROUP_WRITE(r),
1245 r, "Failed to set 'io[.bfq].weight' attribute on '%s' to '%.*s': %m",
1246 empty_to_root(u->cgroup_path), (int) strcspn(buf, NEWLINE), buf);
1247 }
1248
1249 static void cgroup_apply_blkio_device_weight(Unit *u, const char *dev_path, uint64_t blkio_weight) {
1250 char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
1251 dev_t dev;
1252 int r;
1253
1254 r = lookup_block_device(dev_path, &dev);
1255 if (r < 0)
1256 return;
1257
1258 xsprintf(buf, DEVNUM_FORMAT_STR " %" PRIu64 "\n", DEVNUM_FORMAT_VAL(dev), blkio_weight);
1259 (void) set_attribute_and_warn(u, "blkio", "blkio.weight_device", buf);
1260 }
1261
1262 static void cgroup_apply_io_device_latency(Unit *u, const char *dev_path, usec_t target) {
1263 char buf[DECIMAL_STR_MAX(dev_t)*2+2+7+DECIMAL_STR_MAX(uint64_t)+1];
1264 dev_t dev;
1265 int r;
1266
1267 r = lookup_block_device(dev_path, &dev);
1268 if (r < 0)
1269 return;
1270
1271 if (target != USEC_INFINITY)
1272 xsprintf(buf, DEVNUM_FORMAT_STR " target=%" PRIu64 "\n", DEVNUM_FORMAT_VAL(dev), target);
1273 else
1274 xsprintf(buf, DEVNUM_FORMAT_STR " target=max\n", DEVNUM_FORMAT_VAL(dev));
1275
1276 (void) set_attribute_and_warn(u, "io", "io.latency", buf);
1277 }
1278
1279 static void cgroup_apply_io_device_limit(Unit *u, const char *dev_path, uint64_t *limits) {
1280 char limit_bufs[_CGROUP_IO_LIMIT_TYPE_MAX][DECIMAL_STR_MAX(uint64_t)],
1281 buf[DECIMAL_STR_MAX(dev_t)*2+2+(6+DECIMAL_STR_MAX(uint64_t)+1)*4];
1282 dev_t dev;
1283
1284 if (lookup_block_device(dev_path, &dev) < 0)
1285 return;
1286
1287 for (CGroupIOLimitType type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
1288 if (limits[type] != cgroup_io_limit_defaults[type])
1289 xsprintf(limit_bufs[type], "%" PRIu64, limits[type]);
1290 else
1291 xsprintf(limit_bufs[type], "%s", limits[type] == CGROUP_LIMIT_MAX ? "max" : "0");
1292
1293 xsprintf(buf, DEVNUM_FORMAT_STR " rbps=%s wbps=%s riops=%s wiops=%s\n", DEVNUM_FORMAT_VAL(dev),
1294 limit_bufs[CGROUP_IO_RBPS_MAX], limit_bufs[CGROUP_IO_WBPS_MAX],
1295 limit_bufs[CGROUP_IO_RIOPS_MAX], limit_bufs[CGROUP_IO_WIOPS_MAX]);
1296 (void) set_attribute_and_warn(u, "io", "io.max", buf);
1297 }
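/* Example of the resulting io.max line (illustrative; 8:0 is a hypothetical device): a configured
 * read bandwidth limit of 1000000 bytes/s with the other limits left at their defaults writes
 * "8:0 rbps=1000000 wbps=max riops=max wiops=max". */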
1298
1299 static void cgroup_apply_blkio_device_limit(Unit *u, const char *dev_path, uint64_t rbps, uint64_t wbps) {
1300 char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
1301 dev_t dev;
1302
1303 if (lookup_block_device(dev_path, &dev) < 0)
1304 return;
1305
1306 sprintf(buf, DEVNUM_FORMAT_STR " %" PRIu64 "\n", DEVNUM_FORMAT_VAL(dev), rbps);
1307 (void) set_attribute_and_warn(u, "blkio", "blkio.throttle.read_bps_device", buf);
1308
1309 sprintf(buf, DEVNUM_FORMAT_STR " %" PRIu64 "\n", DEVNUM_FORMAT_VAL(dev), wbps);
1310 (void) set_attribute_and_warn(u, "blkio", "blkio.throttle.write_bps_device", buf);
1311 }
1312
1313 static bool unit_has_unified_memory_config(Unit *u) {
1314 CGroupContext *c;
1315
1316 assert(u);
1317
1318 assert_se(c = unit_get_cgroup_context(u));
1319
1320 return unit_get_ancestor_memory_min(u) > 0 ||
1321 unit_get_ancestor_memory_low(u) > 0 || unit_get_ancestor_startup_memory_low(u) > 0 ||
1322 c->memory_high != CGROUP_LIMIT_MAX || c->startup_memory_high_set ||
1323 c->memory_max != CGROUP_LIMIT_MAX || c->startup_memory_max_set ||
1324 c->memory_swap_max != CGROUP_LIMIT_MAX || c->startup_memory_swap_max_set ||
1325 c->memory_zswap_max != CGROUP_LIMIT_MAX || c->startup_memory_zswap_max_set;
1326 }
1327
1328 static void cgroup_apply_unified_memory_limit(Unit *u, const char *file, uint64_t v) {
1329 char buf[DECIMAL_STR_MAX(uint64_t) + 1] = "max\n";
1330
1331 if (v != CGROUP_LIMIT_MAX)
1332 xsprintf(buf, "%" PRIu64 "\n", v);
1333
1334 (void) set_attribute_and_warn(u, "memory", file, buf);
1335 }
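/* Example (illustrative): MemoryMax=1G (parsed as 2^30 bytes) writes "1073741824\n" to
 * memory.max, while an unset/infinity limit writes the literal "max\n". */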
1336
1337 static void cgroup_apply_firewall(Unit *u) {
1338 assert(u);
1339
1340 /* Best-effort: let's apply IP firewalling and/or accounting if that's enabled */
1341
1342 if (bpf_firewall_compile(u) < 0)
1343 return;
1344
1345 (void) bpf_firewall_load_custom(u);
1346 (void) bpf_firewall_install(u);
1347 }
1348
1349 static void cgroup_apply_socket_bind(Unit *u) {
1350 assert(u);
1351
1352 (void) bpf_socket_bind_install(u);
1353 }
1354
1355 static void cgroup_apply_restrict_network_interfaces(Unit *u) {
1356 assert(u);
1357
1358 (void) restrict_network_interfaces_install(u);
1359 }
1360
1361 static int cgroup_apply_devices(Unit *u) {
1362 _cleanup_(bpf_program_freep) BPFProgram *prog = NULL;
1363 const char *path;
1364 CGroupContext *c;
1365 CGroupDevicePolicy policy;
1366 int r;
1367
1368 assert_se(c = unit_get_cgroup_context(u));
1369 assert_se(path = u->cgroup_path);
1370
1371 policy = c->device_policy;
1372
1373 if (cg_all_unified() > 0) {
1374 r = bpf_devices_cgroup_init(&prog, policy, c->device_allow);
1375 if (r < 0)
1376 return log_unit_warning_errno(u, r, "Failed to initialize device control bpf program: %m");
1377
1378 } else {
1379 /* Changing the devices list of a populated cgroup might result in EINVAL, hence ignore
1380 * EINVAL here. */
1381
1382 if (c->device_allow || policy != CGROUP_DEVICE_POLICY_AUTO)
1383 r = cg_set_attribute("devices", path, "devices.deny", "a");
1384 else
1385 r = cg_set_attribute("devices", path, "devices.allow", "a");
1386 if (r < 0)
1387 log_unit_full_errno(u, IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES, -EPERM) ? LOG_DEBUG : LOG_WARNING, r,
1388 "Failed to reset devices.allow/devices.deny: %m");
1389 }
1390
1391 bool allow_list_static = policy == CGROUP_DEVICE_POLICY_CLOSED ||
1392 (policy == CGROUP_DEVICE_POLICY_AUTO && c->device_allow);
1393 if (allow_list_static)
1394 (void) bpf_devices_allow_list_static(prog, path);
1395
1396 bool any = allow_list_static;
1397 LIST_FOREACH(device_allow, a, c->device_allow) {
1398 char acc[4], *val;
1399 unsigned k = 0;
1400
1401 if (a->r)
1402 acc[k++] = 'r';
1403 if (a->w)
1404 acc[k++] = 'w';
1405 if (a->m)
1406 acc[k++] = 'm';
1407 if (k == 0)
1408 continue;
1409 acc[k++] = 0;
1410
1411 if (path_startswith(a->path, "/dev/"))
1412 r = bpf_devices_allow_list_device(prog, path, a->path, acc);
1413 else if ((val = startswith(a->path, "block-")))
1414 r = bpf_devices_allow_list_major(prog, path, val, 'b', acc);
1415 else if ((val = startswith(a->path, "char-")))
1416 r = bpf_devices_allow_list_major(prog, path, val, 'c', acc);
1417 else {
1418 log_unit_debug(u, "Ignoring device '%s' while writing cgroup attribute.", a->path);
1419 continue;
1420 }
1421
1422 if (r >= 0)
1423 any = true;
1424 }
1425
1426 if (prog && !any) {
1427 log_unit_warning_errno(u, SYNTHETIC_ERRNO(ENODEV), "No devices matched by device filter.");
1428
1429 /* The kernel verifier would reject a program we would build with the normal intro and outro
1430 but no allow-listing rules (outro would contain an unreachable instruction for successful
1431 return). */
1432 policy = CGROUP_DEVICE_POLICY_STRICT;
1433 }
1434
1435 r = bpf_devices_apply_policy(&prog, policy, any, path, &u->bpf_device_control_installed);
1436 if (r < 0) {
1437 static bool warned = false;
1438
1439 log_full_errno(warned ? LOG_DEBUG : LOG_WARNING, r,
1440 "Unit %s configures device ACL, but the local system doesn't seem to support the BPF-based device controller.\n"
1441 "Proceeding WITHOUT applying ACL (all devices will be accessible)!\n"
1442 "(This warning is only shown for the first loaded unit using device ACL.)", u->id);
1443
1444 warned = true;
1445 }
1446 return r;
1447 }
1448
1449 static void set_io_weight(Unit *u, uint64_t weight) {
1450 char buf[STRLEN("default \n")+DECIMAL_STR_MAX(uint64_t)];
1451
1452 assert(u);
1453
1454 (void) set_bfq_weight(u, "io", makedev(0, 0), weight);
1455
1456 xsprintf(buf, "default %" PRIu64 "\n", weight);
1457 (void) set_attribute_and_warn(u, "io", "io.weight", buf);
1458 }
1459
1460 static void set_blkio_weight(Unit *u, uint64_t weight) {
1461 char buf[STRLEN("\n")+DECIMAL_STR_MAX(uint64_t)];
1462
1463 assert(u);
1464
1465 (void) set_bfq_weight(u, "blkio", makedev(0, 0), weight);
1466
1467 xsprintf(buf, "%" PRIu64 "\n", weight);
1468 (void) set_attribute_and_warn(u, "blkio", "blkio.weight", buf);
1469 }
1470
1471 static void cgroup_apply_bpf_foreign_program(Unit *u) {
1472 assert(u);
1473
1474 (void) bpf_foreign_install(u);
1475 }
1476
1477 static void cgroup_context_apply(
1478 Unit *u,
1479 CGroupMask apply_mask,
1480 ManagerState state) {
1481
1482 const char *path;
1483 CGroupContext *c;
1484 bool is_host_root, is_local_root;
1485 int r;
1486
1487 assert(u);
1488
1489 /* Nothing to do? Exit early! */
1490 if (apply_mask == 0)
1491 return;
1492
1493 /* Some cgroup attributes are not supported on the host root cgroup, hence silently ignore them here. And other
1494 * attributes should only be managed for cgroups further down the tree. */
1495 is_local_root = unit_has_name(u, SPECIAL_ROOT_SLICE);
1496 is_host_root = unit_has_host_root_cgroup(u);
1497
1498 assert_se(c = unit_get_cgroup_context(u));
1499 assert_se(path = u->cgroup_path);
1500
1501 if (is_local_root) /* Make sure we don't try to display messages with an empty path. */
1502 path = "/";
1503
1504 /* We generally ignore errors caused by read-only mounted cgroup trees (assuming we are running in a container
1505 * then), and missing cgroups, i.e. EROFS and ENOENT. */
1506
1507 /* In fully unified mode these attributes don't exist on the host cgroup root. On legacy the weights exist, but
1508 * setting the weight makes very little sense on the host root cgroup, as there are no other cgroups at this
1509 * level. The quota exists there too, but any attempt to write to it is refused with EINVAL. Inside of
1510 * containers we want to leave control of these to the container manager (and if cgroup v2 delegation is used
1511 * we couldn't even write to them if we wanted to). */
1512 if ((apply_mask & CGROUP_MASK_CPU) && !is_local_root) {
1513
1514 if (cg_all_unified() > 0) {
1515 uint64_t weight;
1516
1517 if (cgroup_context_has_cpu_weight(c))
1518 weight = cgroup_context_cpu_weight(c, state);
1519 else if (cgroup_context_has_cpu_shares(c)) {
1520 uint64_t shares;
1521
1522 shares = cgroup_context_cpu_shares(c, state);
1523 weight = cgroup_cpu_shares_to_weight(shares);
1524
1525 log_cgroup_compat(u, "Applying [Startup]CPUShares=%" PRIu64 " as [Startup]CPUWeight=%" PRIu64 " on %s",
1526 shares, weight, path);
1527 } else
1528 weight = CGROUP_WEIGHT_DEFAULT;
1529
1530 cgroup_apply_unified_cpu_idle(u, weight);
1531 cgroup_apply_unified_cpu_weight(u, weight);
1532 cgroup_apply_unified_cpu_quota(u, c->cpu_quota_per_sec_usec, c->cpu_quota_period_usec);
1533
1534 } else {
1535 uint64_t shares;
1536
1537 if (cgroup_context_has_cpu_weight(c)) {
1538 uint64_t weight;
1539
1540 weight = cgroup_context_cpu_weight(c, state);
1541 shares = cgroup_cpu_weight_to_shares(weight);
1542
1543 log_cgroup_compat(u, "Applying [Startup]CPUWeight=%" PRIu64 " as [Startup]CPUShares=%" PRIu64 " on %s",
1544 weight, shares, path);
1545 } else if (cgroup_context_has_cpu_shares(c))
1546 shares = cgroup_context_cpu_shares(c, state);
1547 else
1548 shares = CGROUP_CPU_SHARES_DEFAULT;
1549
1550 cgroup_apply_legacy_cpu_shares(u, shares);
1551 cgroup_apply_legacy_cpu_quota(u, c->cpu_quota_per_sec_usec, c->cpu_quota_period_usec);
1552 }
1553 }
1554
1555 if ((apply_mask & CGROUP_MASK_CPUSET) && !is_local_root) {
1556 cgroup_apply_unified_cpuset(u, cgroup_context_allowed_cpus(c, state), "cpuset.cpus");
1557 cgroup_apply_unified_cpuset(u, cgroup_context_allowed_mems(c, state), "cpuset.mems");
1558 }
1559
1560 /* The 'io' controller attributes are not exported on the host's root cgroup (being a pure cgroup v2
1561 * controller), and in case of containers we want to leave control of these attributes to the container manager
1562 * (and we couldn't access that stuff anyway, even if we tried if proper delegation is used). */
1563 if ((apply_mask & CGROUP_MASK_IO) && !is_local_root) {
1564 bool has_io, has_blockio;
1565 uint64_t weight;
1566
1567 has_io = cgroup_context_has_io_config(c);
1568 has_blockio = cgroup_context_has_blockio_config(c);
1569
1570 if (has_io)
1571 weight = cgroup_context_io_weight(c, state);
1572 else if (has_blockio) {
1573 uint64_t blkio_weight;
1574
1575 blkio_weight = cgroup_context_blkio_weight(c, state);
1576 weight = cgroup_weight_blkio_to_io(blkio_weight);
1577
1578 log_cgroup_compat(u, "Applying [Startup]BlockIOWeight=%" PRIu64 " as [Startup]IOWeight=%" PRIu64,
1579 blkio_weight, weight);
1580 } else
1581 weight = CGROUP_WEIGHT_DEFAULT;
1582
1583 set_io_weight(u, weight);
1584
1585 if (has_io) {
1586 LIST_FOREACH(device_weights, w, c->io_device_weights)
1587 cgroup_apply_io_device_weight(u, w->path, w->weight);
1588
1589 LIST_FOREACH(device_limits, limit, c->io_device_limits)
1590 cgroup_apply_io_device_limit(u, limit->path, limit->limits);
1591
1592 LIST_FOREACH(device_latencies, latency, c->io_device_latencies)
1593 cgroup_apply_io_device_latency(u, latency->path, latency->target_usec);
1594
1595 } else if (has_blockio) {
1596 LIST_FOREACH(device_weights, w, c->blockio_device_weights) {
1597 weight = cgroup_weight_blkio_to_io(w->weight);
1598
1599 log_cgroup_compat(u, "Applying BlockIODeviceWeight=%" PRIu64 " as IODeviceWeight=%" PRIu64 " for %s",
1600 w->weight, weight, w->path);
1601
1602 cgroup_apply_io_device_weight(u, w->path, weight);
1603 }
1604
1605 LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
1606 uint64_t limits[_CGROUP_IO_LIMIT_TYPE_MAX];
1607
1608 for (CGroupIOLimitType type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
1609 limits[type] = cgroup_io_limit_defaults[type];
1610
1611 limits[CGROUP_IO_RBPS_MAX] = b->rbps;
1612 limits[CGROUP_IO_WBPS_MAX] = b->wbps;
1613
1614 log_cgroup_compat(u, "Applying BlockIO{Read|Write}Bandwidth=%" PRIu64 " %" PRIu64 " as IO{Read|Write}BandwidthMax= for %s",
1615 b->rbps, b->wbps, b->path);
1616
1617 cgroup_apply_io_device_limit(u, b->path, limits);
1618 }
1619 }
1620 }
1621
1622 if (apply_mask & CGROUP_MASK_BLKIO) {
1623 bool has_io, has_blockio;
1624
1625 has_io = cgroup_context_has_io_config(c);
1626 has_blockio = cgroup_context_has_blockio_config(c);
1627
1628 /* Applying a 'weight' never makes sense for the host root cgroup, and for containers this should be
1629 * left to our container manager, too. */
1630 if (!is_local_root) {
1631 uint64_t weight;
1632
1633 if (has_io) {
1634 uint64_t io_weight;
1635
1636 io_weight = cgroup_context_io_weight(c, state);
1637                                 weight = cgroup_weight_io_to_blkio(io_weight);
1638
1639 log_cgroup_compat(u, "Applying [Startup]IOWeight=%" PRIu64 " as [Startup]BlockIOWeight=%" PRIu64,
1640 io_weight, weight);
1641 } else if (has_blockio)
1642 weight = cgroup_context_blkio_weight(c, state);
1643 else
1644 weight = CGROUP_BLKIO_WEIGHT_DEFAULT;
1645
1646 set_blkio_weight(u, weight);
1647
1648 if (has_io)
1649 LIST_FOREACH(device_weights, w, c->io_device_weights) {
1650 weight = cgroup_weight_io_to_blkio(w->weight);
1651
1652 log_cgroup_compat(u, "Applying IODeviceWeight=%" PRIu64 " as BlockIODeviceWeight=%" PRIu64 " for %s",
1653 w->weight, weight, w->path);
1654
1655 cgroup_apply_blkio_device_weight(u, w->path, weight);
1656 }
1657 else if (has_blockio)
1658 LIST_FOREACH(device_weights, w, c->blockio_device_weights)
1659 cgroup_apply_blkio_device_weight(u, w->path, w->weight);
1660 }
1661
1662                 /* Bandwidth limits make sense to apply to the host's root cgroup but not to container
1663                  * roots, as there we want the container manager to handle them. */
1664 if (is_host_root || !is_local_root) {
1665 if (has_io)
1666 LIST_FOREACH(device_limits, l, c->io_device_limits) {
1667 log_cgroup_compat(u, "Applying IO{Read|Write}Bandwidth=%" PRIu64 " %" PRIu64 " as BlockIO{Read|Write}BandwidthMax= for %s",
1668 l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX], l->path);
1669
1670 cgroup_apply_blkio_device_limit(u, l->path, l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX]);
1671 }
1672 else if (has_blockio)
1673 LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths)
1674 cgroup_apply_blkio_device_limit(u, b->path, b->rbps, b->wbps);
1675 }
1676 }
1677
1678 /* In unified mode 'memory' attributes do not exist on the root cgroup. In legacy mode 'memory.limit_in_bytes'
1679 * exists on the root cgroup, but any writes to it are refused with EINVAL. And if we run in a container we
1680 * want to leave control to the container manager (and if proper cgroup v2 delegation is used we couldn't even
1681 * write to this if we wanted to.) */
1682 if ((apply_mask & CGROUP_MASK_MEMORY) && !is_local_root) {
1683
1684 if (cg_all_unified() > 0) {
1685 uint64_t max, swap_max = CGROUP_LIMIT_MAX, zswap_max = CGROUP_LIMIT_MAX, high = CGROUP_LIMIT_MAX;
1686
1687 if (unit_has_unified_memory_config(u)) {
1688 bool startup = IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING);
1689
1690 high = startup && c->startup_memory_high_set ? c->startup_memory_high : c->memory_high;
1691 max = startup && c->startup_memory_max_set ? c->startup_memory_max : c->memory_max;
1692 swap_max = startup && c->startup_memory_swap_max_set ? c->startup_memory_swap_max : c->memory_swap_max;
1693 zswap_max = startup && c->startup_memory_zswap_max_set ? c->startup_memory_zswap_max : c->memory_zswap_max;
1694 } else {
1695 max = c->memory_limit;
1696
1697 if (max != CGROUP_LIMIT_MAX)
1698 log_cgroup_compat(u, "Applying MemoryLimit=%" PRIu64 " as MemoryMax=", max);
1699 }
1700
1701 cgroup_apply_unified_memory_limit(u, "memory.min", unit_get_ancestor_memory_min(u));
1702 cgroup_apply_unified_memory_limit(u, "memory.low", unit_get_ancestor_memory_low(u));
1703 cgroup_apply_unified_memory_limit(u, "memory.high", high);
1704 cgroup_apply_unified_memory_limit(u, "memory.max", max);
1705 cgroup_apply_unified_memory_limit(u, "memory.swap.max", swap_max);
1706 cgroup_apply_unified_memory_limit(u, "memory.zswap.max", zswap_max);
1707
1708 (void) set_attribute_and_warn(u, "memory", "memory.oom.group", one_zero(c->memory_oom_group));
1709
1710 } else {
1711 char buf[DECIMAL_STR_MAX(uint64_t) + 1];
1712 uint64_t val;
1713
1714 if (unit_has_unified_memory_config(u)) {
1715 val = c->memory_max;
1716 if (val != CGROUP_LIMIT_MAX)
1717 log_cgroup_compat(u, "Applying MemoryMax=%" PRIu64 " as MemoryLimit=", val);
1718 } else
1719 val = c->memory_limit;
1720
1721 if (val == CGROUP_LIMIT_MAX)
1722 strncpy(buf, "-1\n", sizeof(buf));
1723 else
1724 xsprintf(buf, "%" PRIu64 "\n", val);
1725
1726 (void) set_attribute_and_warn(u, "memory", "memory.limit_in_bytes", buf);
1727 }
1728 }
1729
1730 /* On cgroup v2 we can apply BPF everywhere. On cgroup v1 we apply it everywhere except for the root of
1731          * containers, where we leave this to the container manager. */
1732 if ((apply_mask & (CGROUP_MASK_DEVICES | CGROUP_MASK_BPF_DEVICES)) &&
1733 (is_host_root || cg_all_unified() > 0 || !is_local_root))
1734 (void) cgroup_apply_devices(u);
1735
1736 if (apply_mask & CGROUP_MASK_PIDS) {
1737
1738 if (is_host_root) {
1739 /* So, the "pids" controller does not expose anything on the root cgroup, in order not to
1740                          * replicate knobs exposed elsewhere needlessly. We abstract this away here however: when
1741                          * the knobs of the root cgroup are modified, we propagate this to the relevant sysctls. There's a
1742 * non-obvious asymmetry however: unlike the cgroup properties we don't really want to take
1743 * exclusive ownership of the sysctls, but we still want to honour things if the user sets
1744 * limits. Hence we employ sort of a one-way strategy: when the user sets a bounded limit
1745 * through us it counts. When the user afterwards unsets it again (i.e. sets it to unbounded)
1746 * it also counts. But if the user never set a limit through us (i.e. we are the default of
1747 * "unbounded") we leave things unmodified. For this we manage a global boolean that we turn on
1748 * the first time we set a limit. Note that this boolean is flushed out on manager reload,
1749 * which is desirable so that there's an official way to release control of the sysctl from
1750 * systemd: set the limit to unbounded and reload. */
1751
1752 if (tasks_max_isset(&c->tasks_max)) {
1753 u->manager->sysctl_pid_max_changed = true;
1754 r = procfs_tasks_set_limit(tasks_max_resolve(&c->tasks_max));
1755 } else if (u->manager->sysctl_pid_max_changed)
1756 r = procfs_tasks_set_limit(TASKS_MAX);
1757 else
1758 r = 0;
1759 if (r < 0)
1760 log_unit_full_errno(u, LOG_LEVEL_CGROUP_WRITE(r), r,
1761 "Failed to write to tasks limit sysctls: %m");
1762 }
1763
1764 /* The attribute itself is not available on the host root cgroup, and in the container case we want to
1765 * leave it for the container manager. */
1766 if (!is_local_root) {
1767 if (tasks_max_isset(&c->tasks_max)) {
1768 char buf[DECIMAL_STR_MAX(uint64_t) + 1];
1769
1770 xsprintf(buf, "%" PRIu64 "\n", tasks_max_resolve(&c->tasks_max));
1771 (void) set_attribute_and_warn(u, "pids", "pids.max", buf);
1772 } else
1773 (void) set_attribute_and_warn(u, "pids", "pids.max", "max\n");
1774 }
1775 }
1776
1777 if (apply_mask & CGROUP_MASK_BPF_FIREWALL)
1778 cgroup_apply_firewall(u);
1779
1780 if (apply_mask & CGROUP_MASK_BPF_FOREIGN)
1781 cgroup_apply_bpf_foreign_program(u);
1782
1783 if (apply_mask & CGROUP_MASK_BPF_SOCKET_BIND)
1784 cgroup_apply_socket_bind(u);
1785
1786 if (apply_mask & CGROUP_MASK_BPF_RESTRICT_NETWORK_INTERFACES)
1787 cgroup_apply_restrict_network_interfaces(u);
1788 }
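
/* A rough standalone sketch of the CPUShares= -> CPUWeight= translation that the compat
 * log messages above refer to: legacy shares are scaled so that the default share count
 * maps to the default weight, then clamped to the cpu.weight range. The sketch_* names
 * and the constants below are illustrative assumptions mirroring the usual ranges; the
 * real conversion helpers live in the cgroup utility headers. */

#include <stdint.h>

#define SKETCH_CPU_WEIGHT_MIN     UINT64_C(1)
#define SKETCH_CPU_WEIGHT_MAX     UINT64_C(10000)
#define SKETCH_CPU_WEIGHT_DEFAULT UINT64_C(100)
#define SKETCH_CPU_SHARES_DEFAULT UINT64_C(1024)

static uint64_t sketch_shares_to_weight(uint64_t shares) {
        uint64_t w = shares * SKETCH_CPU_WEIGHT_DEFAULT / SKETCH_CPU_SHARES_DEFAULT;

        /* Clamp to what the kernel accepts for cpu.weight. */
        if (w < SKETCH_CPU_WEIGHT_MIN)
                return SKETCH_CPU_WEIGHT_MIN;
        if (w > SKETCH_CPU_WEIGHT_MAX)
                return SKETCH_CPU_WEIGHT_MAX;
        return w;
}

/* Examples: 1024 shares -> weight 100 (both defaults), 2048 -> 200, 2 -> 1 (clamped). */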
1789
1790 static bool unit_get_needs_bpf_firewall(Unit *u) {
1791 CGroupContext *c;
1792 assert(u);
1793
1794 c = unit_get_cgroup_context(u);
1795 if (!c)
1796 return false;
1797
1798 if (c->ip_accounting ||
1799 !set_isempty(c->ip_address_allow) ||
1800 !set_isempty(c->ip_address_deny) ||
1801 c->ip_filters_ingress ||
1802 c->ip_filters_egress)
1803 return true;
1804
1805 /* If any parent slice has an IP access list defined, it applies too */
1806 for (Unit *p = UNIT_GET_SLICE(u); p; p = UNIT_GET_SLICE(p)) {
1807 c = unit_get_cgroup_context(p);
1808 if (!c)
1809 return false;
1810
1811 if (!set_isempty(c->ip_address_allow) ||
1812 !set_isempty(c->ip_address_deny))
1813 return true;
1814 }
1815
1816 return false;
1817 }
1818
1819 static bool unit_get_needs_bpf_foreign_program(Unit *u) {
1820 CGroupContext *c;
1821 assert(u);
1822
1823 c = unit_get_cgroup_context(u);
1824 if (!c)
1825 return false;
1826
1827 return !!c->bpf_foreign_programs;
1828 }
1829
1830 static bool unit_get_needs_socket_bind(Unit *u) {
1831 CGroupContext *c;
1832 assert(u);
1833
1834 c = unit_get_cgroup_context(u);
1835 if (!c)
1836 return false;
1837
1838 return c->socket_bind_allow || c->socket_bind_deny;
1839 }
1840
1841 static bool unit_get_needs_restrict_network_interfaces(Unit *u) {
1842 CGroupContext *c;
1843 assert(u);
1844
1845 c = unit_get_cgroup_context(u);
1846 if (!c)
1847 return false;
1848
1849 return !set_isempty(c->restrict_network_interfaces);
1850 }
1851
1852 static CGroupMask unit_get_cgroup_mask(Unit *u) {
1853 CGroupMask mask = 0;
1854 CGroupContext *c;
1855
1856 assert(u);
1857
1858 assert_se(c = unit_get_cgroup_context(u));
1859
1860 /* Figure out which controllers we need, based on the cgroup context object */
1861
1862 if (c->cpu_accounting)
1863 mask |= get_cpu_accounting_mask();
1864
1865 if (cgroup_context_has_cpu_weight(c) ||
1866 cgroup_context_has_cpu_shares(c) ||
1867 c->cpu_quota_per_sec_usec != USEC_INFINITY)
1868 mask |= CGROUP_MASK_CPU;
1869
1870 if (cgroup_context_has_allowed_cpus(c) || cgroup_context_has_allowed_mems(c))
1871 mask |= CGROUP_MASK_CPUSET;
1872
1873 if (cgroup_context_has_io_config(c) || cgroup_context_has_blockio_config(c))
1874 mask |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;
1875
1876 if (c->memory_accounting ||
1877 c->memory_limit != CGROUP_LIMIT_MAX ||
1878 unit_has_unified_memory_config(u))
1879 mask |= CGROUP_MASK_MEMORY;
1880
1881 if (c->device_allow ||
1882 c->device_policy != CGROUP_DEVICE_POLICY_AUTO)
1883 mask |= CGROUP_MASK_DEVICES | CGROUP_MASK_BPF_DEVICES;
1884
1885 if (c->tasks_accounting ||
1886 tasks_max_isset(&c->tasks_max))
1887 mask |= CGROUP_MASK_PIDS;
1888
1889 return CGROUP_MASK_EXTEND_JOINED(mask);
1890 }
1891
1892 static CGroupMask unit_get_bpf_mask(Unit *u) {
1893 CGroupMask mask = 0;
1894
1895         /* Figure out which BPF-based pseudo-controllers we need, based on the cgroup context, possibly taking
1896          * ancestor slices into account too (for the BPF firewall). */
1897
1898 if (unit_get_needs_bpf_firewall(u))
1899 mask |= CGROUP_MASK_BPF_FIREWALL;
1900
1901 if (unit_get_needs_bpf_foreign_program(u))
1902 mask |= CGROUP_MASK_BPF_FOREIGN;
1903
1904 if (unit_get_needs_socket_bind(u))
1905 mask |= CGROUP_MASK_BPF_SOCKET_BIND;
1906
1907 if (unit_get_needs_restrict_network_interfaces(u))
1908 mask |= CGROUP_MASK_BPF_RESTRICT_NETWORK_INTERFACES;
1909
1910 return mask;
1911 }
1912
1913 CGroupMask unit_get_own_mask(Unit *u) {
1914 CGroupContext *c;
1915
1916 /* Returns the mask of controllers the unit needs for itself. If a unit is not properly loaded, return an empty
1917 * mask, as we shouldn't reflect it in the cgroup hierarchy then. */
1918
1919 if (u->load_state != UNIT_LOADED)
1920 return 0;
1921
1922 c = unit_get_cgroup_context(u);
1923 if (!c)
1924 return 0;
1925
1926 return unit_get_cgroup_mask(u) | unit_get_bpf_mask(u) | unit_get_delegate_mask(u);
1927 }
1928
1929 CGroupMask unit_get_delegate_mask(Unit *u) {
1930 CGroupContext *c;
1931
1932 /* If delegation is turned on, then turn on selected controllers, unless we are on the legacy hierarchy and the
1933 * process we fork into is known to drop privileges, and hence shouldn't get access to the controllers.
1934 *
1935 * Note that on the unified hierarchy it is safe to delegate controllers to unprivileged services. */
1936
1937 if (!unit_cgroup_delegate(u))
1938 return 0;
1939
1940 if (cg_all_unified() <= 0) {
1941 ExecContext *e;
1942
1943 e = unit_get_exec_context(u);
1944 if (e && !exec_context_maintains_privileges(e))
1945 return 0;
1946 }
1947
1948 assert_se(c = unit_get_cgroup_context(u));
1949 return CGROUP_MASK_EXTEND_JOINED(c->delegate_controllers);
1950 }
1951
1952 static CGroupMask unit_get_subtree_mask(Unit *u) {
1953
1954         /* Returns the mask of this subtree, i.e. of the cgroup
1955          * itself and all its children. */
1956
1957 return unit_get_own_mask(u) | unit_get_members_mask(u);
1958 }
1959
1960 CGroupMask unit_get_members_mask(Unit *u) {
1961 assert(u);
1962
1963 /* Returns the mask of controllers all of the unit's children require, merged */
1964
1965 if (u->cgroup_members_mask_valid)
1966 return u->cgroup_members_mask; /* Use cached value if possible */
1967
1968 u->cgroup_members_mask = 0;
1969
1970 if (u->type == UNIT_SLICE) {
1971 Unit *member;
1972
1973 UNIT_FOREACH_DEPENDENCY(member, u, UNIT_ATOM_SLICE_OF)
1974 u->cgroup_members_mask |= unit_get_subtree_mask(member); /* note that this calls ourselves again, for the children */
1975 }
1976
1977 u->cgroup_members_mask_valid = true;
1978 return u->cgroup_members_mask;
1979 }
1980
1981 CGroupMask unit_get_siblings_mask(Unit *u) {
1982 Unit *slice;
1983 assert(u);
1984
1985 /* Returns the mask of controllers all of the unit's siblings
1986 * require, i.e. the members mask of the unit's parent slice
1987 * if there is one. */
1988
1989 slice = UNIT_GET_SLICE(u);
1990 if (slice)
1991 return unit_get_members_mask(slice);
1992
1993 return unit_get_subtree_mask(u); /* we are the top-level slice */
1994 }
1995
1996 static CGroupMask unit_get_disable_mask(Unit *u) {
1997 CGroupContext *c;
1998
1999 c = unit_get_cgroup_context(u);
2000 if (!c)
2001 return 0;
2002
2003 return c->disable_controllers;
2004 }
2005
2006 CGroupMask unit_get_ancestor_disable_mask(Unit *u) {
2007 CGroupMask mask;
2008 Unit *slice;
2009
2010 assert(u);
2011 mask = unit_get_disable_mask(u);
2012
2013 /* Returns the mask of controllers which are marked as forcibly
2014 * disabled in any ancestor unit or the unit in question. */
2015
2016 slice = UNIT_GET_SLICE(u);
2017 if (slice)
2018 mask |= unit_get_ancestor_disable_mask(slice);
2019
2020 return mask;
2021 }
2022
2023 CGroupMask unit_get_target_mask(Unit *u) {
2024 CGroupMask own_mask, mask;
2025
2026 /* This returns the cgroup mask of all controllers to enable for a specific cgroup, i.e. everything
2027 * it needs itself, plus all that its children need, plus all that its siblings need. This is
2028 * primarily useful on the legacy cgroup hierarchy, where we need to duplicate each cgroup in each
2029 * hierarchy that shall be enabled for it. */
2030
2031 own_mask = unit_get_own_mask(u);
2032
2033 if (own_mask & CGROUP_MASK_BPF_FIREWALL & ~u->manager->cgroup_supported)
2034 emit_bpf_firewall_warning(u);
2035
2036 mask = own_mask | unit_get_members_mask(u) | unit_get_siblings_mask(u);
2037
2038 mask &= u->manager->cgroup_supported;
2039 mask &= ~unit_get_ancestor_disable_mask(u);
2040
2041 return mask;
2042 }
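
/* The target mask computed above is plain bit algebra. A self-contained sketch with
 * made-up controller bits (the SKETCH_* names and values are hypothetical, not the
 * real CGroupMask constants): */

#include <stdint.h>

enum {
        SKETCH_CPU    = UINT32_C(1) << 0,
        SKETCH_MEMORY = UINT32_C(1) << 1,
        SKETCH_IO     = UINT32_C(1) << 2,
        SKETCH_PIDS   = UINT32_C(1) << 3,
};

/* target = (own | members | siblings), restricted to what the kernel supports,
 * minus everything an ancestor forcibly disabled. */
static uint32_t sketch_target_mask(uint32_t own, uint32_t members, uint32_t siblings,
                                   uint32_t supported, uint32_t ancestor_disable) {
        return (own | members | siblings) & supported & ~ancestor_disable;
}

/* E.g. own=SKETCH_CPU, members=SKETCH_MEMORY, siblings=SKETCH_IO, supported=all bits,
 * ancestor_disable=SKETCH_IO yields SKETCH_CPU|SKETCH_MEMORY. */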
2043
2044 CGroupMask unit_get_enable_mask(Unit *u) {
2045 CGroupMask mask;
2046
2047 /* This returns the cgroup mask of all controllers to enable
2048 * for the children of a specific cgroup. This is primarily
2049 * useful for the unified cgroup hierarchy, where each cgroup
2050 * controls which controllers are enabled for its children. */
2051
2052 mask = unit_get_members_mask(u);
2053 mask &= u->manager->cgroup_supported;
2054 mask &= ~unit_get_ancestor_disable_mask(u);
2055
2056 return mask;
2057 }
2058
2059 void unit_invalidate_cgroup_members_masks(Unit *u) {
2060 Unit *slice;
2061
2062 assert(u);
2063
2064         /* Recursively invalidate the member masks cache all the way up the tree */
2065 u->cgroup_members_mask_valid = false;
2066
2067 slice = UNIT_GET_SLICE(u);
2068 if (slice)
2069 unit_invalidate_cgroup_members_masks(slice);
2070 }
2071
2072 const char *unit_get_realized_cgroup_path(Unit *u, CGroupMask mask) {
2073
2074 /* Returns the realized cgroup path of the specified unit where all specified controllers are available. */
2075
2076 while (u) {
2077
2078 if (u->cgroup_path &&
2079 u->cgroup_realized &&
2080 FLAGS_SET(u->cgroup_realized_mask, mask))
2081 return u->cgroup_path;
2082
2083 u = UNIT_GET_SLICE(u);
2084 }
2085
2086 return NULL;
2087 }
2088
2089 static const char *migrate_callback(CGroupMask mask, void *userdata) {
2090 /* If not realized at all, migrate to root ("").
2091          * This may happen if we're upgrading from an older version that didn't clean up.
2092 */
2093 return strempty(unit_get_realized_cgroup_path(userdata, mask));
2094 }
2095
2096 int unit_default_cgroup_path(const Unit *u, char **ret) {
2097 _cleanup_free_ char *p = NULL;
2098 int r;
2099
2100 assert(u);
2101 assert(ret);
2102
2103 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
2104 p = strdup(u->manager->cgroup_root);
2105 else {
2106 _cleanup_free_ char *escaped = NULL, *slice_path = NULL;
2107 Unit *slice;
2108
2109 slice = UNIT_GET_SLICE(u);
2110 if (slice && !unit_has_name(slice, SPECIAL_ROOT_SLICE)) {
2111 r = cg_slice_to_path(slice->id, &slice_path);
2112 if (r < 0)
2113 return r;
2114 }
2115
2116 r = cg_escape(u->id, &escaped);
2117 if (r < 0)
2118 return r;
2119
2120 p = path_join(empty_to_root(u->manager->cgroup_root), slice_path, escaped);
2121 }
2122 if (!p)
2123 return -ENOMEM;
2124
2125 *ret = TAKE_PTR(p);
2126 return 0;
2127 }
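
/* What unit_default_cgroup_path() builds can be illustrated with a standalone sketch of
 * the slice-to-directory expansion that cg_slice_to_path() performs: each dash in a slice
 * name opens another nesting level. Escaping, error handling and special cases like the
 * root slice are omitted; the sketch_* name and buffer handling are illustrative only. */

#include <stdio.h>
#include <string.h>

static void sketch_slice_to_path(const char *slice, char *buf, size_t size) {
        size_t stem_len = strlen(slice) - strlen(".slice");
        size_t used = 0;

        buf[0] = '\0';

        /* Emit one ".slice" directory component per dash-separated prefix. */
        for (size_t i = 1; i <= stem_len && used < size; i++)
                if (i == stem_len || slice[i] == '-')
                        used += (size_t) snprintf(buf + used, size - used, "%s%.*s.slice",
                                                  used > 0 ? "/" : "", (int) i, slice);
}

/* sketch_slice_to_path("system-getty.slice", ...) -> "system.slice/system-getty.slice";
 * a unit "getty@tty1.service" placed in that slice then ends up at
 * "<cgroup root>/system.slice/system-getty.slice/getty@tty1.service". */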
2128
2129 int unit_set_cgroup_path(Unit *u, const char *path) {
2130 _cleanup_free_ char *p = NULL;
2131 int r;
2132
2133 assert(u);
2134
2135 if (streq_ptr(u->cgroup_path, path))
2136 return 0;
2137
2138 if (path) {
2139 p = strdup(path);
2140 if (!p)
2141 return -ENOMEM;
2142 }
2143
2144 if (p) {
2145 r = hashmap_put(u->manager->cgroup_unit, p, u);
2146 if (r < 0)
2147 return r;
2148 }
2149
2150 unit_release_cgroup(u);
2151 u->cgroup_path = TAKE_PTR(p);
2152
2153 return 1;
2154 }
2155
2156 int unit_watch_cgroup(Unit *u) {
2157 _cleanup_free_ char *events = NULL;
2158 int r;
2159
2160 assert(u);
2161
2162         /* Watches the "cgroup.events" attribute of this unit's cgroup for "empty" events, but only if
2163 * cgroupv2 is available. */
2164
2165 if (!u->cgroup_path)
2166 return 0;
2167
2168 if (u->cgroup_control_inotify_wd >= 0)
2169 return 0;
2170
2171 /* Only applies to the unified hierarchy */
2172 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
2173 if (r < 0)
2174 return log_error_errno(r, "Failed to determine whether the name=systemd hierarchy is unified: %m");
2175 if (r == 0)
2176 return 0;
2177
2178         /* No point in watching the top-level slice, it's never going to run empty. */
2179 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
2180 return 0;
2181
2182 r = hashmap_ensure_allocated(&u->manager->cgroup_control_inotify_wd_unit, &trivial_hash_ops);
2183 if (r < 0)
2184 return log_oom();
2185
2186 r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events", &events);
2187 if (r < 0)
2188 return log_oom();
2189
2190 u->cgroup_control_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, events, IN_MODIFY);
2191 if (u->cgroup_control_inotify_wd < 0) {
2192
2193 if (errno == ENOENT) /* If the directory is already gone we don't need to track it, so this
2194 * is not an error */
2195 return 0;
2196
2197 return log_unit_error_errno(u, errno, "Failed to add control inotify watch descriptor for control group %s: %m", empty_to_root(u->cgroup_path));
2198 }
2199
2200 r = hashmap_put(u->manager->cgroup_control_inotify_wd_unit, INT_TO_PTR(u->cgroup_control_inotify_wd), u);
2201 if (r < 0)
2202 return log_unit_error_errno(u, r, "Failed to add control inotify watch descriptor for control group %s to hash map: %m", empty_to_root(u->cgroup_path));
2203
2204 return 0;
2205 }
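
/* The watch set up above boils down to an IN_MODIFY inotify watch on the cgroup.events
 * file; the kernel touches that file whenever e.g. the "populated" key flips. A minimal
 * standalone sketch of the same mechanism (the path in the usage note is a placeholder,
 * and the sketch only drains pending events once instead of using an event loop): */

#include <errno.h>
#include <stdio.h>
#include <sys/inotify.h>
#include <unistd.h>

static int sketch_watch_cgroup_events(const char *events_path) {
        char buf[4096];
        int fd, wd;

        fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
        if (fd < 0)
                return -errno;

        wd = inotify_add_watch(fd, events_path, IN_MODIFY);
        if (wd < 0) {
                int r = -errno;
                close(fd);
                return r;
        }

        /* A real implementation plugs fd into an event loop; here we just peek once. */
        if (read(fd, buf, sizeof(buf)) > 0)
                printf("%s changed, re-read it and check the 'populated' key\n", events_path);

        close(fd);
        return 0;
}

/* Usage (hypothetical path):
 *   sketch_watch_cgroup_events("/sys/fs/cgroup/system.slice/foo.service/cgroup.events"); */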
2206
2207 int unit_watch_cgroup_memory(Unit *u) {
2208 _cleanup_free_ char *events = NULL;
2209 CGroupContext *c;
2210 int r;
2211
2212 assert(u);
2213
2214 /* Watches the "memory.events" attribute of this unit's cgroup for "oom_kill" events, but only if
2215 * cgroupv2 is available. */
2216
2217 if (!u->cgroup_path)
2218 return 0;
2219
2220 c = unit_get_cgroup_context(u);
2221 if (!c)
2222 return 0;
2223
2224 /* The "memory.events" attribute is only available if the memory controller is on. Let's hence tie
2225          * this to memory accounting; in a way, watching for OOM kills is a form of memory accounting after
2226 * all. */
2227 if (!c->memory_accounting)
2228 return 0;
2229
2230 /* Don't watch inner nodes, as the kernel doesn't report oom_kill events recursively currently, and
2231 * we also don't want to generate a log message for each parent cgroup of a process. */
2232 if (u->type == UNIT_SLICE)
2233 return 0;
2234
2235 if (u->cgroup_memory_inotify_wd >= 0)
2236 return 0;
2237
2238 /* Only applies to the unified hierarchy */
2239 r = cg_all_unified();
2240 if (r < 0)
2241 return log_error_errno(r, "Failed to determine whether the memory controller is unified: %m");
2242 if (r == 0)
2243 return 0;
2244
2245 r = hashmap_ensure_allocated(&u->manager->cgroup_memory_inotify_wd_unit, &trivial_hash_ops);
2246 if (r < 0)
2247 return log_oom();
2248
2249 r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "memory.events", &events);
2250 if (r < 0)
2251 return log_oom();
2252
2253 u->cgroup_memory_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, events, IN_MODIFY);
2254 if (u->cgroup_memory_inotify_wd < 0) {
2255
2256 if (errno == ENOENT) /* If the directory is already gone we don't need to track it, so this
2257 * is not an error */
2258 return 0;
2259
2260 return log_unit_error_errno(u, errno, "Failed to add memory inotify watch descriptor for control group %s: %m", empty_to_root(u->cgroup_path));
2261 }
2262
2263 r = hashmap_put(u->manager->cgroup_memory_inotify_wd_unit, INT_TO_PTR(u->cgroup_memory_inotify_wd), u);
2264 if (r < 0)
2265 return log_unit_error_errno(u, r, "Failed to add memory inotify watch descriptor for control group %s to hash map: %m", empty_to_root(u->cgroup_path));
2266
2267 return 0;
2268 }
2269
2270 int unit_pick_cgroup_path(Unit *u) {
2271 _cleanup_free_ char *path = NULL;
2272 int r;
2273
2274 assert(u);
2275
2276 if (u->cgroup_path)
2277 return 0;
2278
2279 if (!UNIT_HAS_CGROUP_CONTEXT(u))
2280 return -EINVAL;
2281
2282 r = unit_default_cgroup_path(u, &path);
2283 if (r < 0)
2284 return log_unit_error_errno(u, r, "Failed to generate default cgroup path: %m");
2285
2286 r = unit_set_cgroup_path(u, path);
2287 if (r == -EEXIST)
2288 return log_unit_error_errno(u, r, "Control group %s exists already.", empty_to_root(path));
2289 if (r < 0)
2290 return log_unit_error_errno(u, r, "Failed to set unit's control group path to %s: %m", empty_to_root(path));
2291
2292 return 0;
2293 }
2294
2295 static int unit_update_cgroup(
2296 Unit *u,
2297 CGroupMask target_mask,
2298 CGroupMask enable_mask,
2299 ManagerState state) {
2300
2301 bool created, is_root_slice;
2302 CGroupMask migrate_mask = 0;
2303 _cleanup_free_ char *cgroup_full_path = NULL;
2304 int r;
2305
2306 assert(u);
2307
2308 if (!UNIT_HAS_CGROUP_CONTEXT(u))
2309 return 0;
2310
2311 /* Figure out our cgroup path */
2312 r = unit_pick_cgroup_path(u);
2313 if (r < 0)
2314 return r;
2315
2316 /* First, create our own group */
2317 r = cg_create_everywhere(u->manager->cgroup_supported, target_mask, u->cgroup_path);
2318 if (r < 0)
2319 return log_unit_error_errno(u, r, "Failed to create cgroup %s: %m", empty_to_root(u->cgroup_path));
2320 created = r;
2321
2322 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0) {
2323 uint64_t cgroup_id = 0;
2324
2325 r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, NULL, &cgroup_full_path);
2326 if (r == 0) {
2327 r = cg_path_get_cgroupid(cgroup_full_path, &cgroup_id);
2328 if (r < 0)
2329 log_unit_full_errno(u, ERRNO_IS_NOT_SUPPORTED(r) ? LOG_DEBUG : LOG_WARNING, r,
2330 "Failed to get cgroup ID of cgroup %s, ignoring: %m", cgroup_full_path);
2331 } else
2332 log_unit_warning_errno(u, r, "Failed to get full cgroup path on cgroup %s, ignoring: %m", empty_to_root(u->cgroup_path));
2333
2334 u->cgroup_id = cgroup_id;
2335 }
2336
2337 /* Start watching it */
2338 (void) unit_watch_cgroup(u);
2339 (void) unit_watch_cgroup_memory(u);
2340
2341         /* For v2 we preserve enabled controllers in delegated units and adjust the others;
2342 * for v1 we figure out which controller hierarchies need migration. */
2343 if (created || !u->cgroup_realized || !unit_cgroup_delegate(u)) {
2344 CGroupMask result_mask = 0;
2345
2346 /* Enable all controllers we need */
2347 r = cg_enable_everywhere(u->manager->cgroup_supported, enable_mask, u->cgroup_path, &result_mask);
2348 if (r < 0)
2349 log_unit_warning_errno(u, r, "Failed to enable/disable controllers on cgroup %s, ignoring: %m", empty_to_root(u->cgroup_path));
2350
2351 /* Remember what's actually enabled now */
2352 u->cgroup_enabled_mask = result_mask;
2353
2354 migrate_mask = u->cgroup_realized_mask ^ target_mask;
2355 }
2356
2357 /* Keep track that this is now realized */
2358 u->cgroup_realized = true;
2359 u->cgroup_realized_mask = target_mask;
2360
2361 /* Migrate processes in controller hierarchies both downwards (enabling) and upwards (disabling).
2362 *
2363          * Unnecessary controller cgroups are trimmed (after being emptied by upward migration).
2364          * We also perform migration with whole slices for cases when users don't care about leaf
2365          * granularity. Since delegated_mask is a subset of the target mask, we won't trim a slice subtree containing
2366          * delegated units.
2367 */
2368 if (cg_all_unified() == 0) {
2369 r = cg_migrate_v1_controllers(u->manager->cgroup_supported, migrate_mask, u->cgroup_path, migrate_callback, u);
2370 if (r < 0)
2371 log_unit_warning_errno(u, r, "Failed to migrate controller cgroups from %s, ignoring: %m", empty_to_root(u->cgroup_path));
2372
2373 is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);
2374 r = cg_trim_v1_controllers(u->manager->cgroup_supported, ~target_mask, u->cgroup_path, !is_root_slice);
2375 if (r < 0)
2376 log_unit_warning_errno(u, r, "Failed to delete controller cgroups %s, ignoring: %m", empty_to_root(u->cgroup_path));
2377 }
2378
2379 /* Set attributes */
2380 cgroup_context_apply(u, target_mask, state);
2381 cgroup_xattr_apply(u);
2382
2383 /* For most units we expect that memory monitoring is set up before the unit is started and we won't
2384 * touch it after. For PID 1 this is different though, because we couldn't possibly do that given
2385 * that PID 1 runs before init.scope is even set up. Hence, whenever init.scope is realized, let's
2386 * try to open the memory pressure interface anew. */
2387 if (unit_has_name(u, SPECIAL_INIT_SCOPE))
2388 (void) manager_setup_memory_pressure_event_source(u->manager);
2389
2390 return 0;
2391 }
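
/* On the unified hierarchy the creation and controller enablement done by
 * cg_create_everywhere()/cg_enable_everywhere() comes down to mkdir() plus writing
 * "+controller"/"-controller" tokens into the parent's cgroup.subtree_control. A
 * stripped-down standalone sketch, ignoring privileges, delegation and most error
 * details (paths and the sketch_* name are illustrative): */

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

static int sketch_create_and_enable(const char *parent, const char *name) {
        char path[4096];
        int fd;

        /* Create the child cgroup directory itself. */
        snprintf(path, sizeof(path), "%s/%s", parent, name);
        if (mkdir(path, 0755) < 0 && errno != EEXIST)
                return -errno;

        /* Enable the cpu and memory controllers for the parent's children. */
        snprintf(path, sizeof(path), "%s/cgroup.subtree_control", parent);
        fd = open(path, O_WRONLY|O_CLOEXEC);
        if (fd < 0)
                return -errno;

        if (write(fd, "+cpu +memory", strlen("+cpu +memory")) < 0) {
                int r = -errno;
                close(fd);
                return r;
        }

        close(fd);
        return 0;
}

/* Usage (hypothetical): sketch_create_and_enable("/sys/fs/cgroup/system.slice", "demo.service"); */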
2392
2393 static int unit_attach_pid_to_cgroup_via_bus(Unit *u, pid_t pid, const char *suffix_path) {
2394 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2395 char *pp;
2396 int r;
2397
2398 assert(u);
2399
2400 if (MANAGER_IS_SYSTEM(u->manager))
2401 return -EINVAL;
2402
2403 if (!u->manager->system_bus)
2404 return -EIO;
2405
2406 if (!u->cgroup_path)
2407 return -EINVAL;
2408
2409 /* Determine this unit's cgroup path relative to our cgroup root */
2410 pp = path_startswith(u->cgroup_path, u->manager->cgroup_root);
2411 if (!pp)
2412 return -EINVAL;
2413
2414 pp = strjoina("/", pp, suffix_path);
2415 path_simplify(pp);
2416
2417 r = bus_call_method(u->manager->system_bus,
2418 bus_systemd_mgr,
2419 "AttachProcessesToUnit",
2420 &error, NULL,
2421 "ssau",
2422 NULL /* empty unit name means client's unit, i.e. us */, pp, 1, (uint32_t) pid);
2423 if (r < 0)
2424 return log_unit_debug_errno(u, r, "Failed to attach unit process " PID_FMT " via the bus: %s", pid, bus_error_message(&error, r));
2425
2426 return 0;
2427 }
2428
2429 int unit_attach_pids_to_cgroup(Unit *u, Set *pids, const char *suffix_path) {
2430 _cleanup_free_ char *joined = NULL;
2431 CGroupMask delegated_mask;
2432 const char *p;
2433 void *pidp;
2434 int ret, r;
2435
2436 assert(u);
2437
2438 if (!UNIT_HAS_CGROUP_CONTEXT(u))
2439 return -EINVAL;
2440
2441 if (set_isempty(pids))
2442 return 0;
2443
2444         /* Load any custom firewall BPF programs here once to test whether they exist and are actually loadable.
2445          * Fail early here, since later errors in the call chain from unit_realize_cgroup() to cgroup_context_apply() are ignored. */
2446 r = bpf_firewall_load_custom(u);
2447 if (r < 0)
2448 return r;
2449
2450 r = unit_realize_cgroup(u);
2451 if (r < 0)
2452 return r;
2453
2454 if (isempty(suffix_path))
2455 p = u->cgroup_path;
2456 else {
2457 joined = path_join(u->cgroup_path, suffix_path);
2458 if (!joined)
2459 return -ENOMEM;
2460
2461 p = joined;
2462 }
2463
2464 delegated_mask = unit_get_delegate_mask(u);
2465
2466 ret = 0;
2467 SET_FOREACH(pidp, pids) {
2468 pid_t pid = PTR_TO_PID(pidp);
2469
2470 /* First, attach the PID to the main cgroup hierarchy */
2471 r = cg_attach(SYSTEMD_CGROUP_CONTROLLER, p, pid);
2472 if (r < 0) {
2473 bool again = MANAGER_IS_USER(u->manager) && ERRNO_IS_PRIVILEGE(r);
2474
2475 log_unit_full_errno(u, again ? LOG_DEBUG : LOG_INFO, r,
2476 "Couldn't move process "PID_FMT" to%s requested cgroup '%s': %m",
2477 pid, again ? " directly" : "", empty_to_root(p));
2478
2479 if (again) {
2480 int z;
2481
2482 /* If we are in a user instance, and we can't move the process ourselves due
2483 * to permission problems, let's ask the system instance about it instead.
2484 * Since it's more privileged it might be able to move the process across the
2485 * leaves of a subtree whose top node is not owned by us. */
2486
2487 z = unit_attach_pid_to_cgroup_via_bus(u, pid, suffix_path);
2488 if (z < 0)
2489 log_unit_info_errno(u, z, "Couldn't move process "PID_FMT" to requested cgroup '%s' (directly or via the system bus): %m", pid, empty_to_root(p));
2490 else {
2491 if (ret >= 0)
2492 ret++; /* Count successful additions */
2493 continue; /* When the bus thing worked via the bus we are fully done for this PID. */
2494 }
2495 }
2496
2497 if (ret >= 0)
2498 ret = r; /* Remember first error */
2499
2500 continue;
2501 } else if (ret >= 0)
2502 ret++; /* Count successful additions */
2503
2504 r = cg_all_unified();
2505 if (r < 0)
2506 return r;
2507 if (r > 0)
2508 continue;
2509
2510                 /* In the legacy hierarchy, attach the process to the requested cgroup if possible, and if not to the
2511 * innermost realized one */
2512
2513 for (CGroupController c = 0; c < _CGROUP_CONTROLLER_MAX; c++) {
2514 CGroupMask bit = CGROUP_CONTROLLER_TO_MASK(c);
2515 const char *realized;
2516
2517 if (!(u->manager->cgroup_supported & bit))
2518 continue;
2519
2520 /* If this controller is delegated and realized, honour the caller's request for the cgroup suffix. */
2521 if (delegated_mask & u->cgroup_realized_mask & bit) {
2522 r = cg_attach(cgroup_controller_to_string(c), p, pid);
2523 if (r >= 0)
2524 continue; /* Success! */
2525
2526 log_unit_debug_errno(u, r, "Failed to attach PID " PID_FMT " to requested cgroup %s in controller %s, falling back to unit's cgroup: %m",
2527 pid, empty_to_root(p), cgroup_controller_to_string(c));
2528 }
2529
2530                         /* So this controller is either not delegated or not realized, or something else weird happened. In
2531 * that case let's attach the PID at least to the closest cgroup up the tree that is
2532 * realized. */
2533 realized = unit_get_realized_cgroup_path(u, bit);
2534 if (!realized)
2535 continue; /* Not even realized in the root slice? Then let's not bother */
2536
2537 r = cg_attach(cgroup_controller_to_string(c), realized, pid);
2538 if (r < 0)
2539 log_unit_debug_errno(u, r, "Failed to attach PID " PID_FMT " to realized cgroup %s in controller %s, ignoring: %m",
2540 pid, realized, cgroup_controller_to_string(c));
2541 }
2542 }
2543
2544 return ret;
2545 }
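
/* Underneath cg_attach(), moving a process on cgroup v2 is a single write of its PID
 * into the target cgroup's cgroup.procs file; the interesting part above is the
 * delegation handling and the bus/realized-path fallbacks, which this standalone sketch
 * deliberately leaves out (the path and the sketch_* name are illustrative): */

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static int sketch_attach_pid(const char *cg_dir, pid_t pid) {
        char path[4096], buf[32];
        int fd, len;

        snprintf(path, sizeof(path), "%s/cgroup.procs", cg_dir);
        len = snprintf(buf, sizeof(buf), "%d\n", (int) pid);

        fd = open(path, O_WRONLY|O_CLOEXEC);
        if (fd < 0)
                return -errno;

        /* The kernel migrates the whole thread group identified by this PID. */
        if (write(fd, buf, (size_t) len) < 0) {
                int r = -errno;
                close(fd);
                return r;
        }

        close(fd);
        return 0;
}

/* Usage (hypothetical): sketch_attach_pid("/sys/fs/cgroup/system.slice/demo.service", getpid()); */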
2546
2547 static bool unit_has_mask_realized(
2548 Unit *u,
2549 CGroupMask target_mask,
2550 CGroupMask enable_mask) {
2551
2552 assert(u);
2553
2554 /* Returns true if this unit is fully realized. We check four things:
2555 *
2556 * 1. Whether the cgroup was created at all
2557 * 2. Whether the cgroup was created in all the hierarchies we need it to be created in (in case of cgroup v1)
2558 * 3. Whether the cgroup has all the right controllers enabled (in case of cgroup v2)
2559 * 4. Whether the invalidation mask is currently zero
2560 *
2561 * If you wonder why we mask the target realization and enable mask with CGROUP_MASK_V1/CGROUP_MASK_V2: note
2562 * that there are three sets of bitmasks: CGROUP_MASK_V1 (for real cgroup v1 controllers), CGROUP_MASK_V2 (for
2563 * real cgroup v2 controllers) and CGROUP_MASK_BPF (for BPF-based pseudo-controllers). Now, cgroup_realized_mask
2564          * only matters for cgroup v1 controllers, and cgroup_enabled_mask is only used for cgroup v2, and if they
2565          * differ in the others, we don't really care. (After all, the cgroup_enabled_mask tracks which controllers are
2566          * enabled through cgroup.subtree_control, and since the BPF pseudo-controllers don't show up there, they
2567          * simply don't matter.) */
2568
2569 return u->cgroup_realized &&
2570 ((u->cgroup_realized_mask ^ target_mask) & CGROUP_MASK_V1) == 0 &&
2571 ((u->cgroup_enabled_mask ^ enable_mask) & CGROUP_MASK_V2) == 0 &&
2572 u->cgroup_invalidated_mask == 0;
2573 }
2574
2575 static bool unit_has_mask_disables_realized(
2576 Unit *u,
2577 CGroupMask target_mask,
2578 CGroupMask enable_mask) {
2579
2580 assert(u);
2581
2582 /* Returns true if all controllers which should be disabled are indeed disabled.
2583 *
2584 * Unlike unit_has_mask_realized, we don't care what was enabled, only that anything we want to remove is
2585 * already removed. */
2586
2587 return !u->cgroup_realized ||
2588 (FLAGS_SET(u->cgroup_realized_mask, target_mask & CGROUP_MASK_V1) &&
2589 FLAGS_SET(u->cgroup_enabled_mask, enable_mask & CGROUP_MASK_V2));
2590 }
2591
2592 static bool unit_has_mask_enables_realized(
2593 Unit *u,
2594 CGroupMask target_mask,
2595 CGroupMask enable_mask) {
2596
2597 assert(u);
2598
2599 /* Returns true if all controllers which should be enabled are indeed enabled.
2600 *
2601 * Unlike unit_has_mask_realized, we don't care about the controllers that are not present, only that anything
2602 * we want to add is already added. */
2603
2604 return u->cgroup_realized &&
2605 ((u->cgroup_realized_mask | target_mask) & CGROUP_MASK_V1) == (u->cgroup_realized_mask & CGROUP_MASK_V1) &&
2606 ((u->cgroup_enabled_mask | enable_mask) & CGROUP_MASK_V2) == (u->cgroup_enabled_mask & CGROUP_MASK_V2);
2607 }
2608
2609 void unit_add_to_cgroup_realize_queue(Unit *u) {
2610 assert(u);
2611
2612 if (u->in_cgroup_realize_queue)
2613 return;
2614
2615 LIST_APPEND(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
2616 u->in_cgroup_realize_queue = true;
2617 }
2618
2619 static void unit_remove_from_cgroup_realize_queue(Unit *u) {
2620 assert(u);
2621
2622 if (!u->in_cgroup_realize_queue)
2623 return;
2624
2625 LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
2626 u->in_cgroup_realize_queue = false;
2627 }
2628
2629 /* Controllers can only be enabled breadth-first, from the root of the
2630 * hierarchy downwards to the unit in question. */
2631 static int unit_realize_cgroup_now_enable(Unit *u, ManagerState state) {
2632 CGroupMask target_mask, enable_mask, new_target_mask, new_enable_mask;
2633 Unit *slice;
2634 int r;
2635
2636 assert(u);
2637
2638 /* First go deal with this unit's parent, or we won't be able to enable
2639 * any new controllers at this layer. */
2640 slice = UNIT_GET_SLICE(u);
2641 if (slice) {
2642 r = unit_realize_cgroup_now_enable(slice, state);
2643 if (r < 0)
2644 return r;
2645 }
2646
2647 target_mask = unit_get_target_mask(u);
2648 enable_mask = unit_get_enable_mask(u);
2649
2650 /* We can only enable in this direction, don't try to disable anything.
2651 */
2652 if (unit_has_mask_enables_realized(u, target_mask, enable_mask))
2653 return 0;
2654
2655 new_target_mask = u->cgroup_realized_mask | target_mask;
2656 new_enable_mask = u->cgroup_enabled_mask | enable_mask;
2657
2658 return unit_update_cgroup(u, new_target_mask, new_enable_mask, state);
2659 }
2660
2661 /* Controllers can only be disabled depth-first, from the leaves of the
2662 * hierarchy upwards to the unit in question. */
2663 static int unit_realize_cgroup_now_disable(Unit *u, ManagerState state) {
2664 Unit *m;
2665
2666 assert(u);
2667
2668 if (u->type != UNIT_SLICE)
2669 return 0;
2670
2671 UNIT_FOREACH_DEPENDENCY(m, u, UNIT_ATOM_SLICE_OF) {
2672 CGroupMask target_mask, enable_mask, new_target_mask, new_enable_mask;
2673 int r;
2674
2675 /* The cgroup for this unit might not actually be fully realised yet, in which case it isn't
2676 * holding any controllers open anyway. */
2677 if (!m->cgroup_realized)
2678 continue;
2679
2680 /* We must disable those below us first in order to release the controller. */
2681 if (m->type == UNIT_SLICE)
2682 (void) unit_realize_cgroup_now_disable(m, state);
2683
2684 target_mask = unit_get_target_mask(m);
2685 enable_mask = unit_get_enable_mask(m);
2686
2687 /* We can only disable in this direction, don't try to enable anything. */
2688 if (unit_has_mask_disables_realized(m, target_mask, enable_mask))
2689 continue;
2690
2691 new_target_mask = m->cgroup_realized_mask & target_mask;
2692 new_enable_mask = m->cgroup_enabled_mask & enable_mask;
2693
2694 r = unit_update_cgroup(m, new_target_mask, new_enable_mask, state);
2695 if (r < 0)
2696 return r;
2697 }
2698
2699 return 0;
2700 }
2701
2702 /* Check if necessary controllers and attributes for a unit are in place.
2703 *
2704 * - If so, do nothing.
2705 * - If not, create paths, move processes over, and set attributes.
2706 *
2707 * Controllers can only be *enabled* in a breadth-first way, and *disabled* in
2708 * a depth-first way. As such the process looks like this:
2709 *
2710 * Suppose we have a cgroup hierarchy which looks like this:
2711 *
2712 * root
2713 * / \
2714 * / \
2715 * / \
2716 * a b
2717 * / \ / \
2718 * / \ / \
2719 * c d e f
2720 * / \ / \ / \ / \
2721 * h i j k l m n o
2722 *
2723 * 1. We want to realise cgroup "d" now.
2724 * 2. cgroup "a" has DisableControllers=cpu in the associated unit.
2725 * 3. cgroup "k" just started requesting the memory controller.
2726 *
2727 * To make this work we must do the following in order:
2728 *
2729 * 1. Disable CPU controller in k, j
2730 * 2. Disable CPU controller in d
2731 * 3. Enable memory controller in root
2732 * 4. Enable memory controller in a
2733 * 5. Enable memory controller in d
2734 * 6. Enable memory controller in k
2735 *
2736 * Notice that we need to touch j in one direction, but not the other. We also
2737 * don't go beyond d when disabling -- it's up to "a" to get realized if it
2738 * wants to disable further. The basic rules are therefore:
2739 *
2740 * - If you're disabling something, you need to realise all of the cgroups from
2741 * your recursive descendants to the root. This starts from the leaves.
2742 * - If you're enabling something, you need to realise from the root cgroup
2743 * downwards, but you don't need to iterate your recursive descendants.
2744 *
2745 * Returns 0 on success and < 0 on failure. */
2746 static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
2747 CGroupMask target_mask, enable_mask;
2748 Unit *slice;
2749 int r;
2750
2751 assert(u);
2752
2753 unit_remove_from_cgroup_realize_queue(u);
2754
2755 target_mask = unit_get_target_mask(u);
2756 enable_mask = unit_get_enable_mask(u);
2757
2758 if (unit_has_mask_realized(u, target_mask, enable_mask))
2759 return 0;
2760
2761 /* Disable controllers below us, if there are any */
2762 r = unit_realize_cgroup_now_disable(u, state);
2763 if (r < 0)
2764 return r;
2765
2766 /* Enable controllers above us, if there are any */
2767 slice = UNIT_GET_SLICE(u);
2768 if (slice) {
2769 r = unit_realize_cgroup_now_enable(slice, state);
2770 if (r < 0)
2771 return r;
2772 }
2773
2774 /* Now actually deal with the cgroup we were trying to realise and set attributes */
2775 r = unit_update_cgroup(u, target_mask, enable_mask, state);
2776 if (r < 0)
2777 return r;
2778
2779 /* Now, reset the invalidation mask */
2780 u->cgroup_invalidated_mask = 0;
2781 return 0;
2782 }
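
/* The ordering rules from the big comment above (disable depth-first from the leaves,
 * enable breadth-first from the root) can be shown on a toy tree with parent pointers.
 * This standalone sketch only prints the visiting order and never touches cgroups; the
 * sketch_* types and names are illustrative only. */

#include <stdio.h>

struct sketch_node {
        const char *name;
        struct sketch_node *parent;
        struct sketch_node *children[4]; /* NULL-terminated */
};

/* Disabling: handle all descendants first, then the node itself (depth-first). */
static void sketch_disable(struct sketch_node *n) {
        for (int i = 0; i < 4 && n->children[i]; i++)
                sketch_disable(n->children[i]);
        printf("disable controller in %s\n", n->name);
}

/* Enabling: handle all ancestors first, root downwards, then the node itself. */
static void sketch_enable(struct sketch_node *n) {
        if (n->parent)
                sketch_enable(n->parent);
        printf("enable controller in %s\n", n->name);
}

/* For the "realise d" walkthrough above, sketch_disable(&d) visits j, k and then d
 * itself, and sketch_enable(&d) visits root, a, d -- mirroring steps 1-5; step 6 (k)
 * happens when k itself is realized. */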
2783
2784 unsigned manager_dispatch_cgroup_realize_queue(Manager *m) {
2785 ManagerState state;
2786 unsigned n = 0;
2787 Unit *i;
2788 int r;
2789
2790 assert(m);
2791
2792 state = manager_state(m);
2793
2794 while ((i = m->cgroup_realize_queue)) {
2795 assert(i->in_cgroup_realize_queue);
2796
2797 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(i))) {
2798 /* Maybe things changed, and the unit is not actually active anymore? */
2799 unit_remove_from_cgroup_realize_queue(i);
2800 continue;
2801 }
2802
2803 r = unit_realize_cgroup_now(i, state);
2804 if (r < 0)
2805 log_warning_errno(r, "Failed to realize cgroups for queued unit %s, ignoring: %m", i->id);
2806
2807 n++;
2808 }
2809
2810 return n;
2811 }
2812
2813 void unit_add_family_to_cgroup_realize_queue(Unit *u) {
2814 assert(u);
2815 assert(u->type == UNIT_SLICE);
2816
2817         /* The family of a unit is defined as the (immediate) children of the unit and the immediate children of all
2818 * its ancestors.
2819 *
2820 * Ideally we would enqueue ancestor path only (bottom up). However, on cgroup-v1 scheduling becomes
2821 * very weird if two units that own processes reside in the same slice, but one is realized in the
2822 * "cpu" hierarchy and one is not (for example because one has CPUWeight= set and the other does
2823 * not), because that means individual processes need to be scheduled against whole cgroups. Let's
2824          * avoid this asymmetry by ensuring that siblings of a unit are always realized in their v1
2825          * controller hierarchies too (if the unit requires the controller to be realized).
2826 *
2827 * The function must invalidate cgroup_members_mask of all ancestors in order to calculate up to date
2828 * masks. */
2829
2830 do {
2831 Unit *m;
2832
2833 /* Children of u likely changed when we're called */
2834 u->cgroup_members_mask_valid = false;
2835
2836 UNIT_FOREACH_DEPENDENCY(m, u, UNIT_ATOM_SLICE_OF) {
2837
2838 /* No point in doing cgroup application for units without active processes. */
2839 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(m)))
2840 continue;
2841
2842 /* We only enqueue siblings if they were realized once at least, in the main
2843 * hierarchy. */
2844 if (!m->cgroup_realized)
2845 continue;
2846
2847 /* If the unit doesn't need any new controllers and has current ones
2848 * realized, it doesn't need any changes. */
2849 if (unit_has_mask_realized(m,
2850 unit_get_target_mask(m),
2851 unit_get_enable_mask(m)))
2852 continue;
2853
2854 unit_add_to_cgroup_realize_queue(m);
2855 }
2856
2857 /* Parent comes after children */
2858 unit_add_to_cgroup_realize_queue(u);
2859
2860 u = UNIT_GET_SLICE(u);
2861 } while (u);
2862 }
2863
2864 int unit_realize_cgroup(Unit *u) {
2865 Unit *slice;
2866
2867 assert(u);
2868
2869 if (!UNIT_HAS_CGROUP_CONTEXT(u))
2870 return 0;
2871
2872 /* So, here's the deal: when realizing the cgroups for this unit, we need to first create all
2873 * parents, but there's more actually: for the weight-based controllers we also need to make sure
2874 * that all our siblings (i.e. units that are in the same slice as we are) have cgroups, too. On the
2875 * other hand, when a controller is removed from realized set, it may become unnecessary in siblings
2876 * and ancestors and they should be (de)realized too.
2877 *
2878 * This call will defer work on the siblings and derealized ancestors to the next event loop
2879 * iteration and synchronously creates the parent cgroups (unit_realize_cgroup_now). */
2880
2881 slice = UNIT_GET_SLICE(u);
2882 if (slice)
2883 unit_add_family_to_cgroup_realize_queue(slice);
2884
2885 /* And realize this one now (and apply the values) */
2886 return unit_realize_cgroup_now(u, manager_state(u->manager));
2887 }
2888
2889 void unit_release_cgroup(Unit *u) {
2890 assert(u);
2891
2892 /* Forgets all cgroup details for this cgroup — but does *not* destroy the cgroup. This is hence OK to call
2893 * when we close down everything for reexecution, where we really want to leave the cgroup in place. */
2894
2895 if (u->cgroup_path) {
2896 (void) hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
2897 u->cgroup_path = mfree(u->cgroup_path);
2898 }
2899
2900 if (u->cgroup_control_inotify_wd >= 0) {
2901 if (inotify_rm_watch(u->manager->cgroup_inotify_fd, u->cgroup_control_inotify_wd) < 0)
2902 log_unit_debug_errno(u, errno, "Failed to remove cgroup control inotify watch %i for %s, ignoring: %m", u->cgroup_control_inotify_wd, u->id);
2903
2904 (void) hashmap_remove(u->manager->cgroup_control_inotify_wd_unit, INT_TO_PTR(u->cgroup_control_inotify_wd));
2905 u->cgroup_control_inotify_wd = -1;
2906 }
2907
2908 if (u->cgroup_memory_inotify_wd >= 0) {
2909 if (inotify_rm_watch(u->manager->cgroup_inotify_fd, u->cgroup_memory_inotify_wd) < 0)
2910 log_unit_debug_errno(u, errno, "Failed to remove cgroup memory inotify watch %i for %s, ignoring: %m", u->cgroup_memory_inotify_wd, u->id);
2911
2912 (void) hashmap_remove(u->manager->cgroup_memory_inotify_wd_unit, INT_TO_PTR(u->cgroup_memory_inotify_wd));
2913 u->cgroup_memory_inotify_wd = -1;
2914 }
2915 }
2916
2917 bool unit_maybe_release_cgroup(Unit *u) {
2918 int r;
2919
2920 assert(u);
2921
2922 if (!u->cgroup_path)
2923 return true;
2924
2925 /* Don't release the cgroup if there are still processes under it. If we get notified later when all the
2926 * processes exit (e.g. the processes were in D-state and exited after the unit was marked as failed)
2927 * we need the cgroup paths to continue to be tracked by the manager so they can be looked up and cleaned
2928 * up later. */
2929 r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
2930 if (r < 0)
2931 log_unit_debug_errno(u, r, "Error checking if the cgroup is recursively empty, ignoring: %m");
2932 else if (r == 1) {
2933 unit_release_cgroup(u);
2934 return true;
2935 }
2936
2937 return false;
2938 }
2939
2940 void unit_prune_cgroup(Unit *u) {
2941 int r;
2942 bool is_root_slice;
2943
2944 assert(u);
2945
2946 /* Removes the cgroup, if empty and possible, and stops watching it. */
2947
2948 if (!u->cgroup_path)
2949 return;
2950
2951 (void) unit_get_cpu_usage(u, NULL); /* Cache the last CPU usage value before we destroy the cgroup */
2952
2953 #if BPF_FRAMEWORK
2954 (void) lsm_bpf_cleanup(u); /* Remove cgroup from the global LSM BPF map */
2955 #endif
2956
2957 is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);
2958
2959 r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !is_root_slice);
2960 if (r < 0)
2961                 /* One reason we could have failed here is that the cgroup still contains a process.
2962 * However, if the cgroup becomes removable at a later time, it might be removed when
2963 * the containing slice is stopped. So even if we failed now, this unit shouldn't assume
2964 * that the cgroup is still realized the next time it is started. Do not return early
2965 * on error, continue cleanup. */
2966 log_unit_full_errno(u, r == -EBUSY ? LOG_DEBUG : LOG_WARNING, r, "Failed to destroy cgroup %s, ignoring: %m", empty_to_root(u->cgroup_path));
2967
2968 if (is_root_slice)
2969 return;
2970
2971 if (!unit_maybe_release_cgroup(u)) /* Returns true if the cgroup was released */
2972 return;
2973
2974 u->cgroup_realized = false;
2975 u->cgroup_realized_mask = 0;
2976 u->cgroup_enabled_mask = 0;
2977
2978 u->bpf_device_control_installed = bpf_program_free(u->bpf_device_control_installed);
2979 }
2980
2981 int unit_search_main_pid(Unit *u, pid_t *ret) {
2982 _cleanup_fclose_ FILE *f = NULL;
2983 pid_t pid = 0, npid;
2984 int r;
2985
2986 assert(u);
2987 assert(ret);
2988
2989 if (!u->cgroup_path)
2990 return -ENXIO;
2991
2992 r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, &f);
2993 if (r < 0)
2994 return r;
2995
2996 while (cg_read_pid(f, &npid) > 0) {
2997
2998 if (npid == pid)
2999 continue;
3000
3001 if (pid_is_my_child(npid) == 0)
3002 continue;
3003
3004 if (pid != 0)
3005 /* Dang, there's more than one daemonized PID
3006 in this group, so we don't know what process
3007 is the main process. */
3008
3009 return -ENODATA;
3010
3011 pid = npid;
3012 }
3013
3014 *ret = pid;
3015 return 0;
3016 }
3017
3018 static int unit_watch_pids_in_path(Unit *u, const char *path) {
3019 _cleanup_closedir_ DIR *d = NULL;
3020 _cleanup_fclose_ FILE *f = NULL;
3021 int ret = 0, r;
3022
3023 assert(u);
3024 assert(path);
3025
3026 r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f);
3027 if (r < 0)
3028 ret = r;
3029 else {
3030 pid_t pid;
3031
3032 while ((r = cg_read_pid(f, &pid)) > 0) {
3033 r = unit_watch_pid(u, pid, false);
3034 if (r < 0 && ret >= 0)
3035 ret = r;
3036 }
3037
3038 if (r < 0 && ret >= 0)
3039 ret = r;
3040 }
3041
3042 r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d);
3043 if (r < 0) {
3044 if (ret >= 0)
3045 ret = r;
3046 } else {
3047 char *fn;
3048
3049 while ((r = cg_read_subgroup(d, &fn)) > 0) {
3050 _cleanup_free_ char *p = NULL;
3051
3052 p = path_join(empty_to_root(path), fn);
3053 free(fn);
3054
3055 if (!p)
3056 return -ENOMEM;
3057
3058 r = unit_watch_pids_in_path(u, p);
3059 if (r < 0 && ret >= 0)
3060 ret = r;
3061 }
3062
3063 if (r < 0 && ret >= 0)
3064 ret = r;
3065 }
3066
3067 return ret;
3068 }
3069
3070 int unit_synthesize_cgroup_empty_event(Unit *u) {
3071 int r;
3072
3073 assert(u);
3074
3075 /* Enqueue a synthetic cgroup empty event if this unit doesn't watch any PIDs anymore. This is compatibility
3076          * support for non-unified systems where notifications aren't reliable, and hence we need to take whatever we can
3077          * get as a notification source as soon as we stop having any useful PIDs to watch for. */
3078
3079 if (!u->cgroup_path)
3080 return -ENOENT;
3081
3082 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
3083 if (r < 0)
3084 return r;
3085 if (r > 0) /* On unified we have reliable notifications, and don't need this */
3086 return 0;
3087
3088 if (!set_isempty(u->pids))
3089 return 0;
3090
3091 unit_add_to_cgroup_empty_queue(u);
3092 return 0;
3093 }
3094
3095 int unit_watch_all_pids(Unit *u) {
3096 int r;
3097
3098 assert(u);
3099
3100 /* Adds all PIDs from our cgroup to the set of PIDs we
3101 * watch. This is a fallback logic for cases where we do not
3102 * get reliable cgroup empty notifications: we try to use
3103 * SIGCHLD as replacement. */
3104
3105 if (!u->cgroup_path)
3106 return -ENOENT;
3107
3108 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
3109 if (r < 0)
3110 return r;
3111 if (r > 0) /* On unified we can use proper notifications */
3112 return 0;
3113
3114 return unit_watch_pids_in_path(u, u->cgroup_path);
3115 }
3116
3117 static int on_cgroup_empty_event(sd_event_source *s, void *userdata) {
3118 Manager *m = ASSERT_PTR(userdata);
3119 Unit *u;
3120 int r;
3121
3122 assert(s);
3123
3124 u = m->cgroup_empty_queue;
3125 if (!u)
3126 return 0;
3127
3128 assert(u->in_cgroup_empty_queue);
3129 u->in_cgroup_empty_queue = false;
3130 LIST_REMOVE(cgroup_empty_queue, m->cgroup_empty_queue, u);
3131
3132 if (m->cgroup_empty_queue) {
3133 /* More stuff queued, let's make sure we remain enabled */
3134 r = sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);
3135 if (r < 0)
3136 log_debug_errno(r, "Failed to reenable cgroup empty event source, ignoring: %m");
3137 }
3138
3139 /* Update state based on OOM kills before we notify about cgroup empty event */
3140 (void) unit_check_oom(u);
3141 (void) unit_check_oomd_kill(u);
3142
3143 unit_add_to_gc_queue(u);
3144
3145 if (UNIT_VTABLE(u)->notify_cgroup_empty)
3146 UNIT_VTABLE(u)->notify_cgroup_empty(u);
3147
3148 return 0;
3149 }
3150
3151 void unit_add_to_cgroup_empty_queue(Unit *u) {
3152 int r;
3153
3154 assert(u);
3155
3156 /* Note that there are four different ways how cgroup empty events reach us:
3157 *
3158 * 1. On the unified hierarchy we get an inotify event on the cgroup
3159 *
3160 * 2. On the legacy hierarchy, when running in system mode, we get a datagram on the cgroup agent socket
3161 *
3162 * 3. On the legacy hierarchy, when running in user mode, we get a D-Bus signal on the system bus
3163 *
3164 * 4. On the legacy hierarchy, in service units we start watching all processes of the cgroup for SIGCHLD as
3165 * soon as we get one SIGCHLD, to deal with unreliable cgroup notifications.
3166 *
3167          * Regardless of which way we got the notification, we'll verify it here, and then add it to a separate
3168 * queue. This queue will be dispatched at a lower priority than the SIGCHLD handler, so that we always use
3169 * SIGCHLD if we can get it first, and only use the cgroup empty notifications if there's no SIGCHLD pending
3170 * (which might happen if the cgroup doesn't contain processes that are our own child, which is typically the
3171 * case for scope units). */
3172
3173 if (u->in_cgroup_empty_queue)
3174 return;
3175
3176 /* Let's verify that the cgroup is really empty */
3177 if (!u->cgroup_path)
3178 return;
3179
3180 r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
3181 if (r < 0) {
3182 log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", empty_to_root(u->cgroup_path));
3183 return;
3184 }
3185 if (r == 0)
3186 return;
3187
3188 LIST_PREPEND(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
3189 u->in_cgroup_empty_queue = true;
3190
3191 /* Trigger the defer event */
3192 r = sd_event_source_set_enabled(u->manager->cgroup_empty_event_source, SD_EVENT_ONESHOT);
3193 if (r < 0)
3194 log_debug_errno(r, "Failed to enable cgroup empty event source: %m");
3195 }
3196
3197 static void unit_remove_from_cgroup_empty_queue(Unit *u) {
3198 assert(u);
3199
3200 if (!u->in_cgroup_empty_queue)
3201 return;
3202
3203 LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
3204 u->in_cgroup_empty_queue = false;
3205 }
3206
3207 int unit_check_oomd_kill(Unit *u) {
3208 _cleanup_free_ char *value = NULL;
3209 bool increased;
3210 uint64_t n = 0;
3211 int r;
3212
3213 if (!u->cgroup_path)
3214 return 0;
3215
3216 r = cg_all_unified();
3217 if (r < 0)
3218 return log_unit_debug_errno(u, r, "Couldn't determine whether we are in all unified mode: %m");
3219 else if (r == 0)
3220 return 0;
3221
3222 r = cg_get_xattr_malloc(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "user.oomd_ooms", &value);
3223 if (r < 0 && !ERRNO_IS_XATTR_ABSENT(r))
3224 return r;
3225
3226 if (!isempty(value)) {
3227 r = safe_atou64(value, &n);
3228 if (r < 0)
3229 return r;
3230 }
3231
3232 increased = n > u->managed_oom_kill_last;
3233 u->managed_oom_kill_last = n;
3234
3235 if (!increased)
3236 return 0;
3237
3238 n = 0;
3239 value = mfree(value);
3240 r = cg_get_xattr_malloc(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "user.oomd_kill", &value);
3241 if (r >= 0 && !isempty(value))
3242 (void) safe_atou64(value, &n);
3243
3244 if (n > 0)
3245 log_unit_struct(u, LOG_NOTICE,
3246 "MESSAGE_ID=" SD_MESSAGE_UNIT_OOMD_KILL_STR,
3247 LOG_UNIT_INVOCATION_ID(u),
3248 LOG_UNIT_MESSAGE(u, "systemd-oomd killed %"PRIu64" process(es) in this unit.", n),
3249 "N_PROCESSES=%" PRIu64, n);
3250 else
3251 log_unit_struct(u, LOG_NOTICE,
3252 "MESSAGE_ID=" SD_MESSAGE_UNIT_OOMD_KILL_STR,
3253 LOG_UNIT_INVOCATION_ID(u),
3254 LOG_UNIT_MESSAGE(u, "systemd-oomd killed some process(es) in this unit."));
3255
3256 unit_notify_cgroup_oom(u, /* ManagedOOM= */ true);
3257
3258 return 1;
3259 }
3260
3261 int unit_check_oom(Unit *u) {
3262 _cleanup_free_ char *oom_kill = NULL;
3263 bool increased;
3264 uint64_t c;
3265 int r;
3266
3267 if (!u->cgroup_path)
3268 return 0;
3269
3270 r = cg_get_keyed_attribute("memory", u->cgroup_path, "memory.events", STRV_MAKE("oom_kill"), &oom_kill);
3271 if (IN_SET(r, -ENOENT, -ENXIO)) /* Handle gracefully if cgroup or oom_kill attribute don't exist */
3272 c = 0;
3273 else if (r < 0)
3274 return log_unit_debug_errno(u, r, "Failed to read oom_kill field of memory.events cgroup attribute: %m");
3275 else {
3276 r = safe_atou64(oom_kill, &c);
3277 if (r < 0)
3278 return log_unit_debug_errno(u, r, "Failed to parse oom_kill field: %m");
3279 }
3280
3281 increased = c > u->oom_kill_last;
3282 u->oom_kill_last = c;
3283
3284 if (!increased)
3285 return 0;
3286
3287 log_unit_struct(u, LOG_NOTICE,
3288 "MESSAGE_ID=" SD_MESSAGE_UNIT_OUT_OF_MEMORY_STR,
3289 LOG_UNIT_INVOCATION_ID(u),
3290 LOG_UNIT_MESSAGE(u, "A process of this unit has been killed by the OOM killer."));
3291
3292 unit_notify_cgroup_oom(u, /* ManagedOOM= */ false);
3293
3294 return 1;
3295 }
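
/* For reference, memory.events as read above is a flat keyed file on the unified hierarchy, e.g.:
 *
 *     low 0
 *     high 0
 *     max 4
 *     oom 1
 *     oom_kill 1
 *
 * (Values are illustrative.) We only care about the "oom_kill" counter here, which the kernel increments
 * once for each process in this cgroup killed by the OOM killer. */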
3296
3297 static int on_cgroup_oom_event(sd_event_source *s, void *userdata) {
3298 Manager *m = ASSERT_PTR(userdata);
3299 Unit *u;
3300 int r;
3301
3302 assert(s);
3303
3304 u = m->cgroup_oom_queue;
3305 if (!u)
3306 return 0;
3307
3308 assert(u->in_cgroup_oom_queue);
3309 u->in_cgroup_oom_queue = false;
3310 LIST_REMOVE(cgroup_oom_queue, m->cgroup_oom_queue, u);
3311
3312 if (m->cgroup_oom_queue) {
3313 /* More stuff queued, let's make sure we remain enabled */
3314 r = sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);
3315 if (r < 0)
3316 log_debug_errno(r, "Failed to reenable cgroup oom event source, ignoring: %m");
3317 }
3318
3319 (void) unit_check_oom(u);
3320 return 0;
3321 }
3322
3323 static void unit_add_to_cgroup_oom_queue(Unit *u) {
3324 int r;
3325
3326 assert(u);
3327
3328 if (u->in_cgroup_oom_queue)
3329 return;
3330 if (!u->cgroup_path)
3331 return;
3332
3333 LIST_PREPEND(cgroup_oom_queue, u->manager->cgroup_oom_queue, u);
3334 u->in_cgroup_oom_queue = true;
3335
3336 /* Trigger the defer event */
3337 if (!u->manager->cgroup_oom_event_source) {
3338 _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;
3339
3340 r = sd_event_add_defer(u->manager->event, &s, on_cgroup_oom_event, u->manager);
3341 if (r < 0) {
3342 log_error_errno(r, "Failed to create cgroup oom event source: %m");
3343 return;
3344 }
3345
3346 r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_NORMAL-8);
3347 if (r < 0) {
3348 log_error_errno(r, "Failed to set priority of cgroup oom event source: %m");
3349 return;
3350 }
3351
3352 (void) sd_event_source_set_description(s, "cgroup-oom");
3353 u->manager->cgroup_oom_event_source = TAKE_PTR(s);
3354 }
3355
3356 r = sd_event_source_set_enabled(u->manager->cgroup_oom_event_source, SD_EVENT_ONESHOT);
3357 if (r < 0)
3358 log_error_errno(r, "Failed to enable cgroup oom event source: %m");
3359 }
3360
3361 static int unit_check_cgroup_events(Unit *u) {
3362 char *values[2] = {};
3363 int r;
3364
3365 assert(u);
3366
3367 if (!u->cgroup_path)
3368 return 0;
3369
3370 r = cg_get_keyed_attribute_graceful(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events",
3371 STRV_MAKE("populated", "frozen"), values);
3372 if (r < 0)
3373 return r;
3374
3375         /* The cgroup.events notifications can be merged together, so act as if we saw the given state for the
3376          * first time. The functions we call to handle a given state are idempotent, which makes them
3377          * effectively remember the previous state. */
3378 if (values[0]) {
3379 if (streq(values[0], "1"))
3380 unit_remove_from_cgroup_empty_queue(u);
3381 else
3382 unit_add_to_cgroup_empty_queue(u);
3383 }
3384
3385 /* Disregard freezer state changes due to operations not initiated by us */
3386 if (values[1] && IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_THAWING)) {
3387 if (streq(values[1], "0"))
3388 unit_thawed(u);
3389 else
3390 unit_frozen(u);
3391 }
3392
3393 free(values[0]);
3394 free(values[1]);
3395
3396 return 0;
3397 }
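
/* For reference, cgroup.events as read above is a flat keyed file along the lines of:
 *
 *     populated 1
 *     frozen 0
 *
 * "populated" is 1 iff the cgroup or any of its descendants contains live processes, and "frozen" reflects
 * the effective freezer state of the cgroup. */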
3398
3399 static int on_cgroup_inotify_event(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
3400 Manager *m = ASSERT_PTR(userdata);
3401
3402 assert(s);
3403 assert(fd >= 0);
3404
3405 for (;;) {
3406 union inotify_event_buffer buffer;
3407 ssize_t l;
3408
3409 l = read(fd, &buffer, sizeof(buffer));
3410 if (l < 0) {
3411 if (ERRNO_IS_TRANSIENT(errno))
3412 return 0;
3413
3414 return log_error_errno(errno, "Failed to read control group inotify events: %m");
3415 }
3416
3417 FOREACH_INOTIFY_EVENT_WARN(e, buffer, l) {
3418 Unit *u;
3419
3420 if (e->wd < 0)
3421 /* Queue overflow has no watch descriptor */
3422 continue;
3423
3424 if (e->mask & IN_IGNORED)
3425 /* The watch was just removed */
3426 continue;
3427
3428                         /* Note that inotify might deliver events for a watch even after it was removed,
3429                          * because the event was queued before the removal. It is safe to simply ignore such stale events here. */
3430
3431 u = hashmap_get(m->cgroup_control_inotify_wd_unit, INT_TO_PTR(e->wd));
3432 if (u)
3433 unit_check_cgroup_events(u);
3434
3435 u = hashmap_get(m->cgroup_memory_inotify_wd_unit, INT_TO_PTR(e->wd));
3436 if (u)
3437 unit_add_to_cgroup_oom_queue(u);
3438 }
3439 }
3440 }
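
/* Note (informational): the two watch descriptor hashmaps consulted above are maintained elsewhere in this
 * file; cgroup_control_inotify_wd_unit maps inotify watches on a unit's cgroup.events file to the unit,
 * while cgroup_memory_inotify_wd_unit does the same for memory.events. A single kernel event hence resolves
 * to at most one unit per map, or to none at all if the watch was already removed. */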
3441
3442 static int cg_bpf_mask_supported(CGroupMask *ret) {
3443 CGroupMask mask = 0;
3444 int r;
3445
3446 /* BPF-based firewall */
3447 r = bpf_firewall_supported();
3448 if (r < 0)
3449 return r;
3450 if (r > 0)
3451 mask |= CGROUP_MASK_BPF_FIREWALL;
3452
3453 /* BPF-based device access control */
3454 r = bpf_devices_supported();
3455 if (r < 0)
3456 return r;
3457 if (r > 0)
3458 mask |= CGROUP_MASK_BPF_DEVICES;
3459
3460 /* BPF pinned prog */
3461 r = bpf_foreign_supported();
3462 if (r < 0)
3463 return r;
3464 if (r > 0)
3465 mask |= CGROUP_MASK_BPF_FOREIGN;
3466
3467 /* BPF-based bind{4|6} hooks */
3468 r = bpf_socket_bind_supported();
3469 if (r < 0)
3470 return r;
3471 if (r > 0)
3472 mask |= CGROUP_MASK_BPF_SOCKET_BIND;
3473
3474 /* BPF-based cgroup_skb/{egress|ingress} hooks */
3475 r = restrict_network_interfaces_supported();
3476 if (r < 0)
3477 return r;
3478 if (r > 0)
3479 mask |= CGROUP_MASK_BPF_RESTRICT_NETWORK_INTERFACES;
3480
3481 *ret = mask;
3482 return 0;
3483 }
3484
3485 int manager_setup_cgroup(Manager *m) {
3486 _cleanup_free_ char *path = NULL;
3487 const char *scope_path;
3488 int r, all_unified;
3489 CGroupMask mask;
3490 char *e;
3491
3492 assert(m);
3493
3494 /* 1. Determine hierarchy */
3495 m->cgroup_root = mfree(m->cgroup_root);
3496 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root);
3497 if (r < 0)
3498 return log_error_errno(r, "Cannot determine cgroup we are running in: %m");
3499
3500 /* Chop off the init scope, if we are already located in it */
3501 e = endswith(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
3502
3503 /* LEGACY: Also chop off the system slice if we are in
3504 * it. This is to support live upgrades from older systemd
3505 * versions where PID 1 was moved there. Also see
3506 * cg_get_root_path(). */
3507 if (!e && MANAGER_IS_SYSTEM(m)) {
3508 e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE);
3509 if (!e)
3510 e = endswith(m->cgroup_root, "/system"); /* even more legacy */
3511 }
3512 if (e)
3513 *e = 0;
3514
3515 /* And make sure to store away the root value without trailing slash, even for the root dir, so that we can
3516 * easily prepend it everywhere. */
3517 delete_trailing_chars(m->cgroup_root, "/");
3518
3519 /* 2. Show data */
3520 r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
3521 if (r < 0)
3522 return log_error_errno(r, "Cannot find cgroup mount point: %m");
3523
3524 r = cg_unified();
3525 if (r < 0)
3526 return log_error_errno(r, "Couldn't determine if we are running in the unified hierarchy: %m");
3527
3528 all_unified = cg_all_unified();
3529 if (all_unified < 0)
3530 return log_error_errno(all_unified, "Couldn't determine whether we are in all unified mode: %m");
3531 if (all_unified > 0)
3532 log_debug("Unified cgroup hierarchy is located at %s.", path);
3533 else {
3534 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
3535 if (r < 0)
3536 return log_error_errno(r, "Failed to determine whether systemd's own controller is in unified mode: %m");
3537 if (r > 0)
3538 log_debug("Unified cgroup hierarchy is located at %s. Controllers are on legacy hierarchies.", path);
3539 else
3540 log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER_LEGACY ". File system hierarchy is at %s.", path);
3541 }
3542
3543 /* 3. Allocate cgroup empty defer event source */
3544 m->cgroup_empty_event_source = sd_event_source_disable_unref(m->cgroup_empty_event_source);
3545 r = sd_event_add_defer(m->event, &m->cgroup_empty_event_source, on_cgroup_empty_event, m);
3546 if (r < 0)
3547 return log_error_errno(r, "Failed to create cgroup empty event source: %m");
3548
3549         /* Schedule cgroup empty checks early, but after having processed service notification messages or
3550          * SIGCHLD signals, so that a cgroup running empty is only ever the last safety net of state-change
3551          * notification, after we have already collected the metadata that notification messages and SIGCHLD offer. */
3552 r = sd_event_source_set_priority(m->cgroup_empty_event_source, SD_EVENT_PRIORITY_NORMAL-5);
3553 if (r < 0)
3554 return log_error_errno(r, "Failed to set priority of cgroup empty event source: %m");
3555
3556 r = sd_event_source_set_enabled(m->cgroup_empty_event_source, SD_EVENT_OFF);
3557 if (r < 0)
3558 return log_error_errno(r, "Failed to disable cgroup empty event source: %m");
3559
3560 (void) sd_event_source_set_description(m->cgroup_empty_event_source, "cgroup-empty");
3561
3562 /* 4. Install notifier inotify object, or agent */
3563 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0) {
3564
3565 /* In the unified hierarchy we can get cgroup empty notifications via inotify. */
3566
3567 m->cgroup_inotify_event_source = sd_event_source_disable_unref(m->cgroup_inotify_event_source);
3568 safe_close(m->cgroup_inotify_fd);
3569
3570 m->cgroup_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
3571 if (m->cgroup_inotify_fd < 0)
3572 return log_error_errno(errno, "Failed to create control group inotify object: %m");
3573
3574 r = sd_event_add_io(m->event, &m->cgroup_inotify_event_source, m->cgroup_inotify_fd, EPOLLIN, on_cgroup_inotify_event, m);
3575 if (r < 0)
3576 return log_error_errno(r, "Failed to watch control group inotify object: %m");
3577
3578 /* Process cgroup empty notifications early. Note that when this event is dispatched it'll
3579 * just add the unit to a cgroup empty queue, hence let's run earlier than that. Also see
3580 * handling of cgroup agent notifications, for the classic cgroup hierarchy support. */
3581 r = sd_event_source_set_priority(m->cgroup_inotify_event_source, SD_EVENT_PRIORITY_NORMAL-9);
3582 if (r < 0)
3583 return log_error_errno(r, "Failed to set priority of inotify event source: %m");
3584
3585 (void) sd_event_source_set_description(m->cgroup_inotify_event_source, "cgroup-inotify");
3586
3587 } else if (MANAGER_IS_SYSTEM(m) && manager_owns_host_root_cgroup(m) && !MANAGER_IS_TEST_RUN(m)) {
3588
3589                 /* On the legacy hierarchy we only get notifications via cgroup agents. (This isn't really reliable,
3590                  * since it does not generate events when control groups with children run empty.) */
3591
3592 r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, SYSTEMD_CGROUPS_AGENT_PATH);
3593 if (r < 0)
3594 log_warning_errno(r, "Failed to install release agent, ignoring: %m");
3595 else if (r > 0)
3596 log_debug("Installed release agent.");
3597 else if (r == 0)
3598 log_debug("Release agent already installed.");
3599 }
3600
3601 /* 5. Make sure we are in the special "init.scope" unit in the root slice. */
3602 scope_path = strjoina(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
3603 r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
3604 if (r >= 0) {
3605 /* Also, move all other userspace processes remaining in the root cgroup into that scope. */
3606 r = cg_migrate(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
3607 if (r < 0)
3608 log_warning_errno(r, "Couldn't move remaining userspace processes, ignoring: %m");
3609
3610 /* 6. And pin it, so that it cannot be unmounted */
3611 safe_close(m->pin_cgroupfs_fd);
3612 m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
3613 if (m->pin_cgroupfs_fd < 0)
3614 return log_error_errno(errno, "Failed to open pin file: %m");
3615
3616 } else if (!MANAGER_IS_TEST_RUN(m))
3617 return log_error_errno(r, "Failed to create %s control group: %m", scope_path);
3618
3619 /* 7. Always enable hierarchical support if it exists... */
3620 if (!all_unified && !MANAGER_IS_TEST_RUN(m))
3621 (void) cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");
3622
3623 /* 8. Figure out which controllers are supported */
3624 r = cg_mask_supported_subtree(m->cgroup_root, &m->cgroup_supported);
3625 if (r < 0)
3626 return log_error_errno(r, "Failed to determine supported controllers: %m");
3627
3628 /* 9. Figure out which bpf-based pseudo-controllers are supported */
3629 r = cg_bpf_mask_supported(&mask);
3630 if (r < 0)
3631 return log_error_errno(r, "Failed to determine supported bpf-based pseudo-controllers: %m");
3632 m->cgroup_supported |= mask;
3633
3634 /* 10. Log which controllers are supported */
3635 for (CGroupController c = 0; c < _CGROUP_CONTROLLER_MAX; c++)
3636 log_debug("Controller '%s' supported: %s", cgroup_controller_to_string(c),
3637 yes_no(m->cgroup_supported & CGROUP_CONTROLLER_TO_MASK(c)));
3638
3639 return 0;
3640 }
3641
3642 void manager_shutdown_cgroup(Manager *m, bool delete) {
3643 assert(m);
3644
3645 /* We can't really delete the group, since we are in it. But
3646 * let's trim it. */
3647 if (delete && m->cgroup_root && !FLAGS_SET(m->test_run_flags, MANAGER_TEST_RUN_MINIMAL))
3648 (void) cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);
3649
3650 m->cgroup_empty_event_source = sd_event_source_disable_unref(m->cgroup_empty_event_source);
3651
3652 m->cgroup_control_inotify_wd_unit = hashmap_free(m->cgroup_control_inotify_wd_unit);
3653 m->cgroup_memory_inotify_wd_unit = hashmap_free(m->cgroup_memory_inotify_wd_unit);
3654
3655 m->cgroup_inotify_event_source = sd_event_source_disable_unref(m->cgroup_inotify_event_source);
3656 m->cgroup_inotify_fd = safe_close(m->cgroup_inotify_fd);
3657
3658 m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd);
3659
3660 m->cgroup_root = mfree(m->cgroup_root);
3661 }
3662
3663 Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) {
3664 char *p;
3665 Unit *u;
3666
3667 assert(m);
3668 assert(cgroup);
3669
3670 u = hashmap_get(m->cgroup_unit, cgroup);
3671 if (u)
3672 return u;
3673
3674 p = strdupa_safe(cgroup);
3675 for (;;) {
3676 char *e;
3677
3678 e = strrchr(p, '/');
3679 if (!e || e == p)
3680 return hashmap_get(m->cgroup_unit, SPECIAL_ROOT_SLICE);
3681
3682 *e = 0;
3683
3684 u = hashmap_get(m->cgroup_unit, p);
3685 if (u)
3686 return u;
3687 }
3688 }
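
/* Illustrative example of the prefix walk above: for the cgroup path "/system.slice/foo.service/sub" we
 * first try the full path, then "/system.slice/foo.service", then "/system.slice", and if nothing matches
 * we finally fall back to the unit of the root slice. */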
3689
3690 Unit *manager_get_unit_by_pid_cgroup(Manager *m, pid_t pid) {
3691 _cleanup_free_ char *cgroup = NULL;
3692
3693 assert(m);
3694
3695 if (!pid_is_valid(pid))
3696 return NULL;
3697
3698 if (cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup) < 0)
3699 return NULL;
3700
3701 return manager_get_unit_by_cgroup(m, cgroup);
3702 }
3703
3704 Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {
3705 Unit *u, **array;
3706
3707 assert(m);
3708
3709         /* Note that a process might be owned by multiple units; we return only one here, which is good enough for most
3710          * cases, though not strictly correct. We prefer the one reported by cgroup membership, as that's the most
3711          * relevant one, since children of the process will be assigned to it, too, before all else. */
3712
3713 if (!pid_is_valid(pid))
3714 return NULL;
3715
3716 if (pid == getpid_cached())
3717 return hashmap_get(m->units, SPECIAL_INIT_SCOPE);
3718
3719 u = manager_get_unit_by_pid_cgroup(m, pid);
3720 if (u)
3721 return u;
3722
3723 u = hashmap_get(m->watch_pids, PID_TO_PTR(pid));
3724 if (u)
3725 return u;
3726
3727 array = hashmap_get(m->watch_pids, PID_TO_PTR(-pid));
3728 if (array)
3729 return array[0];
3730
3731 return NULL;
3732 }
3733
3734 int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
3735 Unit *u;
3736
3737 assert(m);
3738 assert(cgroup);
3739
3740 /* Called on the legacy hierarchy whenever we get an explicit cgroup notification from the cgroup agent process
3741 * or from the --system instance */
3742
3743 log_debug("Got cgroup empty notification for: %s", cgroup);
3744
3745 u = manager_get_unit_by_cgroup(m, cgroup);
3746 if (!u)
3747 return 0;
3748
3749 unit_add_to_cgroup_empty_queue(u);
3750 return 1;
3751 }
3752
3753 int unit_get_memory_available(Unit *u, uint64_t *ret) {
3754 uint64_t unit_current, available = UINT64_MAX;
3755 CGroupContext *unit_context;
3756 const char *memory_file;
3757 int r;
3758
3759 assert(u);
3760 assert(ret);
3761
3762 /* If data from cgroups can be accessed, try to find out how much more memory a unit can
3763 * claim before hitting the configured cgroup limits (if any). Consider both MemoryHigh
3764 * and MemoryMax, and also any slice the unit might be nested below. */
3765
3766 if (!UNIT_CGROUP_BOOL(u, memory_accounting))
3767 return -ENODATA;
3768
3769 if (!u->cgroup_path)
3770 return -ENODATA;
3771
3772 /* The root cgroup doesn't expose this information */
3773 if (unit_has_host_root_cgroup(u))
3774 return -ENODATA;
3775
3776 if ((u->cgroup_realized_mask & CGROUP_MASK_MEMORY) == 0)
3777 return -ENODATA;
3778
3779 r = cg_all_unified();
3780 if (r < 0)
3781 return r;
3782 memory_file = r > 0 ? "memory.current" : "memory.usage_in_bytes";
3783
3784 r = cg_get_attribute_as_uint64("memory", u->cgroup_path, memory_file, &unit_current);
3785 if (r < 0)
3786 return r;
3787
3788 assert_se(unit_context = unit_get_cgroup_context(u));
3789
3790 if (unit_context->memory_max != UINT64_MAX || unit_context->memory_high != UINT64_MAX)
3791 available = LESS_BY(MIN(unit_context->memory_max, unit_context->memory_high), unit_current);
3792
3793 for (Unit *slice = UNIT_GET_SLICE(u); slice; slice = UNIT_GET_SLICE(slice)) {
3794 uint64_t slice_current, slice_available = UINT64_MAX;
3795 CGroupContext *slice_context;
3796
3797 /* No point in continuing if we can't go any lower */
3798 if (available == 0)
3799 break;
3800
3801 if (!slice->cgroup_path)
3802 continue;
3803
3804 slice_context = unit_get_cgroup_context(slice);
3805 if (!slice_context)
3806 continue;
3807
3808 if (slice_context->memory_max == UINT64_MAX && slice_context->memory_high == UINT64_MAX)
3809 continue;
3810
3811 r = cg_get_attribute_as_uint64("memory", slice->cgroup_path, memory_file, &slice_current);
3812 if (r < 0)
3813 continue;
3814
3815 slice_available = LESS_BY(MIN(slice_context->memory_max, slice_context->memory_high), slice_current);
3816 available = MIN(slice_available, available);
3817 }
3818
3819 *ret = available;
3820
3821 return 0;
3822 }
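
/* Worked example for the headroom calculation above (all numbers hypothetical): if the unit has
 * MemoryMax=1G and currently uses 300M, its own headroom is 700M. If its parent slice sets MemoryHigh=512M
 * and the slice subtree currently uses 400M, the slice only leaves 112M of headroom, so 112M is returned,
 * i.e. the minimum across the unit itself and all of its ancestor slices. */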
3823
3824 int unit_get_memory_current(Unit *u, uint64_t *ret) {
3825 int r;
3826
3827 assert(u);
3828 assert(ret);
3829
3830 if (!UNIT_CGROUP_BOOL(u, memory_accounting))
3831 return -ENODATA;
3832
3833 if (!u->cgroup_path)
3834 return -ENODATA;
3835
3836 /* The root cgroup doesn't expose this information, let's get it from /proc instead */
3837 if (unit_has_host_root_cgroup(u))
3838 return procfs_memory_get_used(ret);
3839
3840 if ((u->cgroup_realized_mask & CGROUP_MASK_MEMORY) == 0)
3841 return -ENODATA;
3842
3843 r = cg_all_unified();
3844 if (r < 0)
3845 return r;
3846
3847 return cg_get_attribute_as_uint64("memory", u->cgroup_path, r > 0 ? "memory.current" : "memory.usage_in_bytes", ret);
3848 }
3849
3850 int unit_get_tasks_current(Unit *u, uint64_t *ret) {
3851 assert(u);
3852 assert(ret);
3853
3854 if (!UNIT_CGROUP_BOOL(u, tasks_accounting))
3855 return -ENODATA;
3856
3857 if (!u->cgroup_path)
3858 return -ENODATA;
3859
3860 /* The root cgroup doesn't expose this information, let's get it from /proc instead */
3861 if (unit_has_host_root_cgroup(u))
3862 return procfs_tasks_get_current(ret);
3863
3864 if ((u->cgroup_realized_mask & CGROUP_MASK_PIDS) == 0)
3865 return -ENODATA;
3866
3867 return cg_get_attribute_as_uint64("pids", u->cgroup_path, "pids.current", ret);
3868 }
3869
3870 static int unit_get_cpu_usage_raw(Unit *u, nsec_t *ret) {
3871 uint64_t ns;
3872 int r;
3873
3874 assert(u);
3875 assert(ret);
3876
3877 if (!u->cgroup_path)
3878 return -ENODATA;
3879
3880 /* The root cgroup doesn't expose this information, let's get it from /proc instead */
3881 if (unit_has_host_root_cgroup(u))
3882 return procfs_cpu_get_usage(ret);
3883
3884 /* Requisite controllers for CPU accounting are not enabled */
3885 if ((get_cpu_accounting_mask() & ~u->cgroup_realized_mask) != 0)
3886 return -ENODATA;
3887
3888 r = cg_all_unified();
3889 if (r < 0)
3890 return r;
3891 if (r > 0) {
3892 _cleanup_free_ char *val = NULL;
3893 uint64_t us;
3894
3895 r = cg_get_keyed_attribute("cpu", u->cgroup_path, "cpu.stat", STRV_MAKE("usage_usec"), &val);
3896 if (IN_SET(r, -ENOENT, -ENXIO))
3897 return -ENODATA;
3898 if (r < 0)
3899 return r;
3900
3901 r = safe_atou64(val, &us);
3902 if (r < 0)
3903 return r;
3904
3905 ns = us * NSEC_PER_USEC;
3906 } else
3907 return cg_get_attribute_as_uint64("cpuacct", u->cgroup_path, "cpuacct.usage", ret);
3908
3909 *ret = ns;
3910 return 0;
3911 }
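
/* For reference, cpu.stat as parsed above on the unified hierarchy looks like this:
 *
 *     usage_usec 10525
 *     user_usec 8442
 *     system_usec 2083
 *
 * (Values are illustrative.) We convert usage_usec to nanoseconds; on the legacy hierarchy cpuacct.usage
 * already reports total CPU time in nanoseconds. */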
3912
3913 int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
3914 nsec_t ns;
3915 int r;
3916
3917 assert(u);
3918
3919         /* Retrieve the current CPU usage counter. This will subtract the CPU counter taken when the unit was
3920          * started. If the cgroup has been removed already, this returns the last cached value. To cache the value,
3921          * simply call this function with a NULL return parameter. */
3922
3923 if (!UNIT_CGROUP_BOOL(u, cpu_accounting))
3924 return -ENODATA;
3925
3926 r = unit_get_cpu_usage_raw(u, &ns);
3927 if (r == -ENODATA && u->cpu_usage_last != NSEC_INFINITY) {
3928 /* If we can't get the CPU usage anymore (because the cgroup was already removed, for example), use our
3929 * cached value. */
3930
3931 if (ret)
3932 *ret = u->cpu_usage_last;
3933 return 0;
3934 }
3935 if (r < 0)
3936 return r;
3937
3938 if (ns > u->cpu_usage_base)
3939 ns -= u->cpu_usage_base;
3940 else
3941 ns = 0;
3942
3943 u->cpu_usage_last = ns;
3944 if (ret)
3945 *ret = ns;
3946
3947 return 0;
3948 }
3949
3950 int unit_get_ip_accounting(
3951 Unit *u,
3952 CGroupIPAccountingMetric metric,
3953 uint64_t *ret) {
3954
3955 uint64_t value;
3956 int fd, r;
3957
3958 assert(u);
3959 assert(metric >= 0);
3960 assert(metric < _CGROUP_IP_ACCOUNTING_METRIC_MAX);
3961 assert(ret);
3962
3963 if (!UNIT_CGROUP_BOOL(u, ip_accounting))
3964 return -ENODATA;
3965
3966 fd = IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_INGRESS_PACKETS) ?
3967 u->ip_accounting_ingress_map_fd :
3968 u->ip_accounting_egress_map_fd;
3969 if (fd < 0)
3970 return -ENODATA;
3971
3972 if (IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
3973 r = bpf_firewall_read_accounting(fd, &value, NULL);
3974 else
3975 r = bpf_firewall_read_accounting(fd, NULL, &value);
3976 if (r < 0)
3977 return r;
3978
3979 /* Add in additional metrics from a previous runtime. Note that when reexecing/reloading the daemon we compile
3980 * all BPF programs and maps anew, but serialize the old counters. When deserializing we store them in the
3981 * ip_accounting_extra[] field, and add them in here transparently. */
3982
3983 *ret = value + u->ip_accounting_extra[metric];
3984
3985 return r;
3986 }
3987
3988 static int unit_get_io_accounting_raw(Unit *u, uint64_t ret[static _CGROUP_IO_ACCOUNTING_METRIC_MAX]) {
3989 static const char *const field_names[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
3990 [CGROUP_IO_READ_BYTES] = "rbytes=",
3991 [CGROUP_IO_WRITE_BYTES] = "wbytes=",
3992 [CGROUP_IO_READ_OPERATIONS] = "rios=",
3993 [CGROUP_IO_WRITE_OPERATIONS] = "wios=",
3994 };
3995 uint64_t acc[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {};
3996 _cleanup_free_ char *path = NULL;
3997 _cleanup_fclose_ FILE *f = NULL;
3998 int r;
3999
4000 assert(u);
4001
4002 if (!u->cgroup_path)
4003 return -ENODATA;
4004
4005 if (unit_has_host_root_cgroup(u))
4006 return -ENODATA; /* TODO: return useful data for the top-level cgroup */
4007
4008 r = cg_all_unified();
4009 if (r < 0)
4010 return r;
4011 if (r == 0) /* TODO: support cgroupv1 */
4012 return -ENODATA;
4013
4014 if (!FLAGS_SET(u->cgroup_realized_mask, CGROUP_MASK_IO))
4015 return -ENODATA;
4016
4017 r = cg_get_path("io", u->cgroup_path, "io.stat", &path);
4018 if (r < 0)
4019 return r;
4020
4021 f = fopen(path, "re");
4022 if (!f)
4023 return -errno;
4024
4025 for (;;) {
4026 _cleanup_free_ char *line = NULL;
4027 const char *p;
4028
4029 r = read_line(f, LONG_LINE_MAX, &line);
4030 if (r < 0)
4031 return r;
4032 if (r == 0)
4033 break;
4034
4035 p = line;
4036 p += strcspn(p, WHITESPACE); /* Skip over device major/minor */
4037 p += strspn(p, WHITESPACE); /* Skip over following whitespace */
4038
4039 for (;;) {
4040 _cleanup_free_ char *word = NULL;
4041
4042 r = extract_first_word(&p, &word, NULL, EXTRACT_RETAIN_ESCAPE);
4043 if (r < 0)
4044 return r;
4045 if (r == 0)
4046 break;
4047
4048 for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++) {
4049 const char *x;
4050
4051 x = startswith(word, field_names[i]);
4052 if (x) {
4053 uint64_t w;
4054
4055 r = safe_atou64(x, &w);
4056 if (r < 0)
4057 return r;
4058
4059 /* Sum up the stats of all devices */
4060 acc[i] += w;
4061 break;
4062 }
4063 }
4064 }
4065 }
4066
4067 memcpy(ret, acc, sizeof(acc));
4068 return 0;
4069 }
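
/* For reference, io.stat as parsed above contains one line per device, e.g.:
 *
 *     8:0 rbytes=180224 wbytes=0 rios=3 wios=0 dbytes=0 dios=0
 *     253:0 rbytes=566272 wbytes=126976 rios=68 wios=24 dbytes=0 dios=0
 *
 * (Values are illustrative.) We sum up the rbytes=, wbytes=, rios= and wios= fields across all devices and
 * ignore everything else, including the discard counters. */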
4070
4071 int unit_get_io_accounting(
4072 Unit *u,
4073 CGroupIOAccountingMetric metric,
4074 bool allow_cache,
4075 uint64_t *ret) {
4076
4077 uint64_t raw[_CGROUP_IO_ACCOUNTING_METRIC_MAX];
4078 int r;
4079
4080         /* Retrieve an IO accounting parameter. This will subtract the counter value taken when the unit was started. */
4081
4082 if (!UNIT_CGROUP_BOOL(u, io_accounting))
4083 return -ENODATA;
4084
4085 if (allow_cache && u->io_accounting_last[metric] != UINT64_MAX)
4086 goto done;
4087
4088 r = unit_get_io_accounting_raw(u, raw);
4089 if (r == -ENODATA && u->io_accounting_last[metric] != UINT64_MAX)
4090 goto done;
4091 if (r < 0)
4092 return r;
4093
4094 for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++) {
4095 /* Saturated subtraction */
4096 if (raw[i] > u->io_accounting_base[i])
4097 u->io_accounting_last[i] = raw[i] - u->io_accounting_base[i];
4098 else
4099 u->io_accounting_last[i] = 0;
4100 }
4101
4102 done:
4103 if (ret)
4104 *ret = u->io_accounting_last[metric];
4105
4106 return 0;
4107 }
4108
4109 int unit_reset_cpu_accounting(Unit *u) {
4110 int r;
4111
4112 assert(u);
4113
4114 u->cpu_usage_last = NSEC_INFINITY;
4115
4116 r = unit_get_cpu_usage_raw(u, &u->cpu_usage_base);
4117 if (r < 0) {
4118 u->cpu_usage_base = 0;
4119 return r;
4120 }
4121
4122 return 0;
4123 }
4124
4125 int unit_reset_ip_accounting(Unit *u) {
4126 int r = 0, q = 0;
4127
4128 assert(u);
4129
4130 if (u->ip_accounting_ingress_map_fd >= 0)
4131 r = bpf_firewall_reset_accounting(u->ip_accounting_ingress_map_fd);
4132
4133 if (u->ip_accounting_egress_map_fd >= 0)
4134 q = bpf_firewall_reset_accounting(u->ip_accounting_egress_map_fd);
4135
4136 zero(u->ip_accounting_extra);
4137
4138 return r < 0 ? r : q;
4139 }
4140
4141 int unit_reset_io_accounting(Unit *u) {
4142 int r;
4143
4144 assert(u);
4145
4146 for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++)
4147 u->io_accounting_last[i] = UINT64_MAX;
4148
4149 r = unit_get_io_accounting_raw(u, u->io_accounting_base);
4150 if (r < 0) {
4151 zero(u->io_accounting_base);
4152 return r;
4153 }
4154
4155 return 0;
4156 }
4157
4158 int unit_reset_accounting(Unit *u) {
4159 int r, q, v;
4160
4161 assert(u);
4162
4163 r = unit_reset_cpu_accounting(u);
4164 q = unit_reset_io_accounting(u);
4165 v = unit_reset_ip_accounting(u);
4166
4167 return r < 0 ? r : q < 0 ? q : v;
4168 }
4169
4170 void unit_invalidate_cgroup(Unit *u, CGroupMask m) {
4171 assert(u);
4172
4173 if (!UNIT_HAS_CGROUP_CONTEXT(u))
4174 return;
4175
4176 if (m == 0)
4177 return;
4178
4179 /* always invalidate compat pairs together */
4180 if (m & (CGROUP_MASK_IO | CGROUP_MASK_BLKIO))
4181 m |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;
4182
4183 if (m & (CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT))
4184 m |= CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT;
4185
4186 if (FLAGS_SET(u->cgroup_invalidated_mask, m)) /* NOP? */
4187 return;
4188
4189 u->cgroup_invalidated_mask |= m;
4190 unit_add_to_cgroup_realize_queue(u);
4191 }
4192
4193 void unit_invalidate_cgroup_bpf(Unit *u) {
4194 assert(u);
4195
4196 if (!UNIT_HAS_CGROUP_CONTEXT(u))
4197 return;
4198
4199 if (u->cgroup_invalidated_mask & CGROUP_MASK_BPF_FIREWALL) /* NOP? */
4200 return;
4201
4202 u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
4203 unit_add_to_cgroup_realize_queue(u);
4204
4205         /* If we are a slice unit, we also need to compile a new BPF program for all our children, as the IP access
4206          * list of our children includes our own. */
4207 if (u->type == UNIT_SLICE) {
4208 Unit *member;
4209
4210 UNIT_FOREACH_DEPENDENCY(member, u, UNIT_ATOM_SLICE_OF)
4211 unit_invalidate_cgroup_bpf(member);
4212 }
4213 }
4214
4215 void unit_cgroup_catchup(Unit *u) {
4216 assert(u);
4217
4218 if (!UNIT_HAS_CGROUP_CONTEXT(u))
4219 return;
4220
4221 /* We dropped the inotify watch during reexec/reload, so we need to
4222 * check these as they may have changed.
4223 * Note that (currently) the kernel doesn't actually update cgroup
4224 * file modification times, so we can't just serialize and then check
4225 * the mtime for file(s) we are interested in. */
4226 (void) unit_check_cgroup_events(u);
4227 unit_add_to_cgroup_oom_queue(u);
4228 }
4229
4230 bool unit_cgroup_delegate(Unit *u) {
4231 CGroupContext *c;
4232
4233 assert(u);
4234
4235 if (!UNIT_VTABLE(u)->can_delegate)
4236 return false;
4237
4238 c = unit_get_cgroup_context(u);
4239 if (!c)
4240 return false;
4241
4242 return c->delegate;
4243 }
4244
4245 void manager_invalidate_startup_units(Manager *m) {
4246 Unit *u;
4247
4248 assert(m);
4249
4250 SET_FOREACH(u, m->startup_units)
4251 unit_invalidate_cgroup(u, CGROUP_MASK_CPU|CGROUP_MASK_IO|CGROUP_MASK_BLKIO|CGROUP_MASK_CPUSET);
4252 }
4253
4254 static int unit_get_nice(Unit *u) {
4255 ExecContext *ec;
4256
4257 ec = unit_get_exec_context(u);
4258 return ec ? ec->nice : 0;
4259 }
4260
4261 static uint64_t unit_get_cpu_weight(Unit *u) {
4262 ManagerState state = manager_state(u->manager);
4263 CGroupContext *cc;
4264
4265 cc = unit_get_cgroup_context(u);
4266 return cc ? cgroup_context_cpu_weight(cc, state) : CGROUP_WEIGHT_DEFAULT;
4267 }
4268
4269 int compare_job_priority(const void *a, const void *b) {
4270 const Job *x = a, *y = b;
4271 int nice_x, nice_y;
4272 uint64_t weight_x, weight_y;
4273 int ret;
4274
4275 if ((ret = CMP(x->unit->type, y->unit->type)) != 0)
4276 return -ret;
4277
4278 weight_x = unit_get_cpu_weight(x->unit);
4279 weight_y = unit_get_cpu_weight(y->unit);
4280
4281 if ((ret = CMP(weight_x, weight_y)) != 0)
4282 return -ret;
4283
4284 nice_x = unit_get_nice(x->unit);
4285 nice_y = unit_get_nice(y->unit);
4286
4287 if ((ret = CMP(nice_x, nice_y)) != 0)
4288 return ret;
4289
4290 return strcmp(x->unit->id, y->unit->id);
4291 }
4292
4293 int unit_cgroup_freezer_action(Unit *u, FreezerAction action) {
4294 _cleanup_free_ char *path = NULL;
4295 FreezerState target, kernel = _FREEZER_STATE_INVALID;
4296 int r, ret;
4297
4298 assert(u);
4299 assert(IN_SET(action, FREEZER_FREEZE, FREEZER_THAW));
4300
4301 if (!cg_freezer_supported())
4302 return 0;
4303
4304 /* Ignore all requests to thaw init.scope or -.slice and reject all requests to freeze them */
4305 if (unit_has_name(u, SPECIAL_ROOT_SLICE) || unit_has_name(u, SPECIAL_INIT_SCOPE))
4306 return action == FREEZER_FREEZE ? -EPERM : 0;
4307
4308 if (!u->cgroup_realized)
4309 return -EBUSY;
4310
4311 if (action == FREEZER_THAW) {
4312 Unit *slice = UNIT_GET_SLICE(u);
4313
4314 if (slice) {
4315 r = unit_cgroup_freezer_action(slice, FREEZER_THAW);
4316 if (r < 0)
4317 return log_unit_error_errno(u, r, "Failed to thaw slice %s of unit: %m", slice->id);
4318 }
4319 }
4320
4321 target = action == FREEZER_FREEZE ? FREEZER_FROZEN : FREEZER_RUNNING;
4322
4323 r = unit_freezer_state_kernel(u, &kernel);
4324 if (r < 0)
4325 log_unit_debug_errno(u, r, "Failed to obtain cgroup freezer state: %m");
4326
4327 if (target == kernel) {
4328 u->freezer_state = target;
4329 if (action == FREEZER_FREEZE)
4330 return 0;
4331 ret = 0;
4332 } else
4333 ret = 1;
4334
4335 r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.freeze", &path);
4336 if (r < 0)
4337 return r;
4338
4339 log_unit_debug(u, "%s unit.", action == FREEZER_FREEZE ? "Freezing" : "Thawing");
4340
4341 if (target != kernel) {
4342 if (action == FREEZER_FREEZE)
4343 u->freezer_state = FREEZER_FREEZING;
4344 else
4345 u->freezer_state = FREEZER_THAWING;
4346 }
4347
4348 r = write_string_file(path, one_zero(action == FREEZER_FREEZE), WRITE_STRING_FILE_DISABLE_BUFFER);
4349 if (r < 0)
4350 return r;
4351
4352 return ret;
4353 }
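
/* For reference, cgroup.freeze as written above accepts "1" (freeze) and "0" (thaw). The kernel applies the
 * state asynchronously and reports completion via the "frozen" key of cgroup.events, which we pick up in
 * unit_check_cgroup_events() through the inotify machinery above. */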
4354
4355 int unit_get_cpuset(Unit *u, CPUSet *cpus, const char *name) {
4356 _cleanup_free_ char *v = NULL;
4357 int r;
4358
4359 assert(u);
4360 assert(cpus);
4361
4362 if (!u->cgroup_path)
4363 return -ENODATA;
4364
4365 if ((u->cgroup_realized_mask & CGROUP_MASK_CPUSET) == 0)
4366 return -ENODATA;
4367
4368 r = cg_all_unified();
4369 if (r < 0)
4370 return r;
4371 if (r == 0)
4372 return -ENODATA;
4373
4374 r = cg_get_attribute("cpuset", u->cgroup_path, name, &v);
4375 if (r == -ENOENT)
4376 return -ENODATA;
4377 if (r < 0)
4378 return r;
4379
4380 return parse_cpu_set_full(v, cpus, false, NULL, NULL, 0, NULL);
4381 }
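
/* For reference, cpuset attributes such as "cpuset.cpus.effective" use the kernel's CPU list format,
 * e.g. "0-3,8-11" (cpuset.cpus may also be empty if no restriction is configured), which
 * parse_cpu_set_full() above understands. */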
4382
4383 static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
4384 [CGROUP_DEVICE_POLICY_AUTO] = "auto",
4385 [CGROUP_DEVICE_POLICY_CLOSED] = "closed",
4386 [CGROUP_DEVICE_POLICY_STRICT] = "strict",
4387 };
4388
4389 DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);
4390
4391 static const char* const freezer_action_table[_FREEZER_ACTION_MAX] = {
4392 [FREEZER_FREEZE] = "freeze",
4393 [FREEZER_THAW] = "thaw",
4394 };
4395
4396 DEFINE_STRING_TABLE_LOOKUP(freezer_action, FreezerAction);
4397
4398 static const char* const cgroup_pressure_watch_table[_CGROUP_PRESSURE_WATCH_MAX] = {
4399 [CGROUP_PRESSURE_WATCH_OFF] = "off",
4400 [CGROUP_PRESSURE_WATCH_AUTO] = "auto",
4401 [CGROUP_PRESSURE_WATCH_ON] = "on",
4402 [CGROUP_PRESSURE_WATCH_SKIP] = "skip",
4403 };
4404
4405 DEFINE_STRING_TABLE_LOOKUP_WITH_BOOLEAN(cgroup_pressure_watch, CGroupPressureWatch, CGROUP_PRESSURE_WATCH_ON);