/* SPDX-License-Identifier: LGPL-2.1-or-later */

#include <fcntl.h>

#include "sd-messages.h"

#include "af-list.h"
#include "alloc-util.h"
#include "blockdev-util.h"
#include "bpf-devices.h"
#include "bpf-firewall.h"
#include "bpf-foreign.h"
#include "bpf-restrict-ifaces.h"
#include "bpf-socket-bind.h"
#include "btrfs-util.h"
#include "bus-error.h"
#include "bus-locator.h"
#include "cgroup-setup.h"
#include "cgroup-util.h"
#include "cgroup.h"
#include "devnum-util.h"
#include "fd-util.h"
#include "fileio.h"
#include "firewall-util.h"
#include "in-addr-prefix-util.h"
#include "inotify-util.h"
#include "io-util.h"
#include "ip-protocol-list.h"
#include "limits-util.h"
#include "nulstr-util.h"
#include "parse-util.h"
#include "path-util.h"
#include "percent-util.h"
#include "process-util.h"
#include "procfs-util.h"
#include "set.h"
#include "serialize.h"
#include "special.h"
#include "stdio-util.h"
#include "string-table.h"
#include "string-util.h"
#include "virt.h"

#if BPF_FRAMEWORK
#include "bpf-dlopen.h"
#include "bpf-link.h"
#include "bpf/restrict_fs/restrict-fs-skel.h"
#endif

#define CGROUP_CPU_QUOTA_DEFAULT_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)

/* Returns the log level to use when cgroup attribute writes fail. When an attribute is missing or we have access
 * problems we downgrade to LOG_DEBUG. This is supposed to be nice to container managers and kernels which want to mask
 * out specific attributes from us. */
#define LOG_LEVEL_CGROUP_WRITE(r) (IN_SET(abs(r), ENOENT, EROFS, EACCES, EPERM) ? LOG_DEBUG : LOG_WARNING)

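/* Resolve a CGroupTasksMax setting to an absolute number of tasks: a non-zero scale means the value is a
 * fraction (value/scale) of the system-wide task maximum. */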
uint64_t cgroup_tasks_max_resolve(const CGroupTasksMax *tasks_max) {
        if (tasks_max->scale == 0)
                return tasks_max->value;

        return system_tasks_max_scale(tasks_max->value, tasks_max->scale);
}

bool manager_owns_host_root_cgroup(Manager *m) {
        assert(m);

        /* Returns true if we are managing the root cgroup. Note that it isn't sufficient to just check whether the
         * group root path equals "/" since that will also be the case if CLONE_NEWCGROUP is in the mix. Since there
         * appears to be no nice way to detect whether we are in a CLONE_NEWCGROUP namespace we instead just check if
         * we run in any kind of container virtualization. */

        if (MANAGER_IS_USER(m))
                return false;

        if (detect_container() > 0)
                return false;

        return empty_or_root(m->cgroup_root);
}

bool unit_has_startup_cgroup_constraints(Unit *u) {
        assert(u);

        /* Returns true if this unit has any directives which apply during
         * startup/shutdown phases. */

        CGroupContext *c;

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        return c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID ||
               c->startup_io_weight != CGROUP_WEIGHT_INVALID ||
               c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
               c->startup_cpuset_cpus.set ||
               c->startup_cpuset_mems.set ||
               c->startup_memory_high_set ||
               c->startup_memory_max_set ||
               c->startup_memory_swap_max_set ||
               c->startup_memory_zswap_max_set ||
               c->startup_memory_low_set;
}

bool unit_has_host_root_cgroup(Unit *u) {
        assert(u);

        /* Returns whether this unit manages the root cgroup. This will return true if this unit is the root slice and
         * the manager manages the root cgroup. */

        if (!manager_owns_host_root_cgroup(u->manager))
                return false;

        return unit_has_name(u, SPECIAL_ROOT_SLICE);
}

static int set_attribute_and_warn(Unit *u, const char *controller, const char *attribute, const char *value) {
        int r;

        assert(u);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return -EOWNERDEAD;

        r = cg_set_attribute(controller, crt->cgroup_path, attribute, value);
        if (r < 0)
                log_unit_full_errno(u, LOG_LEVEL_CGROUP_WRITE(r), r, "Failed to set '%s' attribute on '%s' to '%.*s': %m",
                                    strna(attribute), empty_to_root(crt->cgroup_path), (int) strcspn(value, NEWLINE), value);

        return r;
}

static void cgroup_compat_warn(void) {
        static bool cgroup_compat_warned = false;

        if (cgroup_compat_warned)
                return;

        log_warning("cgroup compatibility translation between legacy and unified hierarchy settings activated. "
                    "See cgroup-compat debug messages for details.");

        cgroup_compat_warned = true;
}

#define log_cgroup_compat(unit, fmt, ...) do {                                  \
                cgroup_compat_warn();                                           \
                log_unit_debug(unit, "cgroup-compat: " fmt, ##__VA_ARGS__);     \
        } while (false)

void cgroup_context_init(CGroupContext *c) {
        assert(c);

        /* Initialize everything to the kernel defaults. When initializing a bool member to 'true', make
         * sure to serialize in execute-serialize.c using serialize_bool() instead of
         * serialize_bool_elide(), as sd-executor will initialize here to 'true', but serialize_bool_elide()
         * skips serialization if the value is 'false' (as that's the common default), so if the value at
         * runtime is zero it would be lost after deserialization. Same when initializing uint64_t and other
         * values, update/add a conditional serialization check. This is to minimize the amount of
         * serialized data that is sent to the sd-executor, so that there is less work to do on the default
         * cases. */

        *c = (CGroupContext) {
                .cpu_weight = CGROUP_WEIGHT_INVALID,
                .startup_cpu_weight = CGROUP_WEIGHT_INVALID,
                .cpu_quota_per_sec_usec = USEC_INFINITY,
                .cpu_quota_period_usec = USEC_INFINITY,

                .cpu_shares = CGROUP_CPU_SHARES_INVALID,
                .startup_cpu_shares = CGROUP_CPU_SHARES_INVALID,

                .memory_high = CGROUP_LIMIT_MAX,
                .startup_memory_high = CGROUP_LIMIT_MAX,
                .memory_max = CGROUP_LIMIT_MAX,
                .startup_memory_max = CGROUP_LIMIT_MAX,
                .memory_swap_max = CGROUP_LIMIT_MAX,
                .startup_memory_swap_max = CGROUP_LIMIT_MAX,
                .memory_zswap_max = CGROUP_LIMIT_MAX,
                .startup_memory_zswap_max = CGROUP_LIMIT_MAX,

                .memory_limit = CGROUP_LIMIT_MAX,

                .memory_zswap_writeback = true,

                .io_weight = CGROUP_WEIGHT_INVALID,
                .startup_io_weight = CGROUP_WEIGHT_INVALID,

                .blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID,
                .startup_blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID,

                .tasks_max = CGROUP_TASKS_MAX_UNSET,

                .moom_swap = MANAGED_OOM_AUTO,
                .moom_mem_pressure = MANAGED_OOM_AUTO,
                .moom_preference = MANAGED_OOM_PREFERENCE_NONE,

                .memory_pressure_watch = _CGROUP_PRESSURE_WATCH_INVALID,
                .memory_pressure_threshold_usec = USEC_INFINITY,
        };
}

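/* The *_dup() helpers below each deep-copy a single list entry into a CGroupContext. They serve as
 * building blocks for cgroup_context_copy(). */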
int cgroup_context_add_io_device_weight_dup(CGroupContext *c, const CGroupIODeviceWeight *w) {
        _cleanup_free_ CGroupIODeviceWeight *n = NULL;

        assert(c);
        assert(w);

        n = new(CGroupIODeviceWeight, 1);
        if (!n)
                return -ENOMEM;

        *n = (CGroupIODeviceWeight) {
                .path = strdup(w->path),
                .weight = w->weight,
        };
        if (!n->path)
                return -ENOMEM;

        LIST_PREPEND(device_weights, c->io_device_weights, TAKE_PTR(n));
        return 0;
}

int cgroup_context_add_io_device_limit_dup(CGroupContext *c, const CGroupIODeviceLimit *l) {
        _cleanup_free_ CGroupIODeviceLimit *n = NULL;

        assert(c);
        assert(l);

        n = new0(CGroupIODeviceLimit, 1);
        if (!n)
                return -ENOMEM;

        n->path = strdup(l->path);
        if (!n->path)
                return -ENOMEM;

        for (CGroupIOLimitType type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                n->limits[type] = l->limits[type];

        LIST_PREPEND(device_limits, c->io_device_limits, TAKE_PTR(n));
        return 0;
}

int cgroup_context_add_io_device_latency_dup(CGroupContext *c, const CGroupIODeviceLatency *l) {
        _cleanup_free_ CGroupIODeviceLatency *n = NULL;

        assert(c);
        assert(l);

        n = new(CGroupIODeviceLatency, 1);
        if (!n)
                return -ENOMEM;

        *n = (CGroupIODeviceLatency) {
                .path = strdup(l->path),
                .target_usec = l->target_usec,
        };
        if (!n->path)
                return -ENOMEM;

        LIST_PREPEND(device_latencies, c->io_device_latencies, TAKE_PTR(n));
        return 0;
}

int cgroup_context_add_block_io_device_weight_dup(CGroupContext *c, const CGroupBlockIODeviceWeight *w) {
        _cleanup_free_ CGroupBlockIODeviceWeight *n = NULL;

        assert(c);
        assert(w);

        n = new(CGroupBlockIODeviceWeight, 1);
        if (!n)
                return -ENOMEM;

        *n = (CGroupBlockIODeviceWeight) {
                .path = strdup(w->path),
                .weight = w->weight,
        };
        if (!n->path)
                return -ENOMEM;

        LIST_PREPEND(device_weights, c->blockio_device_weights, TAKE_PTR(n));
        return 0;
}

int cgroup_context_add_block_io_device_bandwidth_dup(CGroupContext *c, const CGroupBlockIODeviceBandwidth *b) {
        _cleanup_free_ CGroupBlockIODeviceBandwidth *n = NULL;

        assert(c);
        assert(b);

        n = new(CGroupBlockIODeviceBandwidth, 1);
        if (!n)
                return -ENOMEM;

        *n = (CGroupBlockIODeviceBandwidth) {
                .rbps = b->rbps,
                .wbps = b->wbps,
        };

        LIST_PREPEND(device_bandwidths, c->blockio_device_bandwidths, TAKE_PTR(n));
        return 0;
}

int cgroup_context_add_device_allow_dup(CGroupContext *c, const CGroupDeviceAllow *a) {
        _cleanup_free_ CGroupDeviceAllow *n = NULL;

        assert(c);
        assert(a);

        n = new(CGroupDeviceAllow, 1);
        if (!n)
                return -ENOMEM;

        *n = (CGroupDeviceAllow) {
                .path = strdup(a->path),
                .permissions = a->permissions,
        };
        if (!n->path)
                return -ENOMEM;

        LIST_PREPEND(device_allow, c->device_allow, TAKE_PTR(n));
        return 0;
}

static int cgroup_context_add_socket_bind_item_dup(CGroupContext *c, const CGroupSocketBindItem *i, CGroupSocketBindItem *h) {
        _cleanup_free_ CGroupSocketBindItem *n = NULL;

        assert(c);
        assert(i);

        n = new(CGroupSocketBindItem, 1);
        if (!n)
                return -ENOMEM;

        *n = (CGroupSocketBindItem) {
                .address_family = i->address_family,
                .ip_protocol = i->ip_protocol,
                .nr_ports = i->nr_ports,
                .port_min = i->port_min,
        };

        LIST_PREPEND(socket_bind_items, h, TAKE_PTR(n));
        return 0;
}

int cgroup_context_add_socket_bind_item_allow_dup(CGroupContext *c, const CGroupSocketBindItem *i) {
        return cgroup_context_add_socket_bind_item_dup(c, i, c->socket_bind_allow);
}

int cgroup_context_add_socket_bind_item_deny_dup(CGroupContext *c, const CGroupSocketBindItem *i) {
        return cgroup_context_add_socket_bind_item_dup(c, i, c->socket_bind_deny);
}

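/* Copy all settings from 'src' to 'dst'. List and set members are deep-copied; the lists are walked
 * backwards from the tail so that the prepending *_dup() helpers preserve the original order. */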
int cgroup_context_copy(CGroupContext *dst, const CGroupContext *src) {
        struct in_addr_prefix *i;
        char *iface;
        int r;

        assert(src);
        assert(dst);

        dst->cpu_accounting = src->cpu_accounting;
        dst->io_accounting = src->io_accounting;
        dst->blockio_accounting = src->blockio_accounting;
        dst->memory_accounting = src->memory_accounting;
        dst->tasks_accounting = src->tasks_accounting;
        dst->ip_accounting = src->ip_accounting;

        dst->memory_oom_group = src->memory_oom_group;

        dst->cpu_weight = src->cpu_weight;
        dst->startup_cpu_weight = src->startup_cpu_weight;
        dst->cpu_quota_per_sec_usec = src->cpu_quota_per_sec_usec;
        dst->cpu_quota_period_usec = src->cpu_quota_period_usec;

        dst->cpuset_cpus = src->cpuset_cpus;
        dst->startup_cpuset_cpus = src->startup_cpuset_cpus;
        dst->cpuset_mems = src->cpuset_mems;
        dst->startup_cpuset_mems = src->startup_cpuset_mems;

        dst->io_weight = src->io_weight;
        dst->startup_io_weight = src->startup_io_weight;

        LIST_FOREACH_BACKWARDS(device_weights, w, LIST_FIND_TAIL(device_weights, src->io_device_weights)) {
                r = cgroup_context_add_io_device_weight_dup(dst, w);
                if (r < 0)
                        return r;
        }

        LIST_FOREACH_BACKWARDS(device_limits, l, LIST_FIND_TAIL(device_limits, src->io_device_limits)) {
                r = cgroup_context_add_io_device_limit_dup(dst, l);
                if (r < 0)
                        return r;
        }

        LIST_FOREACH_BACKWARDS(device_latencies, l, LIST_FIND_TAIL(device_latencies, src->io_device_latencies)) {
                r = cgroup_context_add_io_device_latency_dup(dst, l);
                if (r < 0)
                        return r;
        }

        dst->default_memory_min = src->default_memory_min;
        dst->default_memory_low = src->default_memory_low;
        dst->default_startup_memory_low = src->default_startup_memory_low;
        dst->memory_min = src->memory_min;
        dst->memory_low = src->memory_low;
        dst->startup_memory_low = src->startup_memory_low;
        dst->memory_high = src->memory_high;
        dst->startup_memory_high = src->startup_memory_high;
        dst->memory_max = src->memory_max;
        dst->startup_memory_max = src->startup_memory_max;
        dst->memory_swap_max = src->memory_swap_max;
        dst->startup_memory_swap_max = src->startup_memory_swap_max;
        dst->memory_zswap_max = src->memory_zswap_max;
        dst->startup_memory_zswap_max = src->startup_memory_zswap_max;

        dst->default_memory_min_set = src->default_memory_min_set;
        dst->default_memory_low_set = src->default_memory_low_set;
        dst->default_startup_memory_low_set = src->default_startup_memory_low_set;
        dst->memory_min_set = src->memory_min_set;
        dst->memory_low_set = src->memory_low_set;
        dst->startup_memory_low_set = src->startup_memory_low_set;
        dst->startup_memory_high_set = src->startup_memory_high_set;
        dst->startup_memory_max_set = src->startup_memory_max_set;
        dst->startup_memory_swap_max_set = src->startup_memory_swap_max_set;
        dst->startup_memory_zswap_max_set = src->startup_memory_zswap_max_set;
        dst->memory_zswap_writeback = src->memory_zswap_writeback;

        SET_FOREACH(i, src->ip_address_allow) {
                r = in_addr_prefix_add(&dst->ip_address_allow, i);
                if (r < 0)
                        return r;
        }

        SET_FOREACH(i, src->ip_address_deny) {
                r = in_addr_prefix_add(&dst->ip_address_deny, i);
                if (r < 0)
                        return r;
        }

        dst->ip_address_allow_reduced = src->ip_address_allow_reduced;
        dst->ip_address_deny_reduced = src->ip_address_deny_reduced;

        if (!strv_isempty(src->ip_filters_ingress)) {
                dst->ip_filters_ingress = strv_copy(src->ip_filters_ingress);
                if (!dst->ip_filters_ingress)
                        return -ENOMEM;
        }

        if (!strv_isempty(src->ip_filters_egress)) {
                dst->ip_filters_egress = strv_copy(src->ip_filters_egress);
                if (!dst->ip_filters_egress)
                        return -ENOMEM;
        }

        LIST_FOREACH_BACKWARDS(programs, l, LIST_FIND_TAIL(programs, src->bpf_foreign_programs)) {
                r = cgroup_context_add_bpf_foreign_program_dup(dst, l);
                if (r < 0)
                        return r;
        }

        SET_FOREACH(iface, src->restrict_network_interfaces) {
                r = set_put_strdup(&dst->restrict_network_interfaces, iface);
                if (r < 0)
                        return r;
        }
        dst->restrict_network_interfaces_is_allow_list = src->restrict_network_interfaces_is_allow_list;

        dst->cpu_shares = src->cpu_shares;
        dst->startup_cpu_shares = src->startup_cpu_shares;

        dst->blockio_weight = src->blockio_weight;
        dst->startup_blockio_weight = src->startup_blockio_weight;

        LIST_FOREACH_BACKWARDS(device_weights, l, LIST_FIND_TAIL(device_weights, src->blockio_device_weights)) {
                r = cgroup_context_add_block_io_device_weight_dup(dst, l);
                if (r < 0)
                        return r;
        }

        LIST_FOREACH_BACKWARDS(device_bandwidths, l, LIST_FIND_TAIL(device_bandwidths, src->blockio_device_bandwidths)) {
                r = cgroup_context_add_block_io_device_bandwidth_dup(dst, l);
                if (r < 0)
                        return r;
        }

        dst->memory_limit = src->memory_limit;

        dst->device_policy = src->device_policy;
        LIST_FOREACH_BACKWARDS(device_allow, l, LIST_FIND_TAIL(device_allow, src->device_allow)) {
                r = cgroup_context_add_device_allow_dup(dst, l);
                if (r < 0)
                        return r;
        }

        LIST_FOREACH_BACKWARDS(socket_bind_items, l, LIST_FIND_TAIL(socket_bind_items, src->socket_bind_allow)) {
                r = cgroup_context_add_socket_bind_item_allow_dup(dst, l);
                if (r < 0)
                        return r;
        }

        LIST_FOREACH_BACKWARDS(socket_bind_items, l, LIST_FIND_TAIL(socket_bind_items, src->socket_bind_deny)) {
                r = cgroup_context_add_socket_bind_item_deny_dup(dst, l);
                if (r < 0)
                        return r;
        }

        dst->tasks_max = src->tasks_max;

        return 0;
}

void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
        assert(c);
        assert(a);

        LIST_REMOVE(device_allow, c->device_allow, a);
        free(a->path);
        free(a);
}

void cgroup_context_free_io_device_weight(CGroupContext *c, CGroupIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->io_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_io_device_latency(CGroupContext *c, CGroupIODeviceLatency *l) {
        assert(c);
        assert(l);

        LIST_REMOVE(device_latencies, c->io_device_latencies, l);
        free(l->path);
        free(l);
}

void cgroup_context_free_io_device_limit(CGroupContext *c, CGroupIODeviceLimit *l) {
        assert(c);
        assert(l);

        LIST_REMOVE(device_limits, c->io_device_limits, l);
        free(l->path);
        free(l);
}

void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->blockio_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b) {
        assert(c);
        assert(b);

        LIST_REMOVE(device_bandwidths, c->blockio_device_bandwidths, b);
        free(b->path);
        free(b);
}

void cgroup_context_remove_bpf_foreign_program(CGroupContext *c, CGroupBPFForeignProgram *p) {
        assert(c);
        assert(p);

        LIST_REMOVE(programs, c->bpf_foreign_programs, p);
        free(p->bpffs_path);
        free(p);
}

void cgroup_context_remove_socket_bind(CGroupSocketBindItem **head) {
        assert(head);

        LIST_CLEAR(socket_bind_items, *head, free);
}

void cgroup_context_done(CGroupContext *c) {
        assert(c);

        while (c->io_device_weights)
                cgroup_context_free_io_device_weight(c, c->io_device_weights);

        while (c->io_device_latencies)
                cgroup_context_free_io_device_latency(c, c->io_device_latencies);

        while (c->io_device_limits)
                cgroup_context_free_io_device_limit(c, c->io_device_limits);

        while (c->blockio_device_weights)
                cgroup_context_free_blockio_device_weight(c, c->blockio_device_weights);

        while (c->blockio_device_bandwidths)
                cgroup_context_free_blockio_device_bandwidth(c, c->blockio_device_bandwidths);

        while (c->device_allow)
                cgroup_context_free_device_allow(c, c->device_allow);

        cgroup_context_remove_socket_bind(&c->socket_bind_allow);
        cgroup_context_remove_socket_bind(&c->socket_bind_deny);

        c->ip_address_allow = set_free(c->ip_address_allow);
        c->ip_address_deny = set_free(c->ip_address_deny);

        c->ip_filters_ingress = strv_free(c->ip_filters_ingress);
        c->ip_filters_egress = strv_free(c->ip_filters_egress);

        while (c->bpf_foreign_programs)
                cgroup_context_remove_bpf_foreign_program(c, c->bpf_foreign_programs);

        c->restrict_network_interfaces = set_free_free(c->restrict_network_interfaces);

        cpu_set_reset(&c->cpuset_cpus);
        cpu_set_reset(&c->startup_cpuset_cpus);
        cpu_set_reset(&c->cpuset_mems);
        cpu_set_reset(&c->startup_cpuset_mems);

        c->delegate_subgroup = mfree(c->delegate_subgroup);

        nft_set_context_clear(&c->nft_set_context);
}

74b5fb27 629static int unit_get_kernel_memory_limit(Unit *u, const char *file, uint64_t *ret) {
74b5fb27
CD
630 assert(u);
631
9cc54544
LP
632 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
633 if (!crt || !crt->cgroup_path)
74b5fb27
CD
634 return -EOWNERDEAD;
635
9cc54544 636 return cg_get_attribute_as_uint64("memory", crt->cgroup_path, file, ret);
74b5fb27
CD
637}
638
static int unit_compare_memory_limit(Unit *u, const char *property_name, uint64_t *ret_unit_value, uint64_t *ret_kernel_value) {
        CGroupContext *c;
        CGroupMask m;
        const char *file;
        uint64_t unit_value;
        int r;

        /* Compare kernel memcg configuration against our internal systemd state. Unsupported (and will
         * return -ENODATA) on cgroup v1.
         *
         * Returns:
         *
         * <0: On error.
         *  0: If the kernel memory setting doesn't match our configuration.
         * >0: If the kernel memory setting matches our configuration.
         *
         * The following values are only guaranteed to be populated on return >=0:
         *
         * - ret_unit_value will contain our internal expected value for the unit, page-aligned.
         * - ret_kernel_value will contain the actual value presented by the kernel. */

        assert(u);

        r = cg_all_unified();
        if (r < 0)
                return log_debug_errno(r, "Failed to determine cgroup hierarchy version: %m");

        /* Unsupported on v1.
         *
         * We don't return ENOENT, since that could actually mask a genuine problem where somebody else has
         * silently masked the controller. */
        if (r == 0)
                return -ENODATA;

        /* The root slice doesn't have any controller files, so we can't compare anything. */
        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return -ENODATA;

        /* It's possible to have MemoryFoo set without systemd wanting to have the memory controller enabled,
         * for example, in the case of DisableControllers= or cgroup_disable on the kernel command line. To
         * avoid specious errors in these scenarios, check that we even expect the memory controller to be
         * enabled at all. */
        m = unit_get_target_mask(u);
        if (!FLAGS_SET(m, CGROUP_MASK_MEMORY))
                return -ENODATA;

        assert_se(c = unit_get_cgroup_context(u));

        bool startup = u->manager && IN_SET(manager_state(u->manager), MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING);

        if (streq(property_name, "MemoryLow")) {
                unit_value = unit_get_ancestor_memory_low(u);
                file = "memory.low";
        } else if (startup && streq(property_name, "StartupMemoryLow")) {
                unit_value = unit_get_ancestor_startup_memory_low(u);
                file = "memory.low";
        } else if (streq(property_name, "MemoryMin")) {
                unit_value = unit_get_ancestor_memory_min(u);
                file = "memory.min";
        } else if (streq(property_name, "MemoryHigh")) {
                unit_value = c->memory_high;
                file = "memory.high";
        } else if (startup && streq(property_name, "StartupMemoryHigh")) {
                unit_value = c->startup_memory_high;
                file = "memory.high";
        } else if (streq(property_name, "MemoryMax")) {
                unit_value = c->memory_max;
                file = "memory.max";
        } else if (startup && streq(property_name, "StartupMemoryMax")) {
                unit_value = c->startup_memory_max;
                file = "memory.max";
        } else if (streq(property_name, "MemorySwapMax")) {
                unit_value = c->memory_swap_max;
                file = "memory.swap.max";
        } else if (startup && streq(property_name, "StartupMemorySwapMax")) {
                unit_value = c->startup_memory_swap_max;
                file = "memory.swap.max";
        } else if (streq(property_name, "MemoryZSwapMax")) {
                unit_value = c->memory_zswap_max;
                file = "memory.zswap.max";
        } else if (startup && streq(property_name, "StartupMemoryZSwapMax")) {
                unit_value = c->startup_memory_zswap_max;
                file = "memory.zswap.max";
        } else
                return -EINVAL;

        r = unit_get_kernel_memory_limit(u, file, ret_kernel_value);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to parse %s: %m", file);

        /* It's intended (soon) in a future kernel to not expose cgroup memory limits rounded to page
         * boundaries, but instead separate the user-exposed limit, which is whatever userspace told us, from
         * our internal page-counting. To support those future kernels, just check the value itself first
         * without any page-alignment. */
        if (*ret_kernel_value == unit_value) {
                *ret_unit_value = unit_value;
                return 1;
        }

        /* The current kernel behaviour, by comparison, is that even if you write a particular number of
         * bytes into a cgroup memory file, it always returns that number page-aligned down (since the kernel
         * internally stores cgroup limits in pages). As such, so long as it aligns properly, everything is
         * cricket. */
        if (unit_value != CGROUP_LIMIT_MAX)
                unit_value = PAGE_ALIGN_DOWN(unit_value);

        *ret_unit_value = unit_value;

        return *ret_kernel_value == *ret_unit_value;
}

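/* Format a short annotation describing how the kernel's view of a memory limit compares with our own
 * configuration. The buffer is left empty when the values match or when no comparison is possible. */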
#define FORMAT_CGROUP_DIFF_MAX 128

static char *format_cgroup_memory_limit_comparison(Unit *u, const char *property_name, char *buf, size_t l) {
        uint64_t kval, sval;
        int r;

        assert(u);
        assert(property_name);
        assert(buf);
        assert(l > 0);

        r = unit_compare_memory_limit(u, property_name, &sval, &kval);

        /* memory.swap.max is special in that it relies on CONFIG_MEMCG_SWAP (and the default swapaccount=1).
         * In the absence of reliably being able to detect whether memcg swap support is available or not,
         * only complain if the error is not ENOENT. This is similarly the case for memory.zswap.max relying
         * on CONFIG_ZSWAP. */
        if (r > 0 || IN_SET(r, -ENODATA, -EOWNERDEAD) ||
            (r == -ENOENT && STR_IN_SET(property_name,
                                        "MemorySwapMax",
                                        "StartupMemorySwapMax",
                                        "MemoryZSwapMax",
                                        "StartupMemoryZSwapMax")))
                buf[0] = 0;
        else if (r < 0) {
                errno = -r;
                (void) snprintf(buf, l, " (error getting kernel value: %m)");
        } else
                (void) snprintf(buf, l, " (different value in kernel: %" PRIu64 ")", kval);

        return buf;
}

const char *cgroup_device_permissions_to_string(CGroupDevicePermissions p) {
        static const char *table[_CGROUP_DEVICE_PERMISSIONS_MAX] = {
                /* Let's simply define a table with every possible combination. As long as those are just 8 we
                 * can get away with it. If this ever grows to more we need to revisit this logic though. */
                [0] = "",
                [CGROUP_DEVICE_READ] = "r",
                [CGROUP_DEVICE_WRITE] = "w",
                [CGROUP_DEVICE_MKNOD] = "m",
                [CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE] = "rw",
                [CGROUP_DEVICE_READ|CGROUP_DEVICE_MKNOD] = "rm",
                [CGROUP_DEVICE_WRITE|CGROUP_DEVICE_MKNOD] = "wm",
                [CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE|CGROUP_DEVICE_MKNOD] = "rwm",
        };

        if (p < 0 || p >= _CGROUP_DEVICE_PERMISSIONS_MAX)
                return NULL;

        return table[p];
}

CGroupDevicePermissions cgroup_device_permissions_from_string(const char *s) {
        CGroupDevicePermissions p = 0;

        if (!s)
                return _CGROUP_DEVICE_PERMISSIONS_INVALID;

        for (const char *c = s; *c; c++) {
                if (*c == 'r')
                        p |= CGROUP_DEVICE_READ;
                else if (*c == 'w')
                        p |= CGROUP_DEVICE_WRITE;
                else if (*c == 'm')
                        p |= CGROUP_DEVICE_MKNOD;
                else
                        return _CGROUP_DEVICE_PERMISSIONS_INVALID;
        }

        return p;
}

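/* Write a human-readable dump of all cgroup settings of the unit to 'f', one setting per line, each line
 * prefixed with 'prefix'. */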
void cgroup_context_dump(Unit *u, FILE* f, const char *prefix) {
        _cleanup_free_ char *disable_controllers_str = NULL, *delegate_controllers_str = NULL, *cpuset_cpus = NULL, *cpuset_mems = NULL, *startup_cpuset_cpus = NULL, *startup_cpuset_mems = NULL;
        CGroupContext *c;
        struct in_addr_prefix *iaai;
        char cda[FORMAT_CGROUP_DIFF_MAX], cdb[FORMAT_CGROUP_DIFF_MAX], cdc[FORMAT_CGROUP_DIFF_MAX], cdd[FORMAT_CGROUP_DIFF_MAX],
             cde[FORMAT_CGROUP_DIFF_MAX], cdf[FORMAT_CGROUP_DIFF_MAX], cdg[FORMAT_CGROUP_DIFF_MAX], cdh[FORMAT_CGROUP_DIFF_MAX],
             cdi[FORMAT_CGROUP_DIFF_MAX], cdj[FORMAT_CGROUP_DIFF_MAX], cdk[FORMAT_CGROUP_DIFF_MAX];

        assert(u);
        assert(f);

        assert_se(c = unit_get_cgroup_context(u));

        prefix = strempty(prefix);

        (void) cg_mask_to_string(c->disable_controllers, &disable_controllers_str);
        (void) cg_mask_to_string(c->delegate_controllers, &delegate_controllers_str);

        /* "Delegate=" means "yes, but no controllers". Show this as "(none)". */
        const char *delegate_str = delegate_controllers_str ?: c->delegate ? "(none)" : "no";

        cpuset_cpus = cpu_set_to_range_string(&c->cpuset_cpus);
        startup_cpuset_cpus = cpu_set_to_range_string(&c->startup_cpuset_cpus);
        cpuset_mems = cpu_set_to_range_string(&c->cpuset_mems);
        startup_cpuset_mems = cpu_set_to_range_string(&c->startup_cpuset_mems);

        fprintf(f,
                "%sCPUAccounting: %s\n"
                "%sIOAccounting: %s\n"
                "%sBlockIOAccounting: %s\n"
                "%sMemoryAccounting: %s\n"
                "%sTasksAccounting: %s\n"
                "%sIPAccounting: %s\n"
                "%sCPUWeight: %" PRIu64 "\n"
                "%sStartupCPUWeight: %" PRIu64 "\n"
                "%sCPUShares: %" PRIu64 "\n"
                "%sStartupCPUShares: %" PRIu64 "\n"
                "%sCPUQuotaPerSecSec: %s\n"
                "%sCPUQuotaPeriodSec: %s\n"
                "%sAllowedCPUs: %s\n"
                "%sStartupAllowedCPUs: %s\n"
                "%sAllowedMemoryNodes: %s\n"
                "%sStartupAllowedMemoryNodes: %s\n"
                "%sIOWeight: %" PRIu64 "\n"
                "%sStartupIOWeight: %" PRIu64 "\n"
                "%sBlockIOWeight: %" PRIu64 "\n"
                "%sStartupBlockIOWeight: %" PRIu64 "\n"
                "%sDefaultMemoryMin: %" PRIu64 "\n"
                "%sDefaultMemoryLow: %" PRIu64 "\n"
                "%sMemoryMin: %" PRIu64 "%s\n"
                "%sMemoryLow: %" PRIu64 "%s\n"
                "%sStartupMemoryLow: %" PRIu64 "%s\n"
                "%sMemoryHigh: %" PRIu64 "%s\n"
                "%sStartupMemoryHigh: %" PRIu64 "%s\n"
                "%sMemoryMax: %" PRIu64 "%s\n"
                "%sStartupMemoryMax: %" PRIu64 "%s\n"
                "%sMemorySwapMax: %" PRIu64 "%s\n"
                "%sStartupMemorySwapMax: %" PRIu64 "%s\n"
                "%sMemoryZSwapMax: %" PRIu64 "%s\n"
                "%sStartupMemoryZSwapMax: %" PRIu64 "%s\n"
                "%sMemoryZSwapWriteback: %s\n"
                "%sMemoryLimit: %" PRIu64 "\n"
                "%sTasksMax: %" PRIu64 "\n"
                "%sDevicePolicy: %s\n"
                "%sDisableControllers: %s\n"
                "%sDelegate: %s\n"
                "%sManagedOOMSwap: %s\n"
                "%sManagedOOMMemoryPressure: %s\n"
                "%sManagedOOMMemoryPressureLimit: " PERMYRIAD_AS_PERCENT_FORMAT_STR "\n"
                "%sManagedOOMPreference: %s\n"
                "%sMemoryPressureWatch: %s\n"
                "%sCoredumpReceive: %s\n",
                prefix, yes_no(c->cpu_accounting),
                prefix, yes_no(c->io_accounting),
                prefix, yes_no(c->blockio_accounting),
                prefix, yes_no(c->memory_accounting),
                prefix, yes_no(c->tasks_accounting),
                prefix, yes_no(c->ip_accounting),
                prefix, c->cpu_weight,
                prefix, c->startup_cpu_weight,
                prefix, c->cpu_shares,
                prefix, c->startup_cpu_shares,
                prefix, FORMAT_TIMESPAN(c->cpu_quota_per_sec_usec, 1),
                prefix, FORMAT_TIMESPAN(c->cpu_quota_period_usec, 1),
                prefix, strempty(cpuset_cpus),
                prefix, strempty(startup_cpuset_cpus),
                prefix, strempty(cpuset_mems),
                prefix, strempty(startup_cpuset_mems),
                prefix, c->io_weight,
                prefix, c->startup_io_weight,
                prefix, c->blockio_weight,
                prefix, c->startup_blockio_weight,
                prefix, c->default_memory_min,
                prefix, c->default_memory_low,
                prefix, c->memory_min, format_cgroup_memory_limit_comparison(u, "MemoryMin", cda, sizeof(cda)),
                prefix, c->memory_low, format_cgroup_memory_limit_comparison(u, "MemoryLow", cdb, sizeof(cdb)),
                prefix, c->startup_memory_low, format_cgroup_memory_limit_comparison(u, "StartupMemoryLow", cdc, sizeof(cdc)),
                prefix, c->memory_high, format_cgroup_memory_limit_comparison(u, "MemoryHigh", cdd, sizeof(cdd)),
                prefix, c->startup_memory_high, format_cgroup_memory_limit_comparison(u, "StartupMemoryHigh", cde, sizeof(cde)),
                prefix, c->memory_max, format_cgroup_memory_limit_comparison(u, "MemoryMax", cdf, sizeof(cdf)),
                prefix, c->startup_memory_max, format_cgroup_memory_limit_comparison(u, "StartupMemoryMax", cdg, sizeof(cdg)),
                prefix, c->memory_swap_max, format_cgroup_memory_limit_comparison(u, "MemorySwapMax", cdh, sizeof(cdh)),
                prefix, c->startup_memory_swap_max, format_cgroup_memory_limit_comparison(u, "StartupMemorySwapMax", cdi, sizeof(cdi)),
                prefix, c->memory_zswap_max, format_cgroup_memory_limit_comparison(u, "MemoryZSwapMax", cdj, sizeof(cdj)),
                prefix, c->startup_memory_zswap_max, format_cgroup_memory_limit_comparison(u, "StartupMemoryZSwapMax", cdk, sizeof(cdk)),
                prefix, yes_no(c->memory_zswap_writeback),
                prefix, c->memory_limit,
                prefix, cgroup_tasks_max_resolve(&c->tasks_max),
                prefix, cgroup_device_policy_to_string(c->device_policy),
                prefix, strempty(disable_controllers_str),
                prefix, delegate_str,
                prefix, managed_oom_mode_to_string(c->moom_swap),
                prefix, managed_oom_mode_to_string(c->moom_mem_pressure),
                prefix, PERMYRIAD_AS_PERCENT_FORMAT_VAL(UINT32_SCALE_TO_PERMYRIAD(c->moom_mem_pressure_limit)),
                prefix, managed_oom_preference_to_string(c->moom_preference),
                prefix, cgroup_pressure_watch_to_string(c->memory_pressure_watch),
                prefix, yes_no(c->coredump_receive));

        if (c->delegate_subgroup)
                fprintf(f, "%sDelegateSubgroup: %s\n",
                        prefix, c->delegate_subgroup);

        if (c->memory_pressure_threshold_usec != USEC_INFINITY)
                fprintf(f, "%sMemoryPressureThresholdSec: %s\n",
                        prefix, FORMAT_TIMESPAN(c->memory_pressure_threshold_usec, 1));

        LIST_FOREACH(device_allow, a, c->device_allow)
                /* strna() below should be redundant, for avoiding -Werror=format-overflow= error. See #30223. */
                fprintf(f,
                        "%sDeviceAllow: %s %s\n",
                        prefix,
                        a->path,
                        strna(cgroup_device_permissions_to_string(a->permissions)));

        LIST_FOREACH(device_weights, iw, c->io_device_weights)
                fprintf(f,
                        "%sIODeviceWeight: %s %" PRIu64 "\n",
                        prefix,
                        iw->path,
                        iw->weight);

        LIST_FOREACH(device_latencies, l, c->io_device_latencies)
                fprintf(f,
                        "%sIODeviceLatencyTargetSec: %s %s\n",
                        prefix,
                        l->path,
                        FORMAT_TIMESPAN(l->target_usec, 1));

        LIST_FOREACH(device_limits, il, c->io_device_limits)
                for (CGroupIOLimitType type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                        if (il->limits[type] != cgroup_io_limit_defaults[type])
                                fprintf(f,
                                        "%s%s: %s %s\n",
                                        prefix,
                                        cgroup_io_limit_type_to_string(type),
                                        il->path,
                                        FORMAT_BYTES(il->limits[type]));

        LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                fprintf(f,
                        "%sBlockIODeviceWeight: %s %" PRIu64,
                        prefix,
                        w->path,
                        w->weight);

        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                if (b->rbps != CGROUP_LIMIT_MAX)
                        fprintf(f,
                                "%sBlockIOReadBandwidth: %s %s\n",
                                prefix,
                                b->path,
                                FORMAT_BYTES(b->rbps));
                if (b->wbps != CGROUP_LIMIT_MAX)
                        fprintf(f,
                                "%sBlockIOWriteBandwidth: %s %s\n",
                                prefix,
                                b->path,
                                FORMAT_BYTES(b->wbps));
        }

        SET_FOREACH(iaai, c->ip_address_allow)
                fprintf(f, "%sIPAddressAllow: %s\n", prefix,
                        IN_ADDR_PREFIX_TO_STRING(iaai->family, &iaai->address, iaai->prefixlen));
        SET_FOREACH(iaai, c->ip_address_deny)
                fprintf(f, "%sIPAddressDeny: %s\n", prefix,
                        IN_ADDR_PREFIX_TO_STRING(iaai->family, &iaai->address, iaai->prefixlen));

        STRV_FOREACH(path, c->ip_filters_ingress)
                fprintf(f, "%sIPIngressFilterPath: %s\n", prefix, *path);
        STRV_FOREACH(path, c->ip_filters_egress)
                fprintf(f, "%sIPEgressFilterPath: %s\n", prefix, *path);

        LIST_FOREACH(programs, p, c->bpf_foreign_programs)
                fprintf(f, "%sBPFProgram: %s:%s",
                        prefix, bpf_cgroup_attach_type_to_string(p->attach_type), p->bpffs_path);

        if (c->socket_bind_allow) {
                fprintf(f, "%sSocketBindAllow: ", prefix);
                cgroup_context_dump_socket_bind_items(c->socket_bind_allow, f);
                fputc('\n', f);
        }

        if (c->socket_bind_deny) {
                fprintf(f, "%sSocketBindDeny: ", prefix);
                cgroup_context_dump_socket_bind_items(c->socket_bind_deny, f);
                fputc('\n', f);
        }

        if (c->restrict_network_interfaces) {
                char *iface;
                SET_FOREACH(iface, c->restrict_network_interfaces)
                        fprintf(f, "%sRestrictNetworkInterfaces: %s\n", prefix, iface);
        }

        FOREACH_ARRAY(nft_set, c->nft_set_context.sets, c->nft_set_context.n_sets)
                fprintf(f, "%sNFTSet: %s:%s:%s:%s\n", prefix, nft_set_source_to_string(nft_set->source),
                        nfproto_to_string(nft_set->nfproto), nft_set->table, nft_set->set);
}

void cgroup_context_dump_socket_bind_item(const CGroupSocketBindItem *item, FILE *f) {
        const char *family, *colon1, *protocol = "", *colon2 = "";

        family = strempty(af_to_ipv4_ipv6(item->address_family));
        colon1 = isempty(family) ? "" : ":";

        if (item->ip_protocol != 0) {
                protocol = ip_protocol_to_tcp_udp(item->ip_protocol);
                colon2 = ":";
        }

        if (item->nr_ports == 0)
                fprintf(f, "%s%s%s%sany", family, colon1, protocol, colon2);
        else if (item->nr_ports == 1)
                fprintf(f, "%s%s%s%s%" PRIu16, family, colon1, protocol, colon2, item->port_min);
        else {
                uint16_t port_max = item->port_min + item->nr_ports - 1;
                fprintf(f, "%s%s%s%s%" PRIu16 "-%" PRIu16, family, colon1, protocol, colon2,
                        item->port_min, port_max);
        }
}

void cgroup_context_dump_socket_bind_items(const CGroupSocketBindItem *items, FILE *f) {
        bool first = true;

        LIST_FOREACH(socket_bind_items, bi, items) {
                if (first)
                        first = false;
                else
                        fputc(' ', f);

                cgroup_context_dump_socket_bind_item(bi, f);
        }
}

int cgroup_context_add_device_allow(CGroupContext *c, const char *dev, CGroupDevicePermissions p) {
        _cleanup_free_ CGroupDeviceAllow *a = NULL;
        _cleanup_free_ char *d = NULL;

        assert(c);
        assert(dev);
        assert(p >= 0 && p < _CGROUP_DEVICE_PERMISSIONS_MAX);

        if (p == 0)
                p = _CGROUP_DEVICE_PERMISSIONS_ALL;

        a = new(CGroupDeviceAllow, 1);
        if (!a)
                return -ENOMEM;

        d = strdup(dev);
        if (!d)
                return -ENOMEM;

        *a = (CGroupDeviceAllow) {
                .path = TAKE_PTR(d),
                .permissions = p,
        };

        LIST_PREPEND(device_allow, c->device_allow, a);
        TAKE_PTR(a);

        return 0;
}

int cgroup_context_add_or_update_device_allow(CGroupContext *c, const char *dev, CGroupDevicePermissions p) {
        assert(c);
        assert(dev);
        assert(p >= 0 && p < _CGROUP_DEVICE_PERMISSIONS_MAX);

        if (p == 0)
                p = _CGROUP_DEVICE_PERMISSIONS_ALL;

        LIST_FOREACH(device_allow, b, c->device_allow)
                if (path_equal(b->path, dev)) {
                        b->permissions = p;
                        return 0;
                }

        return cgroup_context_add_device_allow(c, dev, p);
}

int cgroup_context_add_bpf_foreign_program(CGroupContext *c, uint32_t attach_type, const char *bpffs_path) {
        CGroupBPFForeignProgram *p;
        _cleanup_free_ char *d = NULL;

        assert(c);
        assert(bpffs_path);

        if (!path_is_normalized(bpffs_path) || !path_is_absolute(bpffs_path))
                return log_error_errno(SYNTHETIC_ERRNO(EINVAL), "Path is not normalized.");

        d = strdup(bpffs_path);
        if (!d)
                return log_oom();

        p = new(CGroupBPFForeignProgram, 1);
        if (!p)
                return log_oom();

        *p = (CGroupBPFForeignProgram) {
                .attach_type = attach_type,
                .bpffs_path = TAKE_PTR(d),
        };

        LIST_PREPEND(programs, c->bpf_foreign_programs, TAKE_PTR(p));

        return 0;
}

#define UNIT_DEFINE_ANCESTOR_MEMORY_LOOKUP(entry)                       \
        uint64_t unit_get_ancestor_##entry(Unit *u) {                   \
                CGroupContext *c;                                       \
                                                                        \
                /* 1. Is entry set in this unit? If so, use that.       \
                 * 2. Is the default for this entry set in any          \
                 *    ancestor? If so, use that.                        \
                 * 3. Otherwise, return CGROUP_LIMIT_MIN. */            \
                                                                        \
                assert(u);                                              \
                                                                        \
                c = unit_get_cgroup_context(u);                         \
                if (c && c->entry##_set)                                \
                        return c->entry;                                \
                                                                        \
                while ((u = UNIT_GET_SLICE(u))) {                       \
                        c = unit_get_cgroup_context(u);                 \
                        if (c && c->default_##entry##_set)              \
                                return c->default_##entry;              \
                }                                                       \
                                                                        \
                /* We've reached the root, but nobody had default for   \
                 * this entry set, so set it to the kernel default. */  \
                return CGROUP_LIMIT_MIN;                                \
        }

UNIT_DEFINE_ANCESTOR_MEMORY_LOOKUP(memory_low);
UNIT_DEFINE_ANCESTOR_MEMORY_LOOKUP(startup_memory_low);
UNIT_DEFINE_ANCESTOR_MEMORY_LOOKUP(memory_min);

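/* Best-effort xattr helpers: failures to set or remove an xattr on the unit's cgroup are logged at debug
 * level only, since cgroup xattr support depends on the kernel version. */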
static void unit_set_xattr_graceful(Unit *u, const char *name, const void *data, size_t size) {
        int r;

        assert(u);
        assert(name);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return;

        r = cg_set_xattr(crt->cgroup_path, name, data, size, 0);
        if (r < 0)
                log_unit_debug_errno(u, r, "Failed to set '%s' xattr on control group %s, ignoring: %m", name, empty_to_root(crt->cgroup_path));
}

static void unit_remove_xattr_graceful(Unit *u, const char *name) {
        int r;

        assert(u);
        assert(name);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return;

        r = cg_remove_xattr(crt->cgroup_path, name);
        if (r < 0 && !ERRNO_IS_XATTR_ABSENT(r))
                log_unit_debug_errno(u, r, "Failed to remove '%s' xattr flag on control group %s, ignoring: %m", name, empty_to_root(crt->cgroup_path));
}

static void cgroup_oomd_xattr_apply(Unit *u) {
        CGroupContext *c;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return;

        if (c->moom_preference == MANAGED_OOM_PREFERENCE_OMIT)
                unit_set_xattr_graceful(u, "user.oomd_omit", "1", 1);

        if (c->moom_preference == MANAGED_OOM_PREFERENCE_AVOID)
                unit_set_xattr_graceful(u, "user.oomd_avoid", "1", 1);

        if (c->moom_preference != MANAGED_OOM_PREFERENCE_AVOID)
                unit_remove_xattr_graceful(u, "user.oomd_avoid");

        if (c->moom_preference != MANAGED_OOM_PREFERENCE_OMIT)
                unit_remove_xattr_graceful(u, "user.oomd_omit");
}

static int cgroup_log_xattr_apply(Unit *u) {
        ExecContext *c;
        size_t len, allowed_patterns_len, denied_patterns_len;
        _cleanup_free_ char *patterns = NULL, *allowed_patterns = NULL, *denied_patterns = NULL;
        char *last;
        int r;

        assert(u);

        c = unit_get_exec_context(u);
        if (!c)
                /* Some unit types have a cgroup context but no exec context, so we do not log
                 * any error here to avoid confusion. */
                return 0;

        if (set_isempty(c->log_filter_allowed_patterns) && set_isempty(c->log_filter_denied_patterns)) {
                unit_remove_xattr_graceful(u, "user.journald_log_filter_patterns");
                return 0;
        }

        r = set_make_nulstr(c->log_filter_allowed_patterns, &allowed_patterns, &allowed_patterns_len);
        if (r < 0)
                return log_debug_errno(r, "Failed to make nulstr from set: %m");

        r = set_make_nulstr(c->log_filter_denied_patterns, &denied_patterns, &denied_patterns_len);
        if (r < 0)
                return log_debug_errno(r, "Failed to make nulstr from set: %m");

        /* Use nul character separated strings without trailing nul */
        allowed_patterns_len = LESS_BY(allowed_patterns_len, 1u);
        denied_patterns_len = LESS_BY(denied_patterns_len, 1u);

        len = allowed_patterns_len + 1 + denied_patterns_len;
        patterns = new(char, len);
        if (!patterns)
                return log_oom_debug();

        last = mempcpy_safe(patterns, allowed_patterns, allowed_patterns_len);
        *(last++) = '\xff';
        memcpy_safe(last, denied_patterns, denied_patterns_len);

        unit_set_xattr_graceful(u, "user.journald_log_filter_patterns", patterns, len);

        return 0;
}

static void cgroup_invocation_id_xattr_apply(Unit *u) {
        bool b;

        assert(u);

        b = !sd_id128_is_null(u->invocation_id);
        FOREACH_STRING(xn, "trusted.invocation_id", "user.invocation_id") {
                if (b)
                        unit_set_xattr_graceful(u, xn, SD_ID128_TO_STRING(u->invocation_id), 32);
                else
                        unit_remove_xattr_graceful(u, xn);
        }
}

static void cgroup_coredump_xattr_apply(Unit *u) {
        CGroupContext *c;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return;

        if (unit_cgroup_delegate(u) && c->coredump_receive)
                unit_set_xattr_graceful(u, "user.coredump_receive", "1", 1);
        else
                unit_remove_xattr_graceful(u, "user.coredump_receive");
}

static void cgroup_delegate_xattr_apply(Unit *u) {
        bool b;

        assert(u);

        /* Indicate on the cgroup whether delegation is on, via an xattr. This is best-effort, as old kernels
         * didn't support xattrs on cgroups at all. Later they got support for setting 'trusted.*' xattrs,
         * and even later 'user.*' xattrs. We started setting this field when 'trusted.*' was added, and
         * given this is now pretty much API, let's continue to support that. But also set 'user.*' as well,
         * since it is readable by any user, not just CAP_SYS_ADMIN. This hence comes with slightly weaker
         * security (as users who got delegated cgroups could turn it off if they like), but this shouldn't
         * be a big problem given this communicates delegation state to clients, but the manager never reads
         * it. */
        b = unit_cgroup_delegate(u);
        FOREACH_STRING(xn, "trusted.delegate", "user.delegate") {
                if (b)
                        unit_set_xattr_graceful(u, xn, "1", 1);
                else
                        unit_remove_xattr_graceful(u, xn);
        }
}

static void cgroup_survive_xattr_apply(Unit *u) {
        int r;

        assert(u);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt)
                return;

        if (u->survive_final_kill_signal) {
                r = cg_set_xattr(
                                crt->cgroup_path,
                                "user.survive_final_kill_signal",
                                "1",
                                1,
                                /* flags= */ 0);
                /* user xattr support was added in kernel v5.7 */
                if (ERRNO_IS_NEG_NOT_SUPPORTED(r))
                        r = cg_set_xattr(
                                        crt->cgroup_path,
                                        "trusted.survive_final_kill_signal",
                                        "1",
                                        1,
                                        /* flags= */ 0);
                if (r < 0)
                        log_unit_debug_errno(u,
                                             r,
                                             "Failed to set 'survive_final_kill_signal' xattr on control "
                                             "group %s, ignoring: %m",
                                             empty_to_root(crt->cgroup_path));
        } else {
                unit_remove_xattr_graceful(u, "user.survive_final_kill_signal");
                unit_remove_xattr_graceful(u, "trusted.survive_final_kill_signal");
        }
}

static void cgroup_xattr_apply(Unit *u) {
        assert(u);

        /* The 'user.*' xattrs can be set from a user manager. */
        cgroup_oomd_xattr_apply(u);
        cgroup_log_xattr_apply(u);
        cgroup_coredump_xattr_apply(u);

        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        cgroup_invocation_id_xattr_apply(u);
        cgroup_delegate_xattr_apply(u);
        cgroup_survive_xattr_apply(u);
}

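/* Resolve the path 'p' to the whole block device backing it: device nodes are used directly, otherwise
 * the device of the containing file system is determined (with special handling for btrfs), and LUKS/DM
 * devices and partitions are followed back to the originating disk. */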
static int lookup_block_device(const char *p, dev_t *ret) {
        dev_t rdev, dev = 0;
        mode_t mode;
        int r;

        assert(p);
        assert(ret);

        r = device_path_parse_major_minor(p, &mode, &rdev);
        if (r == -ENODEV) { /* not a parsable device node, need to go to disk */
                struct stat st;

                if (stat(p, &st) < 0)
                        return log_warning_errno(errno, "Couldn't stat device '%s': %m", p);

                mode = st.st_mode;
                rdev = st.st_rdev;
                dev = st.st_dev;
        } else if (r < 0)
                return log_warning_errno(r, "Failed to parse major/minor from path '%s': %m", p);

        if (S_ISCHR(mode))
                return log_warning_errno(SYNTHETIC_ERRNO(ENOTBLK),
                                         "Device node '%s' is a character device, but block device needed.", p);
        if (S_ISBLK(mode))
                *ret = rdev;
        else if (major(dev) != 0)
                *ret = dev; /* If this is not a device node then use the block device this file is stored on */
        else {
                /* If this is btrfs, getting the backing block device is a bit harder */
                r = btrfs_get_block_device(p, ret);
                if (r == -ENOTTY)
                        return log_warning_errno(SYNTHETIC_ERRNO(ENODEV),
                                                 "'%s' is not a block device node, and file system block device cannot be determined or is not local.", p);
                if (r < 0)
                        return log_warning_errno(r, "Failed to determine block device backing btrfs file system '%s': %m", p);
        }

        /* If this is a LUKS/DM device, recursively try to get the originating block device */
        while (block_get_originating(*ret, ret) > 0);

        /* If this is a partition, try to get the originating block device */
        (void) block_get_whole_disk(*ret, ret);
        return 0;
}

static bool cgroup_context_has_cpu_weight(CGroupContext *c) {
        return c->cpu_weight != CGROUP_WEIGHT_INVALID ||
               c->startup_cpu_weight != CGROUP_WEIGHT_INVALID;
}

static bool cgroup_context_has_cpu_shares(CGroupContext *c) {
        return c->cpu_shares != CGROUP_CPU_SHARES_INVALID ||
               c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID;
}

static bool cgroup_context_has_allowed_cpus(CGroupContext *c) {
        return c->cpuset_cpus.set || c->startup_cpuset_cpus.set;
}

static bool cgroup_context_has_allowed_mems(CGroupContext *c) {
        return c->cpuset_mems.set || c->startup_cpuset_mems.set;
}

uint64_t cgroup_context_cpu_weight(CGroupContext *c, ManagerState state) {
        assert(c);

        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING) &&
            c->startup_cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->startup_cpu_weight;
        else if (c->cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->cpu_weight;
        else
                return CGROUP_WEIGHT_DEFAULT;
}

static uint64_t cgroup_context_cpu_shares(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING) &&
            c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->startup_cpu_shares;
        else if (c->cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->cpu_shares;
        else
                return CGROUP_CPU_SHARES_DEFAULT;
}

static CPUSet *cgroup_context_allowed_cpus(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING) &&
            c->startup_cpuset_cpus.set)
                return &c->startup_cpuset_cpus;
        else
                return &c->cpuset_cpus;
}

static CPUSet *cgroup_context_allowed_mems(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING) &&
            c->startup_cpuset_mems.set)
                return &c->startup_cpuset_mems;
        else
                return &c->cpuset_mems;
}

usec_t cgroup_cpu_adjust_period(usec_t period, usec_t quota, usec_t resolution, usec_t max_period) {
        /* kernel uses a minimum resolution of 1ms, so both period and (quota * period)
         * need to be higher than that boundary. quota is specified in USecPerSec.
         * Additionally, period must be at most max_period. */
        assert(quota > 0);

        return MIN(MAX3(period, resolution, resolution * USEC_PER_SEC / quota), max_period);
}

static usec_t cgroup_cpu_adjust_period_and_log(Unit *u, usec_t period, usec_t quota) {
        usec_t new_period;

        assert(u);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt)
                return USEC_INFINITY;

        if (quota == USEC_INFINITY)
                /* Always use default period for infinity quota. */
                return CGROUP_CPU_QUOTA_DEFAULT_PERIOD_USEC;

        if (period == USEC_INFINITY)
                /* Default period was requested. */
                period = CGROUP_CPU_QUOTA_DEFAULT_PERIOD_USEC;

        /* Clamp to interval [1ms, 1s] */
        new_period = cgroup_cpu_adjust_period(period, quota, USEC_PER_MSEC, USEC_PER_SEC);

        if (new_period != period) {
                log_unit_full(u, crt->warned_clamping_cpu_quota_period ? LOG_DEBUG : LOG_WARNING,
                              "Clamping CPU interval for cpu.max: period is now %s",
                              FORMAT_TIMESPAN(new_period, 1));
                crt->warned_clamping_cpu_quota_period = true;
        }

        return new_period;
}

52fecf20
LP
1524static void cgroup_apply_unified_cpu_weight(Unit *u, uint64_t weight) {
1525 char buf[DECIMAL_STR_MAX(uint64_t) + 2];
66ebf6c0 1526
c8340822 1527 if (weight == CGROUP_WEIGHT_IDLE)
1528 return;
66ebf6c0 1529 xsprintf(buf, "%" PRIu64 "\n", weight);
293d32df 1530 (void) set_attribute_and_warn(u, "cpu", "cpu.weight", buf);
52fecf20
LP
1531}
1532
c8340822 1533static void cgroup_apply_unified_cpu_idle(Unit *u, uint64_t weight) {
1534 int r;
1535 bool is_idle;
1536 const char *idle_val;
1537
9cc54544
LP
1538 assert(u);
1539
1540 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
1541 if (!crt || !crt->cgroup_path)
1542 return;
1543
c8340822 1544 is_idle = weight == CGROUP_WEIGHT_IDLE;
1545 idle_val = one_zero(is_idle);
9cc54544 1546 r = cg_set_attribute("cpu", crt->cgroup_path, "cpu.idle", idle_val);
c8340822 1547 if (r < 0 && (r != -ENOENT || is_idle))
1548 log_unit_full_errno(u, LOG_LEVEL_CGROUP_WRITE(r), r, "Failed to set '%s' attribute on '%s' to '%s': %m",
9cc54544 1549 "cpu.idle", empty_to_root(crt->cgroup_path), idle_val);
c8340822 1550}
1551
10f28641 1552static void cgroup_apply_unified_cpu_quota(Unit *u, usec_t quota, usec_t period) {
52fecf20 1553 char buf[(DECIMAL_STR_MAX(usec_t) + 1) * 2 + 1];
66ebf6c0 1554
9cc54544
LP
1555 assert(u);
1556
10f28641 1557 period = cgroup_cpu_adjust_period_and_log(u, period, quota);
66ebf6c0
TH
1558 if (quota != USEC_INFINITY)
1559 xsprintf(buf, USEC_FMT " " USEC_FMT "\n",
10f28641 1560 MAX(quota * period / USEC_PER_SEC, USEC_PER_MSEC), period);
66ebf6c0 1561 else
10f28641 1562 xsprintf(buf, "max " USEC_FMT "\n", period);
293d32df 1563 (void) set_attribute_and_warn(u, "cpu", "cpu.max", buf);
66ebf6c0
TH
1564}
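
For illustration (not part of the source), a unit with CPUQuota=20% (a quota of 200ms of CPU time per second) and the default 100ms period ends up with the following string written to cpu.max, since 200000 * 100000 / USEC_PER_SEC = 20000:

/* "20000 100000\n"      (20ms of CPU time allowed per 100ms period)
 * With an infinite quota, "max 100000\n" is written instead. */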
1565
52fecf20
LP
1566static void cgroup_apply_legacy_cpu_shares(Unit *u, uint64_t shares) {
1567 char buf[DECIMAL_STR_MAX(uint64_t) + 2];
66ebf6c0
TH
1568
1569 xsprintf(buf, "%" PRIu64 "\n", shares);
293d32df 1570 (void) set_attribute_and_warn(u, "cpu", "cpu.shares", buf);
52fecf20
LP
1571}
1572
10f28641 1573static void cgroup_apply_legacy_cpu_quota(Unit *u, usec_t quota, usec_t period) {
52fecf20 1574 char buf[DECIMAL_STR_MAX(usec_t) + 2];
66ebf6c0 1575
10f28641
FB
1576 period = cgroup_cpu_adjust_period_and_log(u, period, quota);
1577
1578 xsprintf(buf, USEC_FMT "\n", period);
293d32df 1579 (void) set_attribute_and_warn(u, "cpu", "cpu.cfs_period_us", buf);
66ebf6c0
TH
1580
1581 if (quota != USEC_INFINITY) {
10f28641 1582 xsprintf(buf, USEC_FMT "\n", MAX(quota * period / USEC_PER_SEC, USEC_PER_MSEC));
293d32df 1583 (void) set_attribute_and_warn(u, "cpu", "cpu.cfs_quota_us", buf);
66ebf6c0 1584 } else
589a5f7a 1585 (void) set_attribute_and_warn(u, "cpu", "cpu.cfs_quota_us", "-1\n");
66ebf6c0
TH
1586}
1587
1588static uint64_t cgroup_cpu_shares_to_weight(uint64_t shares) {
1589 return CLAMP(shares * CGROUP_WEIGHT_DEFAULT / CGROUP_CPU_SHARES_DEFAULT,
1590 CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
1591}
1592
1593static uint64_t cgroup_cpu_weight_to_shares(uint64_t weight) {
c8340822 1594 /* We don't support idle on cgroup v1. */
1595 if (weight == CGROUP_WEIGHT_IDLE)
1596 return CGROUP_CPU_SHARES_MIN;
1597
66ebf6c0
TH
1598 return CLAMP(weight * CGROUP_CPU_SHARES_DEFAULT / CGROUP_WEIGHT_DEFAULT,
1599 CGROUP_CPU_SHARES_MIN, CGROUP_CPU_SHARES_MAX);
1600}
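
The two conversions above are plain linear scalings around the defaults. A minimal stand-alone sketch of the same arithmetic (assuming the usual constants: weight range 1..10000 with default 100, shares range 2..262144 with default 1024; not part of the source):

#include <inttypes.h>
#include <stdio.h>

#define EXAMPLE_CLAMP(x, lo, hi) ((x) < (lo) ? (lo) : (x) > (hi) ? (hi) : (x))

int main(void) {
        uint64_t shares = 2048, weight = 50;

        /* CPUShares=2048 maps to CPUWeight=200 */
        printf("shares %" PRIu64 " -> weight %" PRIu64 "\n",
               shares, EXAMPLE_CLAMP(shares * 100 / 1024, UINT64_C(1), UINT64_C(10000)));

        /* CPUWeight=50 maps to CPUShares=512 */
        printf("weight %" PRIu64 " -> shares %" PRIu64 "\n",
               weight, EXAMPLE_CLAMP(weight * 1024 / 100, UINT64_C(2), UINT64_C(262144)));
        return 0;
}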
1601
2cea199e 1602static void cgroup_apply_unified_cpuset(Unit *u, const CPUSet *cpus, const char *name) {
047f5d63
PH
1603 _cleanup_free_ char *buf = NULL;
1604
2cea199e 1605 buf = cpu_set_to_range_string(cpus);
c259ac9a
LP
1606 if (!buf) {
1607 log_oom();
1608 return;
1609 }
047f5d63
PH
1610
1611 (void) set_attribute_and_warn(u, "cpuset", name, buf);
1612}
1613
508c45da 1614static bool cgroup_context_has_io_config(CGroupContext *c) {
538b4852
TH
1615 return c->io_accounting ||
1616 c->io_weight != CGROUP_WEIGHT_INVALID ||
1617 c->startup_io_weight != CGROUP_WEIGHT_INVALID ||
1618 c->io_device_weights ||
6ae4283c 1619 c->io_device_latencies ||
538b4852
TH
1620 c->io_device_limits;
1621}
1622
508c45da 1623static bool cgroup_context_has_blockio_config(CGroupContext *c) {
538b4852
TH
1624 return c->blockio_accounting ||
1625 c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
1626 c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
1627 c->blockio_device_weights ||
1628 c->blockio_device_bandwidths;
1629}
1630
508c45da 1631static uint64_t cgroup_context_io_weight(CGroupContext *c, ManagerState state) {
9dfb6a3a 1632 if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING) &&
64faf04c
TH
1633 c->startup_io_weight != CGROUP_WEIGHT_INVALID)
1634 return c->startup_io_weight;
d38655d7 1635 if (c->io_weight != CGROUP_WEIGHT_INVALID)
64faf04c 1636 return c->io_weight;
d38655d7 1637 return CGROUP_WEIGHT_DEFAULT;
64faf04c
TH
1638}
1639
508c45da 1640static uint64_t cgroup_context_blkio_weight(CGroupContext *c, ManagerState state) {
9dfb6a3a 1641 if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING) &&
64faf04c
TH
1642 c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
1643 return c->startup_blockio_weight;
d38655d7 1644 if (c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
64faf04c 1645 return c->blockio_weight;
d38655d7 1646 return CGROUP_BLKIO_WEIGHT_DEFAULT;
64faf04c
TH
1647}
1648
508c45da 1649static uint64_t cgroup_weight_blkio_to_io(uint64_t blkio_weight) {
538b4852
TH
1650 return CLAMP(blkio_weight * CGROUP_WEIGHT_DEFAULT / CGROUP_BLKIO_WEIGHT_DEFAULT,
1651 CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
1652}
1653
508c45da 1654static uint64_t cgroup_weight_io_to_blkio(uint64_t io_weight) {
538b4852
TH
1655 return CLAMP(io_weight * CGROUP_BLKIO_WEIGHT_DEFAULT / CGROUP_WEIGHT_DEFAULT,
1656 CGROUP_BLKIO_WEIGHT_MIN, CGROUP_BLKIO_WEIGHT_MAX);
1657}
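
Worked examples of the blkio/io weight scaling above, assuming the usual defaults (CGROUP_BLKIO_WEIGHT_DEFAULT=500, CGROUP_WEIGHT_DEFAULT=100); illustrative only:

/* blkio weight 1000 (the blkio maximum) -> io weight 1000 * 100 / 500 = 200
 * io weight 10                          -> blkio weight 10 * 500 / 100 = 50 */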
1658
3e6eafdd 1659static int set_bfq_weight(Unit *u, const char *controller, dev_t dev, uint64_t io_weight) {
1cf4a685
MK
1660 static const char * const prop_names[] = {
1661 "IOWeight",
1662 "BlockIOWeight",
1663 "IODeviceWeight",
1664 "BlockIODeviceWeight",
1665 };
8d75f60e 1666 static bool warned = false;
9f0c0c4e 1667 char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+STRLEN("\n")];
bec17e80
MK
1668 const char *p;
1669 uint64_t bfq_weight;
8d75f60e 1670 int r;
bec17e80 1671
9cc54544
LP
1672 assert(u);
1673
1674 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
1675 if (!crt || !crt->cgroup_path)
1676 return -EOWNERDEAD;
1677
bec17e80
MK
1678 /* FIXME: drop this function when distro kernels properly support BFQ through "io.weight"
1679 * See also: https://github.com/systemd/systemd/pull/13335 and
1680 * https://github.com/torvalds/linux/commit/65752aef0a407e1ef17ec78a7fc31ba4e0b360f9. */
1681 p = strjoina(controller, ".bfq.weight");
1682 /* Adjust to the kernel range of 1..1000; the default is 100. */
1683 bfq_weight = BFQ_WEIGHT(io_weight);
1684
9f0c0c4e 1685 if (major(dev) > 0)
ec61371f 1686 xsprintf(buf, DEVNUM_FORMAT_STR " %" PRIu64 "\n", DEVNUM_FORMAT_VAL(dev), bfq_weight);
9f0c0c4e
MK
1687 else
1688 xsprintf(buf, "%" PRIu64 "\n", bfq_weight);
bec17e80 1689
9cc54544 1690 r = cg_set_attribute(controller, crt->cgroup_path, p, buf);
8d75f60e
MK
1691
1692 /* FIXME: drop this when kernels prior to
1693 * 795fe54c2a82 ("bfq: Add per-device weight") v5.4
1694 * are no longer interesting. Old kernels will fail with EINVAL, while new kernels won't return
1695 * EINVAL on properly formatted input by us. Treat EINVAL accordingly. */
3e6eafdd
MK
1696 if (r == -EINVAL && major(dev) > 0) {
1697 if (!warned) {
1698 log_unit_warning(u, "Kernel version does not accept per-device setting in %s.", p);
1699 warned = true;
1700 }
1701 r = -EOPNOTSUPP; /* mask as unconfigured device */
1702 } else if (r >= 0 && io_weight != bfq_weight)
1cf4a685
MK
1703 log_unit_debug(u, "%s=%" PRIu64 " scaled to %s=%" PRIu64,
1704 prop_names[2*(major(dev) > 0) + streq(controller, "blkio")],
bec17e80 1705 io_weight, p, bfq_weight);
3e6eafdd 1706 return r;
bec17e80
MK
1707}
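
For illustration, and assuming BFQ_WEIGHT() maps the default weight of 100 onto the BFQ default of 100 (hypothetical example device 8:0, not from the source), the attribute writes look like this:

/* whole cgroup (major(dev) == 0): "100\n"      written to io.bfq.weight or blkio.bfq.weight
 * per device   (major(dev) > 0):  "8:0 100\n"  written to the same attribute */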
1708
f29ff115 1709static void cgroup_apply_io_device_weight(Unit *u, const char *dev_path, uint64_t io_weight) {
64faf04c
TH
1710 char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
1711 dev_t dev;
3e6eafdd 1712 int r, r1, r2;
64faf04c 1713
9cc54544
LP
1714 assert(u);
1715
1716 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
1717 if (!crt || !crt->cgroup_path)
1718 return;
1719
3e6eafdd 1720 if (lookup_block_device(dev_path, &dev) < 0)
64faf04c
TH
1721 return;
1722
3e6eafdd 1723 r1 = set_bfq_weight(u, "io", dev, io_weight);
9f0c0c4e 1724
ec61371f 1725 xsprintf(buf, DEVNUM_FORMAT_STR " %" PRIu64 "\n", DEVNUM_FORMAT_VAL(dev), io_weight);
9cc54544 1726 r2 = cg_set_attribute("io", crt->cgroup_path, "io.weight", buf);
3e6eafdd
MK
1727
1728 /* Look at the configured device; when both writes fail, prefer the io.weight errno. */
1729 r = r2 == -EOPNOTSUPP ? r1 : r2;
1730
1731 if (r < 0)
1732 log_unit_full_errno(u, LOG_LEVEL_CGROUP_WRITE(r),
1733 r, "Failed to set 'io[.bfq].weight' attribute on '%s' to '%.*s': %m",
9cc54544 1734 empty_to_root(crt->cgroup_path), (int) strcspn(buf, NEWLINE), buf);
64faf04c
TH
1735}
1736
f29ff115 1737static void cgroup_apply_blkio_device_weight(Unit *u, const char *dev_path, uint64_t blkio_weight) {
64faf04c
TH
1738 char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
1739 dev_t dev;
1740 int r;
1741
1742 r = lookup_block_device(dev_path, &dev);
1743 if (r < 0)
1744 return;
1745
ec61371f 1746 xsprintf(buf, DEVNUM_FORMAT_STR " %" PRIu64 "\n", DEVNUM_FORMAT_VAL(dev), blkio_weight);
293d32df 1747 (void) set_attribute_and_warn(u, "blkio", "blkio.weight_device", buf);
64faf04c
TH
1748}
1749
6ae4283c
TH
1750static void cgroup_apply_io_device_latency(Unit *u, const char *dev_path, usec_t target) {
1751 char buf[DECIMAL_STR_MAX(dev_t)*2+2+7+DECIMAL_STR_MAX(uint64_t)+1];
1752 dev_t dev;
1753 int r;
1754
1755 r = lookup_block_device(dev_path, &dev);
1756 if (r < 0)
1757 return;
1758
1759 if (target != USEC_INFINITY)
ec61371f 1760 xsprintf(buf, DEVNUM_FORMAT_STR " target=%" PRIu64 "\n", DEVNUM_FORMAT_VAL(dev), target);
6ae4283c 1761 else
ec61371f 1762 xsprintf(buf, DEVNUM_FORMAT_STR " target=max\n", DEVNUM_FORMAT_VAL(dev));
6ae4283c 1763
293d32df 1764 (void) set_attribute_and_warn(u, "io", "io.latency", buf);
6ae4283c
TH
1765}
1766
17ae2780 1767static void cgroup_apply_io_device_limit(Unit *u, const char *dev_path, uint64_t *limits) {
4c1f9343
ZJS
1768 char limit_bufs[_CGROUP_IO_LIMIT_TYPE_MAX][DECIMAL_STR_MAX(uint64_t)],
1769 buf[DECIMAL_STR_MAX(dev_t)*2+2+(6+DECIMAL_STR_MAX(uint64_t)+1)*4];
64faf04c 1770 dev_t dev;
64faf04c 1771
4c1f9343 1772 if (lookup_block_device(dev_path, &dev) < 0)
17ae2780 1773 return;
64faf04c 1774
4c1f9343 1775 for (CGroupIOLimitType type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
17ae2780 1776 if (limits[type] != cgroup_io_limit_defaults[type])
64faf04c 1777 xsprintf(limit_bufs[type], "%" PRIu64, limits[type]);
17ae2780 1778 else
64faf04c 1779 xsprintf(limit_bufs[type], "%s", limits[type] == CGROUP_LIMIT_MAX ? "max" : "0");
64faf04c 1780
ec61371f 1781 xsprintf(buf, DEVNUM_FORMAT_STR " rbps=%s wbps=%s riops=%s wiops=%s\n", DEVNUM_FORMAT_VAL(dev),
64faf04c
TH
1782 limit_bufs[CGROUP_IO_RBPS_MAX], limit_bufs[CGROUP_IO_WBPS_MAX],
1783 limit_bufs[CGROUP_IO_RIOPS_MAX], limit_bufs[CGROUP_IO_WIOPS_MAX]);
293d32df 1784 (void) set_attribute_and_warn(u, "io", "io.max", buf);
64faf04c
TH
1785}
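
As an illustration of the io.max line built above (hypothetical device 8:0 with only IOReadBandwidthMax=1M set, everything else left at its default of CGROUP_LIMIT_MAX):

/* "8:0 rbps=1048576 wbps=max riops=max wiops=max\n" */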
1786
17ae2780 1787static void cgroup_apply_blkio_device_limit(Unit *u, const char *dev_path, uint64_t rbps, uint64_t wbps) {
64faf04c
TH
1788 char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
1789 dev_t dev;
64faf04c 1790
4c1f9343 1791 if (lookup_block_device(dev_path, &dev) < 0)
17ae2780 1792 return;
64faf04c 1793
ec61371f 1794 sprintf(buf, DEVNUM_FORMAT_STR " %" PRIu64 "\n", DEVNUM_FORMAT_VAL(dev), rbps);
293d32df 1795 (void) set_attribute_and_warn(u, "blkio", "blkio.throttle.read_bps_device", buf);
64faf04c 1796
ec61371f 1797 sprintf(buf, DEVNUM_FORMAT_STR " %" PRIu64 "\n", DEVNUM_FORMAT_VAL(dev), wbps);
293d32df 1798 (void) set_attribute_and_warn(u, "blkio", "blkio.throttle.write_bps_device", buf);
64faf04c
TH
1799}
1800
c52db42b
CD
1801static bool unit_has_unified_memory_config(Unit *u) {
1802 CGroupContext *c;
1803
1804 assert(u);
1805
806a9362 1806 assert_se(c = unit_get_cgroup_context(u));
c52db42b 1807
53fda560
LB
1808 return unit_get_ancestor_memory_min(u) > 0 ||
1809 unit_get_ancestor_memory_low(u) > 0 || unit_get_ancestor_startup_memory_low(u) > 0 ||
1810 c->memory_high != CGROUP_LIMIT_MAX || c->startup_memory_high_set ||
1811 c->memory_max != CGROUP_LIMIT_MAX || c->startup_memory_max_set ||
1812 c->memory_swap_max != CGROUP_LIMIT_MAX || c->startup_memory_swap_max_set ||
1813 c->memory_zswap_max != CGROUP_LIMIT_MAX || c->startup_memory_zswap_max_set;
da4d897e
TH
1814}
1815
f29ff115 1816static void cgroup_apply_unified_memory_limit(Unit *u, const char *file, uint64_t v) {
589a5f7a 1817 char buf[DECIMAL_STR_MAX(uint64_t) + 1] = "max\n";
da4d897e
TH
1818
1819 if (v != CGROUP_LIMIT_MAX)
1820 xsprintf(buf, "%" PRIu64 "\n", v);
1821
293d32df 1822 (void) set_attribute_and_warn(u, "memory", file, buf);
da4d897e
TH
1823}
1824
0f2d84d2 1825static void cgroup_apply_firewall(Unit *u) {
0f2d84d2
LP
1826 assert(u);
1827
acf7f253 1828 /* Best-effort: let's apply IP firewalling and/or accounting if that's enabled */
906c06f6 1829
acf7f253 1830 if (bpf_firewall_compile(u) < 0)
906c06f6
DM
1831 return;
1832
fab34748 1833 (void) bpf_firewall_load_custom(u);
906c06f6 1834 (void) bpf_firewall_install(u);
906c06f6
DM
1835}
1836
49b6babb 1837void unit_modify_nft_set(Unit *u, bool add) {
dc7d69b3 1838 int r;
dc7d69b3
TM
1839
1840 assert(u);
1841
1842 if (!MANAGER_IS_SYSTEM(u->manager))
1843 return;
1844
49b6babb
LP
1845 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1846 return;
1847
dc7d69b3
TM
1848 if (cg_all_unified() <= 0)
1849 return;
1850
9cc54544
LP
1851 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
1852 if (!crt || crt->cgroup_id == 0)
dc7d69b3
TM
1853 return;
1854
1855 if (!u->manager->fw_ctx) {
1856 r = fw_ctx_new_full(&u->manager->fw_ctx, /* init_tables= */ false);
1857 if (r < 0)
1858 return;
1859
1860 assert(u->manager->fw_ctx);
1861 }
1862
49b6babb
LP
1863 CGroupContext *c = ASSERT_PTR(unit_get_cgroup_context(u));
1864
dc7d69b3 1865 FOREACH_ARRAY(nft_set, c->nft_set_context.sets, c->nft_set_context.n_sets) {
b2082753
TM
1866 if (nft_set->source != NFT_SET_SOURCE_CGROUP)
1867 continue;
1868
9cc54544 1869 uint64_t element = crt->cgroup_id;
dc7d69b3
TM
1870
1871 r = nft_set_element_modify_any(u->manager->fw_ctx, add, nft_set->nfproto, nft_set->table, nft_set->set, &element, sizeof(element));
1872 if (r < 0)
1873 log_warning_errno(r, "Failed to %s NFT set: family %s, table %s, set %s, cgroup %" PRIu64 ", ignoring: %m",
9cc54544 1874 add ? "add" : "delete", nfproto_to_string(nft_set->nfproto), nft_set->table, nft_set->set, crt->cgroup_id);
dc7d69b3
TM
1875 else
1876 log_debug("%s NFT set: family %s, table %s, set %s, cgroup %" PRIu64,
9cc54544 1877 add ? "Added" : "Deleted", nfproto_to_string(nft_set->nfproto), nft_set->table, nft_set->set, crt->cgroup_id);
dc7d69b3
TM
1878 }
1879}
1880
a8e5eb17
JK
1881static void cgroup_apply_socket_bind(Unit *u) {
1882 assert(u);
1883
cd09a5f3 1884 (void) bpf_socket_bind_install(u);
a8e5eb17
JK
1885}
1886
6f50d4f7
MV
1887static void cgroup_apply_restrict_network_interfaces(Unit *u) {
1888 assert(u);
1889
62e22490 1890 (void) bpf_restrict_ifaces_install(u);
6f50d4f7
MV
1891}
1892
8b139557 1893static int cgroup_apply_devices(Unit *u) {
76dc1725 1894 _cleanup_(bpf_program_freep) BPFProgram *prog = NULL;
8b139557 1895 CGroupContext *c;
45669ae2 1896 CGroupDevicePolicy policy;
8b139557
ZJS
1897 int r;
1898
1899 assert_se(c = unit_get_cgroup_context(u));
9cc54544
LP
1900
1901 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
1902 if (!crt || !crt->cgroup_path)
1903 return -EOWNERDEAD;
8b139557 1904
45669ae2
ZJS
1905 policy = c->device_policy;
1906
8b139557 1907 if (cg_all_unified() > 0) {
45669ae2 1908 r = bpf_devices_cgroup_init(&prog, policy, c->device_allow);
8b139557
ZJS
1909 if (r < 0)
1910 return log_unit_warning_errno(u, r, "Failed to initialize device control bpf program: %m");
1911
1912 } else {
1913 /* Changing the devices list of a populated cgroup might result in EINVAL, hence ignore
1914 * EINVAL here. */
1915
45669ae2 1916 if (c->device_allow || policy != CGROUP_DEVICE_POLICY_AUTO)
9cc54544 1917 r = cg_set_attribute("devices", crt->cgroup_path, "devices.deny", "a");
8b139557 1918 else
9cc54544 1919 r = cg_set_attribute("devices", crt->cgroup_path, "devices.allow", "a");
8b139557 1920 if (r < 0)
8ed6f81b
YW
1921 log_unit_full_errno(u, IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES, -EPERM) ? LOG_DEBUG : LOG_WARNING, r,
1922 "Failed to reset devices.allow/devices.deny: %m");
8b139557
ZJS
1923 }
1924
6b000af4 1925 bool allow_list_static = policy == CGROUP_DEVICE_POLICY_CLOSED ||
45669ae2 1926 (policy == CGROUP_DEVICE_POLICY_AUTO && c->device_allow);
8b139557 1927
958b73be
LP
1928 bool any = false;
1929 if (allow_list_static) {
9cc54544 1930 r = bpf_devices_allow_list_static(prog, crt->cgroup_path);
958b73be
LP
1931 if (r > 0)
1932 any = true;
1933 }
1934
8b139557 1935 LIST_FOREACH(device_allow, a, c->device_allow) {
a1044811
LP
1936 const char *val;
1937
1938 if (a->permissions == 0)
8b139557 1939 continue;
8b139557
ZJS
1940
1941 if (path_startswith(a->path, "/dev/"))
9cc54544 1942 r = bpf_devices_allow_list_device(prog, crt->cgroup_path, a->path, a->permissions);
8b139557 1943 else if ((val = startswith(a->path, "block-")))
9cc54544 1944 r = bpf_devices_allow_list_major(prog, crt->cgroup_path, val, 'b', a->permissions);
8b139557 1945 else if ((val = startswith(a->path, "char-")))
9cc54544 1946 r = bpf_devices_allow_list_major(prog, crt->cgroup_path, val, 'c', a->permissions);
45669ae2 1947 else {
8b139557 1948 log_unit_debug(u, "Ignoring device '%s' while writing cgroup attribute.", a->path);
45669ae2
ZJS
1949 continue;
1950 }
1951
958b73be 1952 if (r > 0)
45669ae2
ZJS
1953 any = true;
1954 }
1955
1956 if (prog && !any) {
4e494e6a 1957 log_unit_warning(u, "No devices matched by device filter.");
45669ae2
ZJS
1958
1959 /* The kernel verifier would reject a program we would build with the normal intro and outro
6b000af4 1960 but no allow-listing rules (outro would contain an unreachable instruction for successful
45669ae2
ZJS
1961 return). */
1962 policy = CGROUP_DEVICE_POLICY_STRICT;
8b139557
ZJS
1963 }
1964
9cc54544 1965 r = bpf_devices_apply_policy(&prog, policy, any, crt->cgroup_path, &crt->bpf_device_control_installed);
8b139557
ZJS
1966 if (r < 0) {
1967 static bool warned = false;
1968
1969 log_full_errno(warned ? LOG_DEBUG : LOG_WARNING, r,
1970 "Unit %s configures device ACL, but the local system doesn't seem to support the BPF-based device controller.\n"
1971 "Proceeding WITHOUT applying ACL (all devices will be accessible)!\n"
1972 "(This warning is only shown for the first loaded unit using device ACL.)", u->id);
1973
1974 warned = true;
1975 }
1976 return r;
1977}
1978
17283ce7
YW
1979static void set_io_weight(Unit *u, uint64_t weight) {
1980 char buf[STRLEN("default \n")+DECIMAL_STR_MAX(uint64_t)];
17283ce7
YW
1981
1982 assert(u);
29eb0eef 1983
3e6eafdd 1984 (void) set_bfq_weight(u, "io", makedev(0, 0), weight);
29eb0eef 1985
29eb0eef 1986 xsprintf(buf, "default %" PRIu64 "\n", weight);
17283ce7
YW
1987 (void) set_attribute_and_warn(u, "io", "io.weight", buf);
1988}
1989
1990static void set_blkio_weight(Unit *u, uint64_t weight) {
1991 char buf[STRLEN("\n")+DECIMAL_STR_MAX(uint64_t)];
17283ce7
YW
1992
1993 assert(u);
29eb0eef 1994
3e6eafdd 1995 (void) set_bfq_weight(u, "blkio", makedev(0, 0), weight);
17283ce7
YW
1996
1997 xsprintf(buf, "%" PRIu64 "\n", weight);
1998 (void) set_attribute_and_warn(u, "blkio", "blkio.weight", buf);
29eb0eef
ZJS
1999}
2000
506ea51b
JK
2001static void cgroup_apply_bpf_foreign_program(Unit *u) {
2002 assert(u);
2003
2004 (void) bpf_foreign_install(u);
2005}
2006
906c06f6
DM
2007static void cgroup_context_apply(
2008 Unit *u,
2009 CGroupMask apply_mask,
906c06f6
DM
2010 ManagerState state) {
2011
9cc54544 2012 bool is_host_root, is_local_root;
f29ff115
TH
2013 const char *path;
2014 CGroupContext *c;
4ad49000
LP
2015 int r;
2016
f29ff115
TH
2017 assert(u);
2018
906c06f6 2019 /* Nothing to do? Exit early! */
17f14955 2020 if (apply_mask == 0)
4ad49000 2021 return;
8e274523 2022
52fecf20
LP
2023 /* Some cgroup attributes are not supported on the host root cgroup, hence silently ignore them here. And other
2024 * attributes should only be managed for cgroups further down the tree. */
2025 is_local_root = unit_has_name(u, SPECIAL_ROOT_SLICE);
2026 is_host_root = unit_has_host_root_cgroup(u);
f3725e64
LP
2027
2028 assert_se(c = unit_get_cgroup_context(u));
9cc54544
LP
2029
2030 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
2031 if (!crt || !crt->cgroup_path)
2032 return;
2033
2034 path = crt->cgroup_path;
f3725e64 2035
52fecf20 2036 if (is_local_root) /* Make sure we don't try to display messages with an empty path. */
6da13913 2037 path = "/";
01efdf13 2038
be2c0327
LP
2039 /* We generally ignore errors caused by read-only mounted cgroup trees (assuming we are running in a container
2040 * then), and missing cgroups, i.e. EROFS and ENOENT. */
714e2e1d 2041
be2c0327
LP
2042 /* In fully unified mode these attributes don't exist on the host cgroup root. On legacy the weights exist, but
2043 * setting the weight makes very little sense on the host root cgroup, as there are no other cgroups at this
2044 * level. The quota exists there too, but any attempt to write to it is refused with EINVAL. Inside of
4e1dfa45 2045 * containers we want to leave control of these to the container manager (and if cgroup v2 delegation is used
be2c0327
LP
2046 * we couldn't even write to them if we wanted to). */
2047 if ((apply_mask & CGROUP_MASK_CPU) && !is_local_root) {
8e274523 2048
b4cccbc1 2049 if (cg_all_unified() > 0) {
be2c0327 2050 uint64_t weight;
b2f8b02e 2051
be2c0327
LP
2052 if (cgroup_context_has_cpu_weight(c))
2053 weight = cgroup_context_cpu_weight(c, state);
2054 else if (cgroup_context_has_cpu_shares(c)) {
2055 uint64_t shares;
66ebf6c0 2056
be2c0327
LP
2057 shares = cgroup_context_cpu_shares(c, state);
2058 weight = cgroup_cpu_shares_to_weight(shares);
66ebf6c0 2059
be2c0327
LP
2060 log_cgroup_compat(u, "Applying [Startup]CPUShares=%" PRIu64 " as [Startup]CPUWeight=%" PRIu64 " on %s",
2061 shares, weight, path);
2062 } else
2063 weight = CGROUP_WEIGHT_DEFAULT;
66ebf6c0 2064
c8340822 2065 cgroup_apply_unified_cpu_idle(u, weight);
be2c0327 2066 cgroup_apply_unified_cpu_weight(u, weight);
10f28641 2067 cgroup_apply_unified_cpu_quota(u, c->cpu_quota_per_sec_usec, c->cpu_quota_period_usec);
66ebf6c0 2068
52fecf20 2069 } else {
be2c0327 2070 uint64_t shares;
52fecf20 2071
be2c0327
LP
2072 if (cgroup_context_has_cpu_weight(c)) {
2073 uint64_t weight;
52fecf20 2074
be2c0327
LP
2075 weight = cgroup_context_cpu_weight(c, state);
2076 shares = cgroup_cpu_weight_to_shares(weight);
52fecf20 2077
be2c0327
LP
2078 log_cgroup_compat(u, "Applying [Startup]CPUWeight=%" PRIu64 " as [Startup]CPUShares=%" PRIu64 " on %s",
2079 weight, shares, path);
2080 } else if (cgroup_context_has_cpu_shares(c))
2081 shares = cgroup_context_cpu_shares(c, state);
2082 else
2083 shares = CGROUP_CPU_SHARES_DEFAULT;
66ebf6c0 2084
be2c0327 2085 cgroup_apply_legacy_cpu_shares(u, shares);
10f28641 2086 cgroup_apply_legacy_cpu_quota(u, c->cpu_quota_per_sec_usec, c->cpu_quota_period_usec);
66ebf6c0 2087 }
4ad49000
LP
2088 }
2089
047f5d63 2090 if ((apply_mask & CGROUP_MASK_CPUSET) && !is_local_root) {
31d3a520
PM
2091 cgroup_apply_unified_cpuset(u, cgroup_context_allowed_cpus(c, state), "cpuset.cpus");
2092 cgroup_apply_unified_cpuset(u, cgroup_context_allowed_mems(c, state), "cpuset.mems");
047f5d63
PH
2093 }
2094
4e1dfa45 2095 /* The 'io' controller attributes are not exported on the host's root cgroup (being a pure cgroup v2
52fecf20
LP
2096 * controller), and in case of containers we want to leave control of these attributes to the container manager
2097 * (and we couldn't access that stuff anyway, even if we tried if proper delegation is used). */
2098 if ((apply_mask & CGROUP_MASK_IO) && !is_local_root) {
52fecf20
LP
2099 bool has_io, has_blockio;
2100 uint64_t weight;
13c31542 2101
52fecf20
LP
2102 has_io = cgroup_context_has_io_config(c);
2103 has_blockio = cgroup_context_has_blockio_config(c);
13c31542 2104
52fecf20
LP
2105 if (has_io)
2106 weight = cgroup_context_io_weight(c, state);
2107 else if (has_blockio) {
2108 uint64_t blkio_weight;
128fadc9 2109
52fecf20
LP
2110 blkio_weight = cgroup_context_blkio_weight(c, state);
2111 weight = cgroup_weight_blkio_to_io(blkio_weight);
128fadc9 2112
67e2ea15 2113 log_cgroup_compat(u, "Applying [Startup]BlockIOWeight=%" PRIu64 " as [Startup]IOWeight=%" PRIu64,
52fecf20
LP
2114 blkio_weight, weight);
2115 } else
2116 weight = CGROUP_WEIGHT_DEFAULT;
13c31542 2117
17283ce7 2118 set_io_weight(u, weight);
2dbc45ae 2119
52fecf20 2120 if (has_io) {
52fecf20
LP
2121 LIST_FOREACH(device_weights, w, c->io_device_weights)
2122 cgroup_apply_io_device_weight(u, w->path, w->weight);
128fadc9 2123
52fecf20
LP
2124 LIST_FOREACH(device_limits, limit, c->io_device_limits)
2125 cgroup_apply_io_device_limit(u, limit->path, limit->limits);
6ae4283c 2126
52fecf20
LP
2127 LIST_FOREACH(device_latencies, latency, c->io_device_latencies)
2128 cgroup_apply_io_device_latency(u, latency->path, latency->target_usec);
6ae4283c 2129
52fecf20 2130 } else if (has_blockio) {
52fecf20
LP
2131 LIST_FOREACH(device_weights, w, c->blockio_device_weights) {
2132 weight = cgroup_weight_blkio_to_io(w->weight);
17ae2780 2133
67e2ea15 2134 log_cgroup_compat(u, "Applying BlockIODeviceWeight=%" PRIu64 " as IODeviceWeight=%" PRIu64 " for %s",
52fecf20 2135 w->weight, weight, w->path);
538b4852 2136
52fecf20
LP
2137 cgroup_apply_io_device_weight(u, w->path, weight);
2138 }
538b4852 2139
17ae2780 2140 LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
538b4852 2141 uint64_t limits[_CGROUP_IO_LIMIT_TYPE_MAX];
538b4852 2142
e8616626 2143 for (CGroupIOLimitType type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
538b4852
TH
2144 limits[type] = cgroup_io_limit_defaults[type];
2145
2146 limits[CGROUP_IO_RBPS_MAX] = b->rbps;
2147 limits[CGROUP_IO_WBPS_MAX] = b->wbps;
2148
67e2ea15 2149 log_cgroup_compat(u, "Applying BlockIO{Read|Write}Bandwidth=%" PRIu64 " %" PRIu64 " as IO{Read|Write}BandwidthMax= for %s",
128fadc9
TH
2150 b->rbps, b->wbps, b->path);
2151
17ae2780 2152 cgroup_apply_io_device_limit(u, b->path, limits);
538b4852 2153 }
13c31542
TH
2154 }
2155 }
2156
906c06f6 2157 if (apply_mask & CGROUP_MASK_BLKIO) {
52fecf20 2158 bool has_io, has_blockio;
4ad49000 2159
52fecf20
LP
2160 has_io = cgroup_context_has_io_config(c);
2161 has_blockio = cgroup_context_has_blockio_config(c);
2162
2163 /* Applying a 'weight' never makes sense for the host root cgroup, and for containers this should be
2164 * left to our container manager, too. */
2165 if (!is_local_root) {
64faf04c 2166 uint64_t weight;
64faf04c 2167
7d862ab8 2168 if (has_io) {
52fecf20 2169 uint64_t io_weight;
128fadc9 2170
52fecf20 2171 io_weight = cgroup_context_io_weight(c, state);
538b4852 2172 weight = cgroup_weight_io_to_blkio(cgroup_context_io_weight(c, state));
128fadc9 2173
67e2ea15 2174 log_cgroup_compat(u, "Applying [Startup]IOWeight=%" PRIu64 " as [Startup]BlockIOWeight=%" PRIu64,
128fadc9 2175 io_weight, weight);
7d862ab8
TH
2176 } else if (has_blockio)
2177 weight = cgroup_context_blkio_weight(c, state);
2178 else
538b4852 2179 weight = CGROUP_BLKIO_WEIGHT_DEFAULT;
64faf04c 2180
17283ce7 2181 set_blkio_weight(u, weight);
35e7a62c 2182
03677889 2183 if (has_io)
128fadc9
TH
2184 LIST_FOREACH(device_weights, w, c->io_device_weights) {
2185 weight = cgroup_weight_io_to_blkio(w->weight);
2186
67e2ea15 2187 log_cgroup_compat(u, "Applying IODeviceWeight=%" PRIu64 " as BlockIODeviceWeight=%" PRIu64 " for %s",
128fadc9
TH
2188 w->weight, weight, w->path);
2189
2190 cgroup_apply_blkio_device_weight(u, w->path, weight);
2191 }
03677889 2192 else if (has_blockio)
7d862ab8
TH
2193 LIST_FOREACH(device_weights, w, c->blockio_device_weights)
2194 cgroup_apply_blkio_device_weight(u, w->path, w->weight);
4ad49000
LP
2195 }
2196
5238e957 2197 /* The bandwidth limits make sense to apply to the host's root but not to container
52fecf20
LP
2198 * roots, as there we want the container manager to handle them */
2199 if (is_host_root || !is_local_root) {
03677889 2200 if (has_io)
52fecf20 2201 LIST_FOREACH(device_limits, l, c->io_device_limits) {
67e2ea15 2202 log_cgroup_compat(u, "Applying IO{Read|Write}Bandwidth=%" PRIu64 " %" PRIu64 " as BlockIO{Read|Write}BandwidthMax= for %s",
52fecf20 2203 l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX], l->path);
128fadc9 2204
52fecf20
LP
2205 cgroup_apply_blkio_device_limit(u, l->path, l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX]);
2206 }
03677889 2207 else if (has_blockio)
52fecf20
LP
2208 LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths)
2209 cgroup_apply_blkio_device_limit(u, b->path, b->rbps, b->wbps);
d686d8a9 2210 }
8e274523
LP
2211 }
2212
be2c0327
LP
2213 /* In unified mode 'memory' attributes do not exist on the root cgroup. In legacy mode 'memory.limit_in_bytes'
2214 * exists on the root cgroup, but any writes to it are refused with EINVAL. And if we run in a container we
4e1dfa45 2215 * want to leave control to the container manager (and if proper cgroup v2 delegation is used we couldn't even
be2c0327
LP
2216 * write to this if we wanted to.) */
2217 if ((apply_mask & CGROUP_MASK_MEMORY) && !is_local_root) {
efdb0237 2218
52fecf20 2219 if (cg_all_unified() > 0) {
53fda560 2220 uint64_t max, swap_max = CGROUP_LIMIT_MAX, zswap_max = CGROUP_LIMIT_MAX, high = CGROUP_LIMIT_MAX;
be2c0327 2221
c52db42b 2222 if (unit_has_unified_memory_config(u)) {
53fda560
LB
2223 bool startup = IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING, MANAGER_STOPPING);
2224
2225 high = startup && c->startup_memory_high_set ? c->startup_memory_high : c->memory_high;
2226 max = startup && c->startup_memory_max_set ? c->startup_memory_max : c->memory_max;
2227 swap_max = startup && c->startup_memory_swap_max_set ? c->startup_memory_swap_max : c->memory_swap_max;
2228 zswap_max = startup && c->startup_memory_zswap_max_set ? c->startup_memory_zswap_max : c->memory_zswap_max;
be2c0327
LP
2229 } else {
2230 max = c->memory_limit;
efdb0237 2231
be2c0327
LP
2232 if (max != CGROUP_LIMIT_MAX)
2233 log_cgroup_compat(u, "Applying MemoryLimit=%" PRIu64 " as MemoryMax=", max);
128fadc9 2234 }
da4d897e 2235
64fe532e 2236 cgroup_apply_unified_memory_limit(u, "memory.min", unit_get_ancestor_memory_min(u));
c52db42b 2237 cgroup_apply_unified_memory_limit(u, "memory.low", unit_get_ancestor_memory_low(u));
53fda560 2238 cgroup_apply_unified_memory_limit(u, "memory.high", high);
be2c0327
LP
2239 cgroup_apply_unified_memory_limit(u, "memory.max", max);
2240 cgroup_apply_unified_memory_limit(u, "memory.swap.max", swap_max);
d7fe0a67 2241 cgroup_apply_unified_memory_limit(u, "memory.zswap.max", zswap_max);
128fadc9 2242
afcfaa69 2243 (void) set_attribute_and_warn(u, "memory", "memory.oom.group", one_zero(c->memory_oom_group));
1ea275f1 2244 (void) set_attribute_and_warn(u, "memory", "memory.zswap.writeback", one_zero(c->memory_zswap_writeback));
afcfaa69 2245
be2c0327
LP
2246 } else {
2247 char buf[DECIMAL_STR_MAX(uint64_t) + 1];
2248 uint64_t val;
52fecf20 2249
c52db42b 2250 if (unit_has_unified_memory_config(u)) {
be2c0327 2251 val = c->memory_max;
b7a41491
MK
2252 if (val != CGROUP_LIMIT_MAX)
2253 log_cgroup_compat(u, "Applying MemoryMax=%" PRIu64 " as MemoryLimit=", val);
be2c0327
LP
2254 } else
2255 val = c->memory_limit;
78a4ee59 2256
be2c0327
LP
2257 if (val == CGROUP_LIMIT_MAX)
2258 strncpy(buf, "-1\n", sizeof(buf));
2259 else
2260 xsprintf(buf, "%" PRIu64 "\n", val);
2261
2262 (void) set_attribute_and_warn(u, "memory", "memory.limit_in_bytes", buf);
da4d897e 2263 }
4ad49000 2264 }
8e274523 2265
4e1dfa45 2266 /* On cgroup v2 we can apply BPF everywhere. On cgroup v1 we apply it everywhere except for the root of
52fecf20
LP
2267 * containers, where we leave this to the manager */
2268 if ((apply_mask & (CGROUP_MASK_DEVICES | CGROUP_MASK_BPF_DEVICES)) &&
8b139557
ZJS
2269 (is_host_root || cg_all_unified() > 0 || !is_local_root))
2270 (void) cgroup_apply_devices(u);
03a7b521 2271
00b5974f
LP
2272 if (apply_mask & CGROUP_MASK_PIDS) {
2273
52fecf20 2274 if (is_host_root) {
00b5974f
LP
2275 /* So, the "pids" controller does not expose anything on the root cgroup, in order not to
2276 * replicate knobs exposed elsewhere needlessly. We abstract this away here however, and when
2277 * the knobs of the root cgroup are modified we propagate this to the relevant sysctls. There's a
2278 * non-obvious asymmetry however: unlike the cgroup properties we don't really want to take
2279 * exclusive ownership of the sysctls, but we still want to honour things if the user sets
2280 * limits. Hence we employ sort of a one-way strategy: when the user sets a bounded limit
2281 * through us it counts. When the user afterwards unsets it again (i.e. sets it to unbounded)
2282 * it also counts. But if the user never set a limit through us (i.e. we are the default of
2283 * "unbounded") we leave things unmodified. For this we manage a global boolean that we turn on
2284 * the first time we set a limit. Note that this boolean is flushed out on manager reload,
5238e957 2285 * which is desirable so that there's an official way to release control of the sysctl from
00b5974f
LP
2286 * systemd: set the limit to unbounded and reload. */
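
A sketch of the one-way strategy described above, using a hypothetical TasksMax= value (not from the source):

/* TasksMax=4096 set on the host root -> tasks limit sysctls written, sysctl_pid_max_changed = true
 * TasksMax= later reset to infinity  -> TASKS_MAX written, because the flag is still on
 * never configured through systemd   -> the sysctls are left untouched */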
2287
94f0b13b 2288 if (cgroup_tasks_max_isset(&c->tasks_max)) {
00b5974f 2289 u->manager->sysctl_pid_max_changed = true;
94f0b13b 2290 r = procfs_tasks_set_limit(cgroup_tasks_max_resolve(&c->tasks_max));
00b5974f
LP
2291 } else if (u->manager->sysctl_pid_max_changed)
2292 r = procfs_tasks_set_limit(TASKS_MAX);
2293 else
2294 r = 0;
00b5974f 2295 if (r < 0)
8ed6f81b
YW
2296 log_unit_full_errno(u, LOG_LEVEL_CGROUP_WRITE(r), r,
2297 "Failed to write to tasks limit sysctls: %m");
52fecf20 2298 }
03a7b521 2299
52fecf20
LP
2300 /* The attribute itself is not available on the host root cgroup, and in the container case we want to
2301 * leave it for the container manager. */
2302 if (!is_local_root) {
94f0b13b 2303 if (cgroup_tasks_max_isset(&c->tasks_max)) {
3a0f06c4 2304 char buf[DECIMAL_STR_MAX(uint64_t) + 1];
03a7b521 2305
94f0b13b 2306 xsprintf(buf, "%" PRIu64 "\n", cgroup_tasks_max_resolve(&c->tasks_max));
293d32df 2307 (void) set_attribute_and_warn(u, "pids", "pids.max", buf);
00b5974f 2308 } else
589a5f7a 2309 (void) set_attribute_and_warn(u, "pids", "pids.max", "max\n");
00b5974f 2310 }
03a7b521 2311 }
906c06f6 2312
17f14955 2313 if (apply_mask & CGROUP_MASK_BPF_FIREWALL)
0f2d84d2 2314 cgroup_apply_firewall(u);
506ea51b
JK
2315
2316 if (apply_mask & CGROUP_MASK_BPF_FOREIGN)
2317 cgroup_apply_bpf_foreign_program(u);
a8e5eb17
JK
2318
2319 if (apply_mask & CGROUP_MASK_BPF_SOCKET_BIND)
2320 cgroup_apply_socket_bind(u);
6f50d4f7
MV
2321
2322 if (apply_mask & CGROUP_MASK_BPF_RESTRICT_NETWORK_INTERFACES)
2323 cgroup_apply_restrict_network_interfaces(u);
dc7d69b3 2324
49b6babb 2325 unit_modify_nft_set(u, /* add = */ true);
fb385181
LP
2326}
2327
16492445
LP
2328static bool unit_get_needs_bpf_firewall(Unit *u) {
2329 CGroupContext *c;
16492445
LP
2330 assert(u);
2331
2332 c = unit_get_cgroup_context(u);
2333 if (!c)
2334 return false;
2335
2336 if (c->ip_accounting ||
84ebe6f0
YW
2337 !set_isempty(c->ip_address_allow) ||
2338 !set_isempty(c->ip_address_deny) ||
fab34748
KL
2339 c->ip_filters_ingress ||
2340 c->ip_filters_egress)
16492445
LP
2341 return true;
2342
2343 /* If any parent slice has an IP access list defined, it applies too */
e8616626 2344 for (Unit *p = UNIT_GET_SLICE(u); p; p = UNIT_GET_SLICE(p)) {
16492445
LP
2345 c = unit_get_cgroup_context(p);
2346 if (!c)
2347 return false;
2348
84ebe6f0
YW
2349 if (!set_isempty(c->ip_address_allow) ||
2350 !set_isempty(c->ip_address_deny))
16492445
LP
2351 return true;
2352 }
2353
2354 return false;
2355}
2356
506ea51b
JK
2357static bool unit_get_needs_bpf_foreign_program(Unit *u) {
2358 CGroupContext *c;
2359 assert(u);
2360
2361 c = unit_get_cgroup_context(u);
2362 if (!c)
2363 return false;
2364
64903d18 2365 return !!c->bpf_foreign_programs;
506ea51b
JK
2366}
2367
a8e5eb17
JK
2368static bool unit_get_needs_socket_bind(Unit *u) {
2369 CGroupContext *c;
2370 assert(u);
2371
2372 c = unit_get_cgroup_context(u);
2373 if (!c)
2374 return false;
2375
11ab01e4 2376 return c->socket_bind_allow || c->socket_bind_deny;
a8e5eb17
JK
2377}
2378
6f50d4f7
MV
2379static bool unit_get_needs_restrict_network_interfaces(Unit *u) {
2380 CGroupContext *c;
2381 assert(u);
2382
2383 c = unit_get_cgroup_context(u);
2384 if (!c)
2385 return false;
2386
2387 return !set_isempty(c->restrict_network_interfaces);
2388}
2389
c52db42b 2390static CGroupMask unit_get_cgroup_mask(Unit *u) {
efdb0237 2391 CGroupMask mask = 0;
c52db42b
CD
2392 CGroupContext *c;
2393
2394 assert(u);
2395
806a9362 2396 assert_se(c = unit_get_cgroup_context(u));
c710d3b4 2397
fae9bc29 2398 /* Figure out which controllers we need, based on the cgroup context object */
8e274523 2399
fae9bc29 2400 if (c->cpu_accounting)
f98c2585 2401 mask |= get_cpu_accounting_mask();
fae9bc29
LP
2402
2403 if (cgroup_context_has_cpu_weight(c) ||
66ebf6c0 2404 cgroup_context_has_cpu_shares(c) ||
3a43da28 2405 c->cpu_quota_per_sec_usec != USEC_INFINITY)
fae9bc29 2406 mask |= CGROUP_MASK_CPU;
ecedd90f 2407
31d3a520 2408 if (cgroup_context_has_allowed_cpus(c) || cgroup_context_has_allowed_mems(c))
047f5d63
PH
2409 mask |= CGROUP_MASK_CPUSET;
2410
538b4852
TH
2411 if (cgroup_context_has_io_config(c) || cgroup_context_has_blockio_config(c))
2412 mask |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;
ecedd90f 2413
4ad49000 2414 if (c->memory_accounting ||
da4d897e 2415 c->memory_limit != CGROUP_LIMIT_MAX ||
c52db42b 2416 unit_has_unified_memory_config(u))
efdb0237 2417 mask |= CGROUP_MASK_MEMORY;
8e274523 2418
a931ad47 2419 if (c->device_allow ||
084870f9 2420 c->device_policy != CGROUP_DEVICE_POLICY_AUTO)
084c7007 2421 mask |= CGROUP_MASK_DEVICES | CGROUP_MASK_BPF_DEVICES;
4ad49000 2422
03a7b521 2423 if (c->tasks_accounting ||
94f0b13b 2424 cgroup_tasks_max_isset(&c->tasks_max))
03a7b521
LP
2425 mask |= CGROUP_MASK_PIDS;
2426
fae9bc29 2427 return CGROUP_MASK_EXTEND_JOINED(mask);
8e274523
LP
2428}
2429
53aea74a 2430static CGroupMask unit_get_bpf_mask(Unit *u) {
17f14955
RG
2431 CGroupMask mask = 0;
2432
fae9bc29
LP
2433 /* Figure out which controllers we need, based on the cgroup context, possibly taking into account children
2434 * too. */
2435
17f14955
RG
2436 if (unit_get_needs_bpf_firewall(u))
2437 mask |= CGROUP_MASK_BPF_FIREWALL;
2438
506ea51b
JK
2439 if (unit_get_needs_bpf_foreign_program(u))
2440 mask |= CGROUP_MASK_BPF_FOREIGN;
2441
a8e5eb17
JK
2442 if (unit_get_needs_socket_bind(u))
2443 mask |= CGROUP_MASK_BPF_SOCKET_BIND;
2444
6f50d4f7
MV
2445 if (unit_get_needs_restrict_network_interfaces(u))
2446 mask |= CGROUP_MASK_BPF_RESTRICT_NETWORK_INTERFACES;
2447
17f14955
RG
2448 return mask;
2449}
2450
efdb0237 2451CGroupMask unit_get_own_mask(Unit *u) {
4ad49000 2452 CGroupContext *c;
8e274523 2453
442ce775
LP
2454 /* Returns the mask of controllers the unit needs for itself. If a unit is not properly loaded, return an empty
2455 * mask, as we shouldn't reflect it in the cgroup hierarchy then. */
2456
2457 if (u->load_state != UNIT_LOADED)
2458 return 0;
efdb0237 2459
4ad49000
LP
2460 c = unit_get_cgroup_context(u);
2461 if (!c)
2462 return 0;
8e274523 2463
12b975e0 2464 return unit_get_cgroup_mask(u) | unit_get_bpf_mask(u) | unit_get_delegate_mask(u);
02638280
LP
2465}
2466
2467CGroupMask unit_get_delegate_mask(Unit *u) {
2468 CGroupContext *c;
2469
2470 /* If delegation is turned on, then turn on selected controllers, unless we are on the legacy hierarchy and the
2471 * process we fork into is known to drop privileges, and hence shouldn't get access to the controllers.
19af675e 2472 *
02638280 2473 * Note that on the unified hierarchy it is safe to delegate controllers to unprivileged services. */
a931ad47 2474
1d9cc876 2475 if (!unit_cgroup_delegate(u))
02638280
LP
2476 return 0;
2477
2478 if (cg_all_unified() <= 0) {
a931ad47
LP
2479 ExecContext *e;
2480
2481 e = unit_get_exec_context(u);
02638280
LP
2482 if (e && !exec_context_maintains_privileges(e))
2483 return 0;
a931ad47
LP
2484 }
2485
1d9cc876 2486 assert_se(c = unit_get_cgroup_context(u));
fae9bc29 2487 return CGROUP_MASK_EXTEND_JOINED(c->delegate_controllers);
8e274523
LP
2488}
2489
d9ef5944
MK
2490static CGroupMask unit_get_subtree_mask(Unit *u) {
2491
2492 /* Returns the mask of this subtree, meaning of the group
2493 * itself and its children. */
2494
2495 return unit_get_own_mask(u) | unit_get_members_mask(u);
2496}
2497
efdb0237 2498CGroupMask unit_get_members_mask(Unit *u) {
4ad49000 2499 assert(u);
bc432dc7 2500
02638280 2501 /* Returns the mask of controllers all of the unit's children require, merged */
efdb0237 2502
9cc54544
LP
2503 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
2504 if (crt && crt->cgroup_members_mask_valid)
2505 return crt->cgroup_members_mask; /* Use cached value if possible */
bc432dc7 2506
9cc54544 2507 CGroupMask m = 0;
bc432dc7
LP
2508 if (u->type == UNIT_SLICE) {
2509 Unit *member;
bc432dc7 2510
d219a2b0 2511 UNIT_FOREACH_DEPENDENCY(member, u, UNIT_ATOM_SLICE_OF)
9cc54544 2512 m |= unit_get_subtree_mask(member); /* note that this calls ourselves again, for the children */
bc432dc7
LP
2513 }
2514
9cc54544
LP
2515 if (crt) {
2516 crt->cgroup_members_mask = m;
2517 crt->cgroup_members_mask_valid = true;
2518 }
2519
2520 return m;
246aa6dd
LP
2521}
2522
efdb0237 2523CGroupMask unit_get_siblings_mask(Unit *u) {
12f64221 2524 Unit *slice;
4ad49000 2525 assert(u);
246aa6dd 2526
efdb0237
LP
2527 /* Returns the mask of controllers all of the unit's siblings
2528 * require, i.e. the members mask of the unit's parent slice
2529 * if there is one. */
2530
12f64221
LP
2531 slice = UNIT_GET_SLICE(u);
2532 if (slice)
2533 return unit_get_members_mask(slice);
4ad49000 2534
64e844e5 2535 return unit_get_subtree_mask(u); /* we are the top-level slice */
246aa6dd
LP
2536}
2537
d9ef5944 2538static CGroupMask unit_get_disable_mask(Unit *u) {
4f6f62e4
CD
2539 CGroupContext *c;
2540
2541 c = unit_get_cgroup_context(u);
2542 if (!c)
2543 return 0;
2544
2545 return c->disable_controllers;
2546}
2547
2548CGroupMask unit_get_ancestor_disable_mask(Unit *u) {
2549 CGroupMask mask;
12f64221 2550 Unit *slice;
4f6f62e4
CD
2551
2552 assert(u);
2553 mask = unit_get_disable_mask(u);
2554
2555 /* Returns the mask of controllers which are marked as forcibly
2556 * disabled in any ancestor unit or the unit in question. */
2557
12f64221
LP
2558 slice = UNIT_GET_SLICE(u);
2559 if (slice)
2560 mask |= unit_get_ancestor_disable_mask(slice);
4f6f62e4
CD
2561
2562 return mask;
2563}
2564
efdb0237 2565CGroupMask unit_get_target_mask(Unit *u) {
a437c5e4 2566 CGroupMask own_mask, mask;
efdb0237 2567
a437c5e4
LP
2568 /* This returns the cgroup mask of all controllers to enable for a specific cgroup, i.e. everything
2569 * it needs itself, plus all that its children need, plus all that its siblings need. This is
2570 * primarily useful on the legacy cgroup hierarchy, where we need to duplicate each cgroup in each
efdb0237 2571 * hierarchy that shall be enabled for it. */
6414b7c9 2572
a437c5e4 2573 own_mask = unit_get_own_mask(u);
84d2744b 2574
a437c5e4 2575 if (own_mask & CGROUP_MASK_BPF_FIREWALL & ~u->manager->cgroup_supported)
84d2744b
ZJS
2576 emit_bpf_firewall_warning(u);
2577
a437c5e4
LP
2578 mask = own_mask | unit_get_members_mask(u) | unit_get_siblings_mask(u);
2579
efdb0237 2580 mask &= u->manager->cgroup_supported;
c72703e2 2581 mask &= ~unit_get_ancestor_disable_mask(u);
efdb0237
LP
2582
2583 return mask;
2584}
2585
2586CGroupMask unit_get_enable_mask(Unit *u) {
2587 CGroupMask mask;
2588
2589 /* This returns the cgroup mask of all controllers to enable
2590 * for the children of a specific cgroup. This is primarily
2591 * useful for the unified cgroup hierarchy, where each cgroup
2592 * controls which controllers are enabled for its children. */
2593
2594 mask = unit_get_members_mask(u);
6414b7c9 2595 mask &= u->manager->cgroup_supported;
c72703e2 2596 mask &= ~unit_get_ancestor_disable_mask(u);
6414b7c9
DS
2597
2598 return mask;
2599}
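
As a hypothetical illustration of how these masks compose (unit names invented for this note): for foo.service with MemoryMax= and IOWeight= set, placed in bar.slice:

/* unit_get_own_mask(foo.service)   -> CGROUP_MASK_MEMORY | CGROUP_MASK_IO | CGROUP_MASK_BLKIO
 * unit_get_members_mask(bar.slice) -> the same bits, if foo.service is its only member
 * unit_get_enable_mask(bar.slice)  -> those bits, minus controllers that are unsupported or
 *                                     disabled via DisableControllers= on bar.slice or an ancestor */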
2600
5af88058 2601void unit_invalidate_cgroup_members_masks(Unit *u) {
12f64221
LP
2602 Unit *slice;
2603
bc432dc7
LP
2604 assert(u);
2605
9cc54544
LP
2606 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
2607 if (!crt)
2608 return;
2609
5af88058 2610 /* Recursively invalidate the member masks cache all the way up the tree */
9cc54544 2611 crt->cgroup_members_mask_valid = false;
bc432dc7 2612
12f64221
LP
2613 slice = UNIT_GET_SLICE(u);
2614 if (slice)
2615 unit_invalidate_cgroup_members_masks(slice);
6414b7c9
DS
2616}
2617
6592b975 2618const char *unit_get_realized_cgroup_path(Unit *u, CGroupMask mask) {
03b90d4b 2619
6592b975 2620 /* Returns the realized cgroup path of the specified unit where all specified controllers are available. */
03b90d4b
LP
2621
2622 while (u) {
9cc54544
LP
2623 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
2624 if (crt &&
2625 crt->cgroup_path &&
2626 crt->cgroup_realized &&
2627 FLAGS_SET(crt->cgroup_realized_mask, mask))
2628 return crt->cgroup_path;
03b90d4b 2629
12f64221 2630 u = UNIT_GET_SLICE(u);
03b90d4b
LP
2631 }
2632
2633 return NULL;
2634}
2635
6592b975 2636static const char *migrate_callback(CGroupMask mask, void *userdata) {
7b639614
MK
2637 /* If not realized at all, migrate to the root ("").
2638 * This may happen if we're upgrading from an older version that didn't clean up.
2639 */
2640 return strempty(unit_get_realized_cgroup_path(userdata, mask));
6592b975
LP
2641}
2642
1a56b0c0
LP
2643int unit_default_cgroup_path(const Unit *u, char **ret) {
2644 _cleanup_free_ char *p = NULL;
efdb0237
LP
2645 int r;
2646
2647 assert(u);
1a56b0c0 2648 assert(ret);
efdb0237
LP
2649
2650 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1a56b0c0
LP
2651 p = strdup(u->manager->cgroup_root);
2652 else {
2653 _cleanup_free_ char *escaped = NULL, *slice_path = NULL;
2654 Unit *slice;
efdb0237 2655
1a56b0c0
LP
2656 slice = UNIT_GET_SLICE(u);
2657 if (slice && !unit_has_name(slice, SPECIAL_ROOT_SLICE)) {
2658 r = cg_slice_to_path(slice->id, &slice_path);
2659 if (r < 0)
2660 return r;
2661 }
2662
2663 r = cg_escape(u->id, &escaped);
efdb0237 2664 if (r < 0)
1a56b0c0 2665 return r;
efdb0237 2666
1a56b0c0
LP
2667 p = path_join(empty_to_root(u->manager->cgroup_root), slice_path, escaped);
2668 }
2669 if (!p)
2670 return -ENOMEM;
efdb0237 2671
1a56b0c0
LP
2672 *ret = TAKE_PTR(p);
2673 return 0;
efdb0237
LP
2674}
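
A sketch of the default paths this produces (hypothetical unit names, assuming the default system cgroup root):

/* foo.service in the root slice -> "/foo.service"
 * foo.service in bar.slice      -> "/bar.slice/foo.service"
 * foo.service in bar-baz.slice  -> "/bar.slice/bar-baz.slice/foo.service"
 * cg_escape() additionally escapes unit names that would clash with reserved cgroup file names. */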
2675
2676int unit_set_cgroup_path(Unit *u, const char *path) {
2677 _cleanup_free_ char *p = NULL;
9cc54544 2678 CGroupRuntime *crt;
efdb0237
LP
2679 int r;
2680
2681 assert(u);
2682
9cc54544
LP
2683 crt = unit_get_cgroup_runtime(u);
2684
2685 if (crt && streq_ptr(crt->cgroup_path, path))
5210387e
LP
2686 return 0;
2687
9cc54544
LP
2688 unit_release_cgroup(u);
2689
2690 crt = unit_setup_cgroup_runtime(u);
2691 if (!crt)
2692 return -ENOMEM;
2693
efdb0237
LP
2694 if (path) {
2695 p = strdup(path);
2696 if (!p)
2697 return -ENOMEM;
efdb0237 2698
efdb0237
LP
2699 r = hashmap_put(u->manager->cgroup_unit, p, u);
2700 if (r < 0)
2701 return r;
2702 }
2703
9cc54544
LP
2704 assert(!crt->cgroup_path);
2705 crt->cgroup_path = TAKE_PTR(p);
efdb0237
LP
2706
2707 return 1;
2708}
2709
2710int unit_watch_cgroup(Unit *u) {
ab2c3861 2711 _cleanup_free_ char *events = NULL;
efdb0237
LP
2712 int r;
2713
2714 assert(u);
2715
0bb814c2
LP
2716 /* Watches the "cgroups.events" attribute of this unit's cgroup for "empty" events, but only if
2717 * cgroupv2 is available. */
2718
9cc54544
LP
2719 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
2720 if (!crt || !crt->cgroup_path)
efdb0237
LP
2721 return 0;
2722
9cc54544 2723 if (crt->cgroup_control_inotify_wd >= 0)
efdb0237
LP
2724 return 0;
2725
2726 /* Only applies to the unified hierarchy */
c22800e4 2727 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
b4cccbc1
LP
2728 if (r < 0)
2729 return log_error_errno(r, "Failed to determine whether the name=systemd hierarchy is unified: %m");
2730 if (r == 0)
efdb0237
LP
2731 return 0;
2732
0bb814c2 2733 /* No point in watching the top-level slice, it's never going to run empty. */
efdb0237
LP
2734 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
2735 return 0;
2736
0bb814c2 2737 r = hashmap_ensure_allocated(&u->manager->cgroup_control_inotify_wd_unit, &trivial_hash_ops);
efdb0237
LP
2738 if (r < 0)
2739 return log_oom();
2740
9cc54544 2741 r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, crt->cgroup_path, "cgroup.events", &events);
efdb0237
LP
2742 if (r < 0)
2743 return log_oom();
2744
9cc54544
LP
2745 crt->cgroup_control_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, events, IN_MODIFY);
2746 if (crt->cgroup_control_inotify_wd < 0) {
efdb0237 2747
0bb814c2
LP
2748 if (errno == ENOENT) /* If the directory is already gone we don't need to track it, so this
2749 * is not an error */
efdb0237
LP
2750 return 0;
2751
9cc54544 2752 return log_unit_error_errno(u, errno, "Failed to add control inotify watch descriptor for control group %s: %m", empty_to_root(crt->cgroup_path));
efdb0237
LP
2753 }
2754
9cc54544 2755 r = hashmap_put(u->manager->cgroup_control_inotify_wd_unit, INT_TO_PTR(crt->cgroup_control_inotify_wd), u);
efdb0237 2756 if (r < 0)
9cc54544 2757 return log_unit_error_errno(u, r, "Failed to add control inotify watch descriptor for control group %s to hash map: %m", empty_to_root(crt->cgroup_path));
efdb0237
LP
2758
2759 return 0;
2760}
2761
afcfaa69
LP
2762int unit_watch_cgroup_memory(Unit *u) {
2763 _cleanup_free_ char *events = NULL;
afcfaa69
LP
2764 int r;
2765
2766 assert(u);
2767
2768 /* Watches the "memory.events" attribute of this unit's cgroup for "oom_kill" events, but only if
2769 * cgroupv2 is available. */
2770
9cc54544
LP
2771 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
2772 if (!crt || !crt->cgroup_path)
afcfaa69
LP
2773 return 0;
2774
9cc54544 2775 CGroupContext *c = unit_get_cgroup_context(u);
afcfaa69
LP
2776 if (!c)
2777 return 0;
2778
2779 /* The "memory.events" attribute is only available if the memory controller is on. Let's hence tie
2780 * this to memory accounting, in a way watching for OOM kills is a form of memory accounting after
2781 * all. */
2782 if (!c->memory_accounting)
2783 return 0;
2784
2785 /* Don't watch inner nodes, as the kernel doesn't report oom_kill events recursively currently, and
2786 * we also don't want to generate a log message for each parent cgroup of a process. */
2787 if (u->type == UNIT_SLICE)
2788 return 0;
2789
9cc54544 2790 if (crt->cgroup_memory_inotify_wd >= 0)
afcfaa69
LP
2791 return 0;
2792
2793 /* Only applies to the unified hierarchy */
2794 r = cg_all_unified();
2795 if (r < 0)
2796 return log_error_errno(r, "Failed to determine whether the memory controller is unified: %m");
2797 if (r == 0)
2798 return 0;
2799
2800 r = hashmap_ensure_allocated(&u->manager->cgroup_memory_inotify_wd_unit, &trivial_hash_ops);
2801 if (r < 0)
2802 return log_oom();
2803
9cc54544 2804 r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, crt->cgroup_path, "memory.events", &events);
afcfaa69
LP
2805 if (r < 0)
2806 return log_oom();
2807
9cc54544
LP
2808 crt->cgroup_memory_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, events, IN_MODIFY);
2809 if (crt->cgroup_memory_inotify_wd < 0) {
afcfaa69
LP
2810
2811 if (errno == ENOENT) /* If the directory is already gone we don't need to track it, so this
2812 * is not an error */
2813 return 0;
2814
9cc54544 2815 return log_unit_error_errno(u, errno, "Failed to add memory inotify watch descriptor for control group %s: %m", empty_to_root(crt->cgroup_path));
afcfaa69
LP
2816 }
2817
9cc54544 2818 r = hashmap_put(u->manager->cgroup_memory_inotify_wd_unit, INT_TO_PTR(crt->cgroup_memory_inotify_wd), u);
afcfaa69 2819 if (r < 0)
9cc54544 2820 return log_unit_error_errno(u, r, "Failed to add memory inotify watch descriptor for control group %s to hash map: %m", empty_to_root(crt->cgroup_path));
afcfaa69
LP
2821
2822 return 0;
2823}
2824
a4634b21
LP
2825int unit_pick_cgroup_path(Unit *u) {
2826 _cleanup_free_ char *path = NULL;
2827 int r;
2828
2829 assert(u);
2830
a4634b21
LP
2831 if (!UNIT_HAS_CGROUP_CONTEXT(u))
2832 return -EINVAL;
2833
9cc54544
LP
2834 CGroupRuntime *crt = unit_setup_cgroup_runtime(u);
2835 if (!crt)
2836 return -ENOMEM;
2837 if (crt->cgroup_path)
2838 return 0;
2839
1a56b0c0
LP
2840 r = unit_default_cgroup_path(u, &path);
2841 if (r < 0)
2842 return log_unit_error_errno(u, r, "Failed to generate default cgroup path: %m");
a4634b21
LP
2843
2844 r = unit_set_cgroup_path(u, path);
2845 if (r == -EEXIST)
6178e2f8 2846 return log_unit_error_errno(u, r, "Control group %s exists already.", empty_to_root(path));
a4634b21 2847 if (r < 0)
6178e2f8 2848 return log_unit_error_errno(u, r, "Failed to set unit's control group path to %s: %m", empty_to_root(path));
a4634b21
LP
2849
2850 return 0;
2851}
2852
7b639614 2853static int unit_update_cgroup(
efdb0237
LP
2854 Unit *u,
2855 CGroupMask target_mask,
0d2d6fbf
CD
2856 CGroupMask enable_mask,
2857 ManagerState state) {
efdb0237 2858
7b639614
MK
2859 bool created, is_root_slice;
2860 CGroupMask migrate_mask = 0;
184b4f78 2861 _cleanup_free_ char *cgroup_full_path = NULL;
27adcc97 2862 int r;
64747e2d 2863
4ad49000 2864 assert(u);
64747e2d 2865
27c4ed79 2866 if (!UNIT_HAS_CGROUP_CONTEXT(u))
0cd385d3
LP
2867 return 0;
2868
7923e949
AV
2869 if (u->freezer_state != FREEZER_RUNNING)
2870 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EBUSY), "Cannot realize cgroup for frozen unit.");
2871
a4634b21
LP
2872 /* Figure out our cgroup path */
2873 r = unit_pick_cgroup_path(u);
2874 if (r < 0)
2875 return r;
b58b8e11 2876
9cc54544
LP
2877 CGroupRuntime *crt = ASSERT_PTR(unit_get_cgroup_runtime(u));
2878
03b90d4b 2879 /* First, create our own group */
9cc54544 2880 r = cg_create_everywhere(u->manager->cgroup_supported, target_mask, crt->cgroup_path);
23bbb0de 2881 if (r < 0)
9cc54544 2882 return log_unit_error_errno(u, r, "Failed to create cgroup %s: %m", empty_to_root(crt->cgroup_path));
490c5a37 2883 created = r;
efdb0237 2884
184b4f78 2885 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0) {
1b420223
LP
2886 uint64_t cgroup_id = 0;
2887
9cc54544 2888 r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, crt->cgroup_path, NULL, &cgroup_full_path);
184b4f78
ILG
2889 if (r == 0) {
2890 r = cg_path_get_cgroupid(cgroup_full_path, &cgroup_id);
2891 if (r < 0)
1b420223
LP
2892 log_unit_full_errno(u, ERRNO_IS_NOT_SUPPORTED(r) ? LOG_DEBUG : LOG_WARNING, r,
2893 "Failed to get cgroup ID of cgroup %s, ignoring: %m", cgroup_full_path);
184b4f78 2894 } else
9cc54544 2895 log_unit_warning_errno(u, r, "Failed to get full cgroup path on cgroup %s, ignoring: %m", empty_to_root(crt->cgroup_path));
184b4f78 2896
9cc54544 2897 crt->cgroup_id = cgroup_id;
184b4f78
ILG
2898 }
2899
efdb0237
LP
2900 /* Start watching it */
2901 (void) unit_watch_cgroup(u);
afcfaa69 2902 (void) unit_watch_cgroup_memory(u);
efdb0237 2903
7b639614
MK
2904 /* For v2 we preserve enabled controllers in delegated units, adjust others,
2905 * for v1 we figure out which controller hierarchies need migration. */
9cc54544 2906 if (created || !crt->cgroup_realized || !unit_cgroup_delegate(u)) {
27adcc97 2907 CGroupMask result_mask = 0;
65be7e06
ZJS
2908
2909 /* Enable all controllers we need */
9cc54544 2910 r = cg_enable_everywhere(u->manager->cgroup_supported, enable_mask, crt->cgroup_path, &result_mask);
65be7e06 2911 if (r < 0)
9cc54544 2912 log_unit_warning_errno(u, r, "Failed to enable/disable controllers on cgroup %s, ignoring: %m", empty_to_root(crt->cgroup_path));
27adcc97 2913
27adcc97 2914 /* Remember what's actually enabled now */
9cc54544 2915 crt->cgroup_enabled_mask = result_mask;
7b639614 2916
9cc54544 2917 migrate_mask = crt->cgroup_realized_mask ^ target_mask;
65be7e06 2918 }
03b90d4b
LP
2919
2920 /* Keep track that this is now realized */
9cc54544
LP
2921 crt->cgroup_realized = true;
2922 crt->cgroup_realized_mask = target_mask;
4ad49000 2923
7b639614
MK
2924 /* Migrate processes in controller hierarchies both downwards (enabling) and upwards (disabling).
2925 *
2926 * Unnecessary controller cgroups are trimmed (after emptied by upward migration).
 2927 * We also perform migration with whole slices, for cases where users don't care about leaf
 2928 * granularity. Since delegated_mask is a subset of target_mask, we won't trim a slice subtree containing
 2929 * delegated units.
2930 */
2931 if (cg_all_unified() == 0) {
9cc54544 2932 r = cg_migrate_v1_controllers(u->manager->cgroup_supported, migrate_mask, crt->cgroup_path, migrate_callback, u);
7b639614 2933 if (r < 0)
9cc54544 2934 log_unit_warning_errno(u, r, "Failed to migrate controller cgroups from %s, ignoring: %m", empty_to_root(crt->cgroup_path));
0cd385d3 2935
7b639614 2936 is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);
9cc54544 2937 r = cg_trim_v1_controllers(u->manager->cgroup_supported, ~target_mask, crt->cgroup_path, !is_root_slice);
0cd385d3 2938 if (r < 0)
9cc54544 2939 log_unit_warning_errno(u, r, "Failed to delete controller cgroups %s, ignoring: %m", empty_to_root(crt->cgroup_path));
0cd385d3 2940 }
03b90d4b 2941
2942 /* Set attributes */
2943 cgroup_context_apply(u, target_mask, state);
2944 cgroup_xattr_apply(u);
2945
2946 /* For most units we expect that memory monitoring is set up before the unit is started and we won't
2947 * touch it after. For PID 1 this is different though, because we couldn't possibly do that given
2948 * that PID 1 runs before init.scope is even set up. Hence, whenever init.scope is realized, let's
2949 * try to open the memory pressure interface anew. */
2950 if (unit_has_name(u, SPECIAL_INIT_SCOPE))
2951 (void) manager_setup_memory_pressure_event_source(u->manager);
2952
2953 return 0;
2954}
2955
2956static int unit_attach_pid_to_cgroup_via_bus(Unit *u, pid_t pid, const char *suffix_path) {
2957 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2958 char *pp;
7b3fd631 2959 int r;
6592b975 2960
2961 assert(u);
2962
2963 if (MANAGER_IS_SYSTEM(u->manager))
2964 return -EINVAL;
2965
2966 if (!u->manager->system_bus)
2967 return -EIO;
2968
2969 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
2970 if (!crt || !crt->cgroup_path)
2971 return -EOWNERDEAD;
2972
2973 /* Determine this unit's cgroup path relative to our cgroup root */
9cc54544 2974 pp = path_startswith(crt->cgroup_path, u->manager->cgroup_root);
2975 if (!pp)
2976 return -EINVAL;
2977
2978 pp = strjoina("/", pp, suffix_path);
4ff361cc 2979 path_simplify(pp);
6592b975 2980
78fa2f91 2981 r = bus_call_method(u->manager->system_bus,
2982 bus_systemd_mgr,
2983 "AttachProcessesToUnit",
2984 &error, NULL,
2985 "ssau",
2986 NULL /* empty unit name means client's unit, i.e. us */, pp, 1, (uint32_t) pid);
7b3fd631 2987 if (r < 0)
2988 return log_unit_debug_errno(u, r, "Failed to attach unit process " PID_FMT " via the bus: %s", pid, bus_error_message(&error, r));
2989
2990 return 0;
2991}
2992
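/* A minimal client-side sketch of the AttachProcessesToUnit() bus call used above, assuming only
 * libsystemd's public sd-bus API. The signature "ssau" is: unit name (an empty string means the
 * unit the caller itself belongs to), subcgroup path, array of PIDs. The helper name and error
 * handling are assumptions for illustration only. */

#include <systemd/sd-bus.h>

static int example_attach_pid_via_bus(pid_t pid, const char *subcgroup) {
        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
        _cleanup_(sd_bus_flush_close_unrefp) sd_bus *bus = NULL;
        int r;

        r = sd_bus_open_system(&bus);
        if (r < 0)
                return r;

        /* Mirrors the bus_call_method() invocation above, spelled out with the full bus address. */
        return sd_bus_call_method(bus,
                                  "org.freedesktop.systemd1",
                                  "/org/freedesktop/systemd1",
                                  "org.freedesktop.systemd1.Manager",
                                  "AttachProcessesToUnit",
                                  &error, NULL,
                                  "ssau",
                                  "", subcgroup, 1, (uint32_t) pid);
}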
2993int unit_attach_pids_to_cgroup(Unit *u, Set *pids, const char *suffix_path) {
8e7e4a73 2994 _cleanup_free_ char *joined = NULL;
6592b975
LP
2995 CGroupMask delegated_mask;
2996 const char *p;
495e75ed 2997 PidRef *pid;
db4229d1 2998 int ret, r;
6592b975
LP
2999
3000 assert(u);
3001
3002 if (!UNIT_HAS_CGROUP_CONTEXT(u))
3003 return -EINVAL;
3004
3005 if (set_isempty(pids))
3006 return 0;
7b3fd631 3007
fab34748
KL
 3008 /* Load any custom firewall BPF programs here once, to test whether they exist and are actually loadable.
 3009 * Fail early here, since later errors in the call chain from unit_realize_cgroup() to cgroup_context_apply() are ignored. */
3010 r = bpf_firewall_load_custom(u);
3011 if (r < 0)
3012 return r;
3013
6592b975 3014 r = unit_realize_cgroup(u);
7b3fd631
LP
3015 if (r < 0)
3016 return r;
3017
9cc54544
LP
3018 CGroupRuntime *crt = ASSERT_PTR(unit_get_cgroup_runtime(u));
3019
6592b975 3020 if (isempty(suffix_path))
9cc54544 3021 p = crt->cgroup_path;
8e7e4a73 3022 else {
9cc54544 3023 joined = path_join(crt->cgroup_path, suffix_path);
8e7e4a73
LP
3024 if (!joined)
3025 return -ENOMEM;
3026
3027 p = joined;
3028 }
6592b975
LP
3029
3030 delegated_mask = unit_get_delegate_mask(u);
3031
db4229d1 3032 ret = 0;
495e75ed
LP
3033 SET_FOREACH(pid, pids) {
3034
3035 /* Unfortunately we cannot add pids by pidfd to a cgroup. Hence we have to use PIDs instead,
3036 * which of course is racy. Let's shorten the race a bit though, and re-validate the PID
3037 * before we use it */
3038 r = pidref_verify(pid);
3039 if (r < 0) {
3040 log_unit_info_errno(u, r, "PID " PID_FMT " vanished before we could move it to target cgroup '%s', skipping: %m", pid->pid, empty_to_root(p));
3041 continue;
3042 }
6592b975
LP
3043
3044 /* First, attach the PID to the main cgroup hierarchy */
495e75ed 3045 r = cg_attach(SYSTEMD_CGROUP_CONTROLLER, p, pid->pid);
3046 if (r < 0) {
3047 bool again = MANAGER_IS_USER(u->manager) && ERRNO_IS_PRIVILEGE(r);
6592b975 3048
db4229d1 3049 log_unit_full_errno(u, again ? LOG_DEBUG : LOG_INFO, r,
7a2ba407 3050 "Couldn't move process "PID_FMT" to%s requested cgroup '%s': %m",
495e75ed 3051 pid->pid, again ? " directly" : "", empty_to_root(p));
7a2ba407
ZJS
3052
3053 if (again) {
6592b975
LP
3054 int z;
3055
7a2ba407
ZJS
3056 /* If we are in a user instance, and we can't move the process ourselves due
3057 * to permission problems, let's ask the system instance about it instead.
3058 * Since it's more privileged it might be able to move the process across the
3059 * leaves of a subtree whose top node is not owned by us. */
6592b975 3060
495e75ed 3061 z = unit_attach_pid_to_cgroup_via_bus(u, pid->pid, suffix_path);
6592b975 3062 if (z < 0)
495e75ed 3063 log_unit_info_errno(u, z, "Couldn't move process "PID_FMT" to requested cgroup '%s' (directly or via the system bus): %m", pid->pid, empty_to_root(p));
c65417a0
JW
3064 else {
3065 if (ret >= 0)
3066 ret++; /* Count successful additions */
6592b975 3067 continue; /* When the bus thing worked via the bus we are fully done for this PID. */
c65417a0 3068 }
6592b975
LP
3069 }
3070
db4229d1
LP
3071 if (ret >= 0)
3072 ret = r; /* Remember first error */
6592b975
LP
3073
3074 continue;
8d3e4ac7
LP
3075 } else if (ret >= 0)
3076 ret++; /* Count successful additions */
6592b975 3077
db4229d1
LP
3078 r = cg_all_unified();
3079 if (r < 0)
3080 return r;
3081 if (r > 0)
6592b975
LP
3082 continue;
3083
 3084 /* In the legacy hierarchy, attach the process to the requested cgroup if possible, and if not, to the
3085 * innermost realized one */
3086
e8616626 3087 for (CGroupController c = 0; c < _CGROUP_CONTROLLER_MAX; c++) {
6592b975
LP
3088 CGroupMask bit = CGROUP_CONTROLLER_TO_MASK(c);
3089 const char *realized;
3090
3091 if (!(u->manager->cgroup_supported & bit))
3092 continue;
3093
3094 /* If this controller is delegated and realized, honour the caller's request for the cgroup suffix. */
9cc54544 3095 if (delegated_mask & crt->cgroup_realized_mask & bit) {
495e75ed 3096 r = cg_attach(cgroup_controller_to_string(c), p, pid->pid);
db4229d1 3097 if (r >= 0)
6592b975
LP
3098 continue; /* Success! */
3099
db4229d1 3100 log_unit_debug_errno(u, r, "Failed to attach PID " PID_FMT " to requested cgroup %s in controller %s, falling back to unit's cgroup: %m",
495e75ed 3101 pid->pid, empty_to_root(p), cgroup_controller_to_string(c));
6592b975
LP
3102 }
3103
 3104 /* So this controller is either not delegated or not realized, or something else weird happened. In
3105 * that case let's attach the PID at least to the closest cgroup up the tree that is
3106 * realized. */
3107 realized = unit_get_realized_cgroup_path(u, bit);
3108 if (!realized)
3109 continue; /* Not even realized in the root slice? Then let's not bother */
3110
495e75ed 3111 r = cg_attach(cgroup_controller_to_string(c), realized, pid->pid);
3112 if (r < 0)
3113 log_unit_debug_errno(u, r, "Failed to attach PID " PID_FMT " to realized cgroup %s in controller %s, ignoring: %m",
495e75ed 3114 pid->pid, realized, cgroup_controller_to_string(c));
6592b975
LP
3115 }
3116 }
3117
db4229d1 3118 return ret;
7b3fd631
LP
3119}
3120
906c06f6
DM
3121static bool unit_has_mask_realized(
3122 Unit *u,
3123 CGroupMask target_mask,
17f14955 3124 CGroupMask enable_mask) {
906c06f6 3125
bc432dc7
LP
3126 assert(u);
3127
9cc54544
LP
3128 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
3129 if (!crt)
3130 return false;
3131
d5095dcd
LP
3132 /* Returns true if this unit is fully realized. We check four things:
3133 *
3134 * 1. Whether the cgroup was created at all
3135 * 2. Whether the cgroup was created in all the hierarchies we need it to be created in (in case of cgroup v1)
3136 * 3. Whether the cgroup has all the right controllers enabled (in case of cgroup v2)
3137 * 4. Whether the invalidation mask is currently zero
3138 *
3139 * If you wonder why we mask the target realization and enable mask with CGROUP_MASK_V1/CGROUP_MASK_V2: note
3140 * that there are three sets of bitmasks: CGROUP_MASK_V1 (for real cgroup v1 controllers), CGROUP_MASK_V2 (for
3141 * real cgroup v2 controllers) and CGROUP_MASK_BPF (for BPF-based pseudo-controllers). Now, cgroup_realized_mask
 3142 * only matters for cgroup v1 controllers, and cgroup_enabled_mask is only used for cgroup v2, and if they
 3143 * differ in the other bits, we don't really care. (After all, cgroup_enabled_mask tracks which controllers are
 3144 * enabled through cgroup.subtree_control, and since the BPF pseudo-controllers don't show up there, they
 3145 * simply don't matter.) */
3146
9cc54544
LP
3147 return crt->cgroup_realized &&
3148 ((crt->cgroup_realized_mask ^ target_mask) & CGROUP_MASK_V1) == 0 &&
3149 ((crt->cgroup_enabled_mask ^ enable_mask) & CGROUP_MASK_V2) == 0 &&
3150 crt->cgroup_invalidated_mask == 0;
6414b7c9
DS
3151}
3152
4f6f62e4
CD
3153static bool unit_has_mask_disables_realized(
3154 Unit *u,
3155 CGroupMask target_mask,
3156 CGroupMask enable_mask) {
3157
3158 assert(u);
3159
9cc54544
LP
3160 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
3161 if (!crt)
3162 return true;
3163
4f6f62e4
CD
3164 /* Returns true if all controllers which should be disabled are indeed disabled.
3165 *
3166 * Unlike unit_has_mask_realized, we don't care what was enabled, only that anything we want to remove is
3167 * already removed. */
3168
9cc54544
LP
3169 return !crt->cgroup_realized ||
3170 (FLAGS_SET(crt->cgroup_realized_mask, target_mask & CGROUP_MASK_V1) &&
3171 FLAGS_SET(crt->cgroup_enabled_mask, enable_mask & CGROUP_MASK_V2));
4f6f62e4
CD
3172}
3173
a57669d2
CD
3174static bool unit_has_mask_enables_realized(
3175 Unit *u,
3176 CGroupMask target_mask,
3177 CGroupMask enable_mask) {
3178
3179 assert(u);
3180
9cc54544
LP
3181 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
3182 if (!crt)
3183 return false;
3184
a57669d2
CD
3185 /* Returns true if all controllers which should be enabled are indeed enabled.
3186 *
3187 * Unlike unit_has_mask_realized, we don't care about the controllers that are not present, only that anything
3188 * we want to add is already added. */
3189
9cc54544
LP
3190 return crt->cgroup_realized &&
3191 ((crt->cgroup_realized_mask | target_mask) & CGROUP_MASK_V1) == (crt->cgroup_realized_mask & CGROUP_MASK_V1) &&
3192 ((crt->cgroup_enabled_mask | enable_mask) & CGROUP_MASK_V2) == (crt->cgroup_enabled_mask & CGROUP_MASK_V2);
a57669d2
CD
3193}
3194
020b2e41 3195void unit_add_to_cgroup_realize_queue(Unit *u) {
2aa57a65
LP
3196 assert(u);
3197
3198 if (u->in_cgroup_realize_queue)
3199 return;
3200
a479c21e 3201 LIST_APPEND(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
2aa57a65
LP
3202 u->in_cgroup_realize_queue = true;
3203}
3204
3205static void unit_remove_from_cgroup_realize_queue(Unit *u) {
3206 assert(u);
3207
3208 if (!u->in_cgroup_realize_queue)
3209 return;
3210
3211 LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
3212 u->in_cgroup_realize_queue = false;
3213}
3214
a57669d2
CD
3215/* Controllers can only be enabled breadth-first, from the root of the
3216 * hierarchy downwards to the unit in question. */
3217static int unit_realize_cgroup_now_enable(Unit *u, ManagerState state) {
3218 CGroupMask target_mask, enable_mask, new_target_mask, new_enable_mask;
12f64221 3219 Unit *slice;
a57669d2
CD
3220 int r;
3221
3222 assert(u);
3223
3224 /* First go deal with this unit's parent, or we won't be able to enable
3225 * any new controllers at this layer. */
12f64221
LP
3226 slice = UNIT_GET_SLICE(u);
3227 if (slice) {
3228 r = unit_realize_cgroup_now_enable(slice, state);
a57669d2
CD
3229 if (r < 0)
3230 return r;
3231 }
3232
3233 target_mask = unit_get_target_mask(u);
3234 enable_mask = unit_get_enable_mask(u);
3235
3236 /* We can only enable in this direction, don't try to disable anything.
3237 */
3238 if (unit_has_mask_enables_realized(u, target_mask, enable_mask))
3239 return 0;
3240
9cc54544
LP
3241 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
3242
3243 new_target_mask = (crt ? crt->cgroup_realized_mask : 0) | target_mask;
3244 new_enable_mask = (crt ? crt->cgroup_enabled_mask : 0) | enable_mask;
a57669d2 3245
7b639614 3246 return unit_update_cgroup(u, new_target_mask, new_enable_mask, state);
a57669d2
CD
3247}
3248
4f6f62e4
CD
3249/* Controllers can only be disabled depth-first, from the leaves of the
3250 * hierarchy upwards to the unit in question. */
3251static int unit_realize_cgroup_now_disable(Unit *u, ManagerState state) {
4f6f62e4 3252 Unit *m;
4f6f62e4
CD
3253
3254 assert(u);
3255
3256 if (u->type != UNIT_SLICE)
3257 return 0;
3258
d219a2b0 3259 UNIT_FOREACH_DEPENDENCY(m, u, UNIT_ATOM_SLICE_OF) {
4f6f62e4
CD
3260 CGroupMask target_mask, enable_mask, new_target_mask, new_enable_mask;
3261 int r;
3262
9cc54544
LP
3263 CGroupRuntime *rt = unit_get_cgroup_runtime(m);
3264 if (!rt)
3265 continue;
3266
defe63b0
LP
3267 /* The cgroup for this unit might not actually be fully realised yet, in which case it isn't
3268 * holding any controllers open anyway. */
9cc54544 3269 if (!rt->cgroup_realized)
4f6f62e4
CD
3270 continue;
3271
defe63b0 3272 /* We must disable those below us first in order to release the controller. */
4f6f62e4
CD
3273 if (m->type == UNIT_SLICE)
3274 (void) unit_realize_cgroup_now_disable(m, state);
3275
3276 target_mask = unit_get_target_mask(m);
3277 enable_mask = unit_get_enable_mask(m);
3278
defe63b0 3279 /* We can only disable in this direction, don't try to enable anything. */
4f6f62e4
CD
3280 if (unit_has_mask_disables_realized(m, target_mask, enable_mask))
3281 continue;
3282
9cc54544
LP
3283 new_target_mask = rt->cgroup_realized_mask & target_mask;
3284 new_enable_mask = rt->cgroup_enabled_mask & enable_mask;
4f6f62e4 3285
7b639614 3286 r = unit_update_cgroup(m, new_target_mask, new_enable_mask, state);
4f6f62e4
CD
3287 if (r < 0)
3288 return r;
3289 }
3290
3291 return 0;
3292}
a57669d2 3293
6414b7c9
DS
3294/* Check if necessary controllers and attributes for a unit are in place.
3295 *
a57669d2
CD
3296 * - If so, do nothing.
3297 * - If not, create paths, move processes over, and set attributes.
3298 *
3299 * Controllers can only be *enabled* in a breadth-first way, and *disabled* in
3300 * a depth-first way. As such the process looks like this:
3301 *
3302 * Suppose we have a cgroup hierarchy which looks like this:
3303 *
3304 * root
3305 * / \
3306 * / \
3307 * / \
3308 * a b
3309 * / \ / \
3310 * / \ / \
3311 * c d e f
3312 * / \ / \ / \ / \
3313 * h i j k l m n o
3314 *
3315 * 1. We want to realise cgroup "d" now.
c72703e2 3316 * 2. cgroup "a" has DisableControllers=cpu in the associated unit.
3317 * 3. cgroup "k" just started requesting the memory controller.
3318 *
3319 * To make this work we must do the following in order:
3320 *
3321 * 1. Disable CPU controller in k, j
3322 * 2. Disable CPU controller in d
3323 * 3. Enable memory controller in root
3324 * 4. Enable memory controller in a
3325 * 5. Enable memory controller in d
3326 * 6. Enable memory controller in k
3327 *
3328 * Notice that we need to touch j in one direction, but not the other. We also
3329 * don't go beyond d when disabling -- it's up to "a" to get realized if it
3330 * wants to disable further. The basic rules are therefore:
3331 *
3332 * - If you're disabling something, you need to realise all of the cgroups from
3333 * your recursive descendants to the root. This starts from the leaves.
3334 * - If you're enabling something, you need to realise from the root cgroup
3335 * downwards, but you don't need to iterate your recursive descendants.
6414b7c9
DS
3336 *
3337 * Returns 0 on success and < 0 on failure. */
db785129 3338static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
efdb0237 3339 CGroupMask target_mask, enable_mask;
12f64221 3340 Unit *slice;
6414b7c9 3341 int r;
64747e2d 3342
4ad49000 3343 assert(u);
64747e2d 3344
2aa57a65 3345 unit_remove_from_cgroup_realize_queue(u);
64747e2d 3346
efdb0237 3347 target_mask = unit_get_target_mask(u);
ccf78df1
TH
3348 enable_mask = unit_get_enable_mask(u);
3349
17f14955 3350 if (unit_has_mask_realized(u, target_mask, enable_mask))
0a1eb06d 3351 return 0;
64747e2d 3352
4f6f62e4
CD
3353 /* Disable controllers below us, if there are any */
3354 r = unit_realize_cgroup_now_disable(u, state);
3355 if (r < 0)
3356 return r;
3357
3358 /* Enable controllers above us, if there are any */
12f64221
LP
3359 slice = UNIT_GET_SLICE(u);
3360 if (slice) {
3361 r = unit_realize_cgroup_now_enable(slice, state);
6414b7c9
DS
3362 if (r < 0)
3363 return r;
3364 }
4ad49000 3365
0d2d6fbf 3366 /* Now actually deal with the cgroup we were trying to realise and set attributes */
7b639614 3367 r = unit_update_cgroup(u, target_mask, enable_mask, state);
6414b7c9
DS
3368 if (r < 0)
3369 return r;
3370
9cc54544
LP
3371 CGroupRuntime *crt = ASSERT_PTR(unit_get_cgroup_runtime(u));
3372
c2baf11c 3373 /* Now, reset the invalidation mask */
9cc54544 3374 crt->cgroup_invalidated_mask = 0;
6414b7c9 3375 return 0;
64747e2d
LP
3376}
3377
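/* A minimal sketch of the breadth-first enabling rule described above, expressed directly against
 * cgroupfs: to use the memory controller in a/d/k, "+memory" must be written to
 * cgroup.subtree_control of every ancestor, root first. The concrete paths and the helper name are
 * assumptions for illustration only; this is roughly the operation cg_enable_everywhere() above
 * performs. */

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int example_enable_memory_top_down(void) {
        /* Root first, then each ancestor of the (made-up) target cgroup a/d/k. */
        static const char *const ancestors[] = {
                "/sys/fs/cgroup",
                "/sys/fs/cgroup/a",
                "/sys/fs/cgroup/a/d",
        };

        for (size_t i = 0; i < sizeof(ancestors) / sizeof(ancestors[0]); i++) {
                char p[256];
                ssize_t n;
                int fd;

                snprintf(p, sizeof(p), "%s/cgroup.subtree_control", ancestors[i]);

                fd = open(p, O_WRONLY|O_CLOEXEC);
                if (fd < 0)
                        return -1;

                /* The kernel refuses this if the controller cannot be enabled at this level,
                 * e.g. because this cgroup still has member processes. */
                n = write(fd, "+memory", 7);
                close(fd);
                if (n < 0)
                        return -1;
        }

        return 0;
}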
91a6073e 3378unsigned manager_dispatch_cgroup_realize_queue(Manager *m) {
db785129 3379 ManagerState state;
4ad49000 3380 unsigned n = 0;
db785129 3381 Unit *i;
6414b7c9 3382 int r;
ecedd90f 3383
91a6073e
LP
3384 assert(m);
3385
db785129
LP
3386 state = manager_state(m);
3387
91a6073e
LP
3388 while ((i = m->cgroup_realize_queue)) {
3389 assert(i->in_cgroup_realize_queue);
ecedd90f 3390
2aa57a65
LP
3391 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(i))) {
3392 /* Maybe things changed, and the unit is not actually active anymore? */
3393 unit_remove_from_cgroup_realize_queue(i);
3394 continue;
3395 }
3396
db785129 3397 r = unit_realize_cgroup_now(i, state);
6414b7c9 3398 if (r < 0)
efdb0237 3399 log_warning_errno(r, "Failed to realize cgroups for queued unit %s, ignoring: %m", i->id);
0a1eb06d 3400
4ad49000
LP
3401 n++;
3402 }
ecedd90f 3403
4ad49000 3404 return n;
8e274523
LP
3405}
3406
4c591f39
MK
3407void unit_add_family_to_cgroup_realize_queue(Unit *u) {
3408 assert(u);
3409 assert(u->type == UNIT_SLICE);
ca949c9d 3410
 3411 /* The family of a unit is defined as the (immediate) children of the unit and the immediate children of all
3412 * its ancestors.
3413 *
 3414 * Ideally we would enqueue the ancestor path only (bottom up). However, on cgroup v1, scheduling becomes
3415 * very weird if two units that own processes reside in the same slice, but one is realized in the
3416 * "cpu" hierarchy and one is not (for example because one has CPUWeight= set and the other does
3417 * not), because that means individual processes need to be scheduled against whole cgroups. Let's
 3418 * avoid this asymmetry by ensuring that siblings of a unit are always realized in their v1
 3419 * controller hierarchies too (if the unit requires the controller to be realized).
e1e98911 3420 *
3421 * The function must invalidate cgroup_members_mask of all ancestors in order to calculate up to date
3422 * masks. */
3423
3424 do {
9cc54544 3425 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
8f53a7b8 3426
4c591f39 3427 /* Children of u likely changed when we're called */
9cc54544
LP
3428 if (crt)
3429 crt->cgroup_members_mask_valid = false;
f23ba94d 3430
9cc54544 3431 Unit *m;
d219a2b0 3432 UNIT_FOREACH_DEPENDENCY(m, u, UNIT_ATOM_SLICE_OF) {
8e274523 3433
65f6b6bd 3434 /* No point in doing cgroup application for units without active processes. */
6414b7c9
DS
3435 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(m)))
3436 continue;
3437
e1e98911
LP
3438 /* We only enqueue siblings if they were realized once at least, in the main
3439 * hierarchy. */
9cc54544
LP
3440 crt = unit_get_cgroup_runtime(m);
3441 if (!crt || !crt->cgroup_realized)
e1e98911
LP
3442 continue;
3443
defe63b0
LP
3444 /* If the unit doesn't need any new controllers and has current ones
3445 * realized, it doesn't need any changes. */
906c06f6
DM
3446 if (unit_has_mask_realized(m,
3447 unit_get_target_mask(m),
17f14955 3448 unit_get_enable_mask(m)))
6414b7c9
DS
3449 continue;
3450
91a6073e 3451 unit_add_to_cgroup_realize_queue(m);
50159e6a
LP
3452 }
3453
4c591f39
MK
3454 /* Parent comes after children */
3455 unit_add_to_cgroup_realize_queue(u);
12f64221
LP
3456
3457 u = UNIT_GET_SLICE(u);
3458 } while (u);
4ad49000
LP
3459}
3460
0a1eb06d 3461int unit_realize_cgroup(Unit *u) {
12f64221
LP
3462 Unit *slice;
3463
4ad49000
LP
3464 assert(u);
3465
35b7ff80 3466 if (!UNIT_HAS_CGROUP_CONTEXT(u))
0a1eb06d 3467 return 0;
8e274523 3468
4c591f39
MK
3469 /* So, here's the deal: when realizing the cgroups for this unit, we need to first create all
3470 * parents, but there's more actually: for the weight-based controllers we also need to make sure
3471 * that all our siblings (i.e. units that are in the same slice as we are) have cgroups, too. On the
3472 * other hand, when a controller is removed from realized set, it may become unnecessary in siblings
3473 * and ancestors and they should be (de)realized too.
3474 *
3475 * This call will defer work on the siblings and derealized ancestors to the next event loop
3476 * iteration and synchronously creates the parent cgroups (unit_realize_cgroup_now). */
ca949c9d 3477
12f64221
LP
3478 slice = UNIT_GET_SLICE(u);
3479 if (slice)
3480 unit_add_family_to_cgroup_realize_queue(slice);
4ad49000 3481
6414b7c9 3482 /* And realize this one now (and apply the values) */
db785129 3483 return unit_realize_cgroup_now(u, manager_state(u->manager));
8e274523
LP
3484}
3485
efdb0237
LP
3486void unit_release_cgroup(Unit *u) {
3487 assert(u);
3488
8a0d5388
LP
3489 /* Forgets all cgroup details for this cgroup — but does *not* destroy the cgroup. This is hence OK to call
3490 * when we close down everything for reexecution, where we really want to leave the cgroup in place. */
efdb0237 3491
9cc54544
LP
3492 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
3493 if (!crt)
3494 return;
3495
3496 if (crt->cgroup_path) {
3497 (void) hashmap_remove(u->manager->cgroup_unit, crt->cgroup_path);
3498 crt->cgroup_path = mfree(crt->cgroup_path);
efdb0237
LP
3499 }
3500
9cc54544
LP
3501 if (crt->cgroup_control_inotify_wd >= 0) {
3502 if (inotify_rm_watch(u->manager->cgroup_inotify_fd, crt->cgroup_control_inotify_wd) < 0)
3503 log_unit_debug_errno(u, errno, "Failed to remove cgroup control inotify watch %i for %s, ignoring: %m", crt->cgroup_control_inotify_wd, u->id);
efdb0237 3504
9cc54544
LP
3505 (void) hashmap_remove(u->manager->cgroup_control_inotify_wd_unit, INT_TO_PTR(crt->cgroup_control_inotify_wd));
3506 crt->cgroup_control_inotify_wd = -1;
efdb0237 3507 }
afcfaa69 3508
9cc54544
LP
3509 if (crt->cgroup_memory_inotify_wd >= 0) {
3510 if (inotify_rm_watch(u->manager->cgroup_inotify_fd, crt->cgroup_memory_inotify_wd) < 0)
3511 log_unit_debug_errno(u, errno, "Failed to remove cgroup memory inotify watch %i for %s, ignoring: %m", crt->cgroup_memory_inotify_wd, u->id);
afcfaa69 3512
9cc54544
LP
3513 (void) hashmap_remove(u->manager->cgroup_memory_inotify_wd_unit, INT_TO_PTR(crt->cgroup_memory_inotify_wd));
3514 crt->cgroup_memory_inotify_wd = -1;
afcfaa69 3515 }
9cc54544
LP
3516
3517 *(CGroupRuntime**) ((uint8_t*) u + UNIT_VTABLE(u)->cgroup_runtime_offset) = cgroup_runtime_free(crt);
3518}
3519
3520int unit_cgroup_is_empty(Unit *u) {
3521 int r;
3522
3523 assert(u);
3524
3525 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
3526 if (!crt)
3527 return -ENXIO;
3528 if (!crt->cgroup_path)
3529 return -EOWNERDEAD;
3530
3531 r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, crt->cgroup_path);
3532 if (r < 0)
3533 return log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty, ignoring: %m", empty_to_root(crt->cgroup_path));
3534
3535 return r;
efdb0237
LP
3536}
3537
e08dabfe
AZ
3538bool unit_maybe_release_cgroup(Unit *u) {
3539 int r;
3540
3541 assert(u);
3542
9cc54544
LP
3543 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
3544 if (!crt || !crt->cgroup_path)
e08dabfe
AZ
3545 return true;
3546
9cc54544
LP
3547 /* Don't release the cgroup if there are still processes under it. If we get notified later when all
3548 * the processes exit (e.g. the processes were in D-state and exited after the unit was marked as
3549 * failed) we need the cgroup paths to continue to be tracked by the manager so they can be looked up
3550 * and cleaned up later. */
3551 r = unit_cgroup_is_empty(u);
3552 if (r == 1) {
e08dabfe
AZ
3553 unit_release_cgroup(u);
3554 return true;
3555 }
3556
3557 return false;
3558}
3559
efdb0237 3560void unit_prune_cgroup(Unit *u) {
8e274523 3561 int r;
efdb0237 3562 bool is_root_slice;
8e274523 3563
4ad49000 3564 assert(u);
8e274523 3565
efdb0237 3566 /* Removes the cgroup, if empty and possible, and stops watching it. */
9cc54544
LP
3567 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
3568 if (!crt || !crt->cgroup_path)
4ad49000 3569 return;
8e274523 3570
ad009380
MY
3571 /* Cache the last CPU and memory usage values before we destroy the cgroup */
3572 (void) unit_get_cpu_usage(u, /* ret = */ NULL);
3573
3574 for (CGroupMemoryAccountingMetric metric = 0; metric <= _CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST; metric++)
3575 (void) unit_get_memory_accounting(u, metric, /* ret = */ NULL);
fe700f46 3576
b1994387 3577#if BPF_FRAMEWORK
352ec23c 3578 (void) bpf_restrict_fs_cleanup(u); /* Remove cgroup from the global LSM BPF map */
b1994387
ILG
3579#endif
3580
49b6babb 3581 unit_modify_nft_set(u, /* add = */ false);
dc7d69b3 3582
efdb0237
LP
3583 is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);
3584
9cc54544 3585 r = cg_trim_everywhere(u->manager->cgroup_supported, crt->cgroup_path, !is_root_slice);
3586 if (r < 0)
3587 /* One reason we could have failed here is, that the cgroup still contains a process.
3588 * However, if the cgroup becomes removable at a later time, it might be removed when
3589 * the containing slice is stopped. So even if we failed now, this unit shouldn't assume
3590 * that the cgroup is still realized the next time it is started. Do not return early
3591 * on error, continue cleanup. */
9cc54544 3592 log_unit_full_errno(u, r == -EBUSY ? LOG_DEBUG : LOG_WARNING, r, "Failed to destroy cgroup %s, ignoring: %m", empty_to_root(crt->cgroup_path));
8e274523 3593
efdb0237
LP
3594 if (is_root_slice)
3595 return;
3596
e08dabfe
AZ
3597 if (!unit_maybe_release_cgroup(u)) /* Returns true if the cgroup was released */
3598 return;
0a1eb06d 3599
9cc54544
LP
3600 crt = unit_get_cgroup_runtime(u); /* The above might have destroyed the runtime object, let's see if it's still there */
3601 if (!crt)
3602 return;
3603
3604 crt->cgroup_realized = false;
3605 crt->cgroup_realized_mask = 0;
3606 crt->cgroup_enabled_mask = 0;
084c7007 3607
9cc54544 3608 crt->bpf_device_control_installed = bpf_program_free(crt->bpf_device_control_installed);
8e274523
LP
3609}
3610
495e75ed
LP
3611int unit_search_main_pid(Unit *u, PidRef *ret) {
3612 _cleanup_(pidref_done) PidRef pidref = PIDREF_NULL;
4ad49000 3613 _cleanup_fclose_ FILE *f = NULL;
efdb0237 3614 int r;
4ad49000
LP
3615
3616 assert(u);
efdb0237 3617 assert(ret);
4ad49000 3618
9cc54544
LP
3619 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
3620 if (!crt || !crt->cgroup_path)
efdb0237 3621 return -ENXIO;
4ad49000 3622
9cc54544 3623 r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, crt->cgroup_path, &f);
efdb0237
LP
3624 if (r < 0)
3625 return r;
4ad49000 3626
495e75ed
LP
3627 for (;;) {
3628 _cleanup_(pidref_done) PidRef npidref = PIDREF_NULL;
4ad49000 3629
495e75ed
LP
3630 r = cg_read_pidref(f, &npidref);
3631 if (r < 0)
3632 return r;
3633 if (r == 0)
3634 break;
8e274523 3635
495e75ed 3636 if (pidref_equal(&pidref, &npidref)) /* seen already, cgroupfs reports duplicates! */
4ad49000 3637 continue;
8e274523 3638
6774be42 3639 if (pidref_is_my_child(&npidref) <= 0) /* ignore processes further down the tree */
495e75ed 3640 continue;
efdb0237 3641
495e75ed
LP
3642 if (pidref_is_set(&pidref) != 0)
3643 /* Dang, there's more than one daemonized PID in this group, so we don't know what
3644 * process is the main process. */
efdb0237 3645 return -ENODATA;
8e274523 3646
495e75ed 3647 pidref = TAKE_PIDREF(npidref);
8e274523
LP
3648 }
3649
495e75ed
LP
3650 if (!pidref_is_set(&pidref))
3651 return -ENODATA;
3652
3653 *ret = TAKE_PIDREF(pidref);
efdb0237
LP
3654 return 0;
3655}
3656
3657static int unit_watch_pids_in_path(Unit *u, const char *path) {
b3c5bad3 3658 _cleanup_closedir_ DIR *d = NULL;
efdb0237
LP
3659 _cleanup_fclose_ FILE *f = NULL;
3660 int ret = 0, r;
3661
3662 assert(u);
3663 assert(path);
3664
3665 r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f);
3666 if (r < 0)
495e75ed 3667 RET_GATHER(ret, r);
efdb0237 3668 else {
495e75ed
LP
3669 for (;;) {
3670 _cleanup_(pidref_done) PidRef pid = PIDREF_NULL;
3671
3672 r = cg_read_pidref(f, &pid);
3673 if (r == 0)
3674 break;
3675 if (r < 0) {
3676 RET_GATHER(ret, r);
3677 break;
3678 }
efdb0237 3679
495e75ed 3680 RET_GATHER(ret, unit_watch_pidref(u, &pid, /* exclusive= */ false));
efdb0237 3681 }
efdb0237
LP
3682 }
3683
3684 r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d);
495e75ed
LP
3685 if (r < 0)
3686 RET_GATHER(ret, r);
3687 else {
3688 for (;;) {
3689 _cleanup_free_ char *fn = NULL, *p = NULL;
3690
3691 r = cg_read_subgroup(d, &fn);
3692 if (r == 0)
3693 break;
3694 if (r < 0) {
3695 RET_GATHER(ret, r);
3696 break;
3697 }
efdb0237 3698
95b21cff 3699 p = path_join(empty_to_root(path), fn);
efdb0237
LP
3700 if (!p)
3701 return -ENOMEM;
3702
495e75ed 3703 RET_GATHER(ret, unit_watch_pids_in_path(u, p));
efdb0237 3704 }
efdb0237
LP
3705 }
3706
3707 return ret;
3708}
3709
11aef522
LP
3710int unit_synthesize_cgroup_empty_event(Unit *u) {
3711 int r;
3712
3713 assert(u);
3714
3715 /* Enqueue a synthetic cgroup empty event if this unit doesn't watch any PIDs anymore. This is compatibility
 3716 * support for non-unified systems where notifications aren't reliable, and hence we need to take whatever we can
 3717 * get as a notification source as soon as we stop having any useful PIDs to watch for. */
3718
9cc54544
LP
3719 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
3720 if (!crt || !crt->cgroup_path)
11aef522
LP
3721 return -ENOENT;
3722
3723 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
3724 if (r < 0)
3725 return r;
3726 if (r > 0) /* On unified we have reliable notifications, and don't need this */
3727 return 0;
3728
3729 if (!set_isempty(u->pids))
3730 return 0;
3731
3732 unit_add_to_cgroup_empty_queue(u);
3733 return 0;
3734}
3735
efdb0237 3736int unit_watch_all_pids(Unit *u) {
b4cccbc1
LP
3737 int r;
3738
efdb0237
LP
3739 assert(u);
3740
3741 /* Adds all PIDs from our cgroup to the set of PIDs we
3742 * watch. This is a fallback logic for cases where we do not
3743 * get reliable cgroup empty notifications: we try to use
3744 * SIGCHLD as replacement. */
3745
9cc54544
LP
3746 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
3747 if (!crt || !crt->cgroup_path)
efdb0237
LP
3748 return -ENOENT;
3749
c22800e4 3750 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
b4cccbc1
LP
3751 if (r < 0)
3752 return r;
3753 if (r > 0) /* On unified we can use proper notifications */
efdb0237
LP
3754 return 0;
3755
9cc54544 3756 return unit_watch_pids_in_path(u, crt->cgroup_path);
efdb0237
LP
3757}
3758
09e24654 3759static int on_cgroup_empty_event(sd_event_source *s, void *userdata) {
99534007 3760 Manager *m = ASSERT_PTR(userdata);
09e24654 3761 Unit *u;
efdb0237
LP
3762 int r;
3763
09e24654 3764 assert(s);
efdb0237 3765
09e24654
LP
3766 u = m->cgroup_empty_queue;
3767 if (!u)
efdb0237
LP
3768 return 0;
3769
09e24654
LP
3770 assert(u->in_cgroup_empty_queue);
3771 u->in_cgroup_empty_queue = false;
3772 LIST_REMOVE(cgroup_empty_queue, m->cgroup_empty_queue, u);
3773
3774 if (m->cgroup_empty_queue) {
3775 /* More stuff queued, let's make sure we remain enabled */
3776 r = sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);
3777 if (r < 0)
19a691a9 3778 log_debug_errno(r, "Failed to reenable cgroup empty event source, ignoring: %m");
09e24654 3779 }
efdb0237 3780
f7829525
NK
3781 /* Update state based on OOM kills before we notify about cgroup empty event */
3782 (void) unit_check_oom(u);
3783 (void) unit_check_oomd_kill(u);
3784
efdb0237
LP
3785 unit_add_to_gc_queue(u);
3786
380dd177
RP
3787 if (IN_SET(unit_active_state(u), UNIT_INACTIVE, UNIT_FAILED))
3788 unit_prune_cgroup(u);
3789 else if (UNIT_VTABLE(u)->notify_cgroup_empty)
efdb0237
LP
3790 UNIT_VTABLE(u)->notify_cgroup_empty(u);
3791
3792 return 0;
3793}
3794
09e24654
LP
3795void unit_add_to_cgroup_empty_queue(Unit *u) {
3796 int r;
3797
3798 assert(u);
3799
 3800 /* Note that there are four different ways cgroup empty events can reach us:
3801 *
3802 * 1. On the unified hierarchy we get an inotify event on the cgroup
3803 *
3804 * 2. On the legacy hierarchy, when running in system mode, we get a datagram on the cgroup agent socket
3805 *
3806 * 3. On the legacy hierarchy, when running in user mode, we get a D-Bus signal on the system bus
3807 *
3808 * 4. On the legacy hierarchy, in service units we start watching all processes of the cgroup for SIGCHLD as
3809 * soon as we get one SIGCHLD, to deal with unreliable cgroup notifications.
3810 *
3811 * Regardless which way we got the notification, we'll verify it here, and then add it to a separate
3812 * queue. This queue will be dispatched at a lower priority than the SIGCHLD handler, so that we always use
3813 * SIGCHLD if we can get it first, and only use the cgroup empty notifications if there's no SIGCHLD pending
3814 * (which might happen if the cgroup doesn't contain processes that are our own child, which is typically the
3815 * case for scope units). */
3816
3817 if (u->in_cgroup_empty_queue)
3818 return;
3819
3820 /* Let's verify that the cgroup is really empty */
9cc54544
LP
3821 r = unit_cgroup_is_empty(u);
3822 if (r <= 0)
09e24654
LP
3823 return;
3824
3825 LIST_PREPEND(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
3826 u->in_cgroup_empty_queue = true;
3827
3828 /* Trigger the defer event */
3829 r = sd_event_source_set_enabled(u->manager->cgroup_empty_event_source, SD_EVENT_ONESHOT);
3830 if (r < 0)
3831 log_debug_errno(r, "Failed to enable cgroup empty event source: %m");
3832}
3833
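/* A minimal sketch of notification path 1 from the comment above (unified hierarchy): emptiness shows
 * up as a modify event on the cgroup's "cgroup.events" file, whose "populated" key flips to 0. The
 * cgroup path and helper name are assumptions for illustration; in PID 1 this is what
 * unit_watch_cgroup() and on_cgroup_inotify_event() implement on top of sd-event. */

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <sys/inotify.h>
#include <unistd.h>

static int example_wait_until_cgroup_empty(void) {
        const char *events_file = "/sys/fs/cgroup/example.slice/example.service/cgroup.events";
        char buf[4096];
        int fd, wd;

        fd = inotify_init1(IN_CLOEXEC);
        if (fd < 0)
                return -1;

        wd = inotify_add_watch(fd, events_file, IN_MODIFY);
        if (wd < 0) {
                close(fd);
                return -1;
        }

        for (;;) {
                FILE *f;
                char line[64];
                bool populated = true;

                f = fopen(events_file, "re");
                if (!f)
                        break;
                while (fgets(line, sizeof(line), f))
                        if (strcmp(line, "populated 0\n") == 0)
                                populated = false;
                fclose(f);

                if (!populated)
                        break;

                /* Block until cgroup.events is modified again; a real program would plug this fd
                 * into its event loop instead. */
                if (read(fd, buf, sizeof(buf)) < 0)
                        break;
        }

        close(fd);
        return 0;
}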
d9e45bc3
MS
3834static void unit_remove_from_cgroup_empty_queue(Unit *u) {
3835 assert(u);
3836
3837 if (!u->in_cgroup_empty_queue)
3838 return;
3839
3840 LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
3841 u->in_cgroup_empty_queue = false;
3842}
3843
fe8d22fb
AZ
3844int unit_check_oomd_kill(Unit *u) {
3845 _cleanup_free_ char *value = NULL;
3846 bool increased;
3847 uint64_t n = 0;
3848 int r;
3849
9cc54544
LP
3850 assert(u);
3851
3852 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
3853 if (!crt || !crt->cgroup_path)
fe8d22fb
AZ
3854 return 0;
3855
3856 r = cg_all_unified();
3857 if (r < 0)
3858 return log_unit_debug_errno(u, r, "Couldn't determine whether we are in all unified mode: %m");
3859 else if (r == 0)
3860 return 0;
3861
9cc54544 3862 r = cg_get_xattr_malloc(crt->cgroup_path, "user.oomd_ooms", &value);
00675c36 3863 if (r < 0 && !ERRNO_IS_XATTR_ABSENT(r))
fe8d22fb
AZ
3864 return r;
3865
3866 if (!isempty(value)) {
3867 r = safe_atou64(value, &n);
3868 if (r < 0)
3869 return r;
3870 }
3871
9cc54544
LP
3872 increased = n > crt->managed_oom_kill_last;
3873 crt->managed_oom_kill_last = n;
fe8d22fb
AZ
3874
3875 if (!increased)
3876 return 0;
3877
38c41427
NK
3878 n = 0;
3879 value = mfree(value);
9cc54544 3880 r = cg_get_xattr_malloc(crt->cgroup_path, "user.oomd_kill", &value);
38c41427
NK
3881 if (r >= 0 && !isempty(value))
3882 (void) safe_atou64(value, &n);
3883
fe8d22fb 3884 if (n > 0)
c2503e35
RH
3885 log_unit_struct(u, LOG_NOTICE,
3886 "MESSAGE_ID=" SD_MESSAGE_UNIT_OOMD_KILL_STR,
3887 LOG_UNIT_INVOCATION_ID(u),
38c41427
NK
3888 LOG_UNIT_MESSAGE(u, "systemd-oomd killed %"PRIu64" process(es) in this unit.", n),
3889 "N_PROCESSES=%" PRIu64, n);
3890 else
3891 log_unit_struct(u, LOG_NOTICE,
3892 "MESSAGE_ID=" SD_MESSAGE_UNIT_OOMD_KILL_STR,
3893 LOG_UNIT_INVOCATION_ID(u),
3894 LOG_UNIT_MESSAGE(u, "systemd-oomd killed some process(es) in this unit."));
3895
3896 unit_notify_cgroup_oom(u, /* ManagedOOM= */ true);
fe8d22fb
AZ
3897
3898 return 1;
3899}
3900
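/* A minimal sketch of what the cg_get_xattr_malloc() calls above amount to: systemd-oomd leaves its
 * kill counters as user xattrs on the cgroup directory, readable with plain getxattr(). The helper
 * name is an assumption for illustration only. */

#include <stdlib.h>
#include <sys/types.h>
#include <sys/xattr.h>

static int example_read_oomd_kill_count(const char *cgroup_dir, unsigned long long *ret) {
        char buf[64];
        ssize_t n;

        n = getxattr(cgroup_dir, "user.oomd_kill", buf, sizeof(buf) - 1);
        if (n < 0)
                return -1; /* ENODATA if systemd-oomd never recorded a kill here */

        buf[n] = 0;
        *ret = strtoull(buf, NULL, 10);
        return 0;
}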
2ba6ae6b 3901int unit_check_oom(Unit *u) {
afcfaa69
LP
3902 _cleanup_free_ char *oom_kill = NULL;
3903 bool increased;
3904 uint64_t c;
3905 int r;
3906
9cc54544
LP
3907 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
3908 if (!crt || !crt->cgroup_path)
afcfaa69
LP
3909 return 0;
3910
9cc54544
LP
3911 r = cg_get_keyed_attribute(
3912 "memory",
3913 crt->cgroup_path,
3914 "memory.events",
3915 STRV_MAKE("oom_kill"),
3916 &oom_kill);
3917 if (IN_SET(r, -ENOENT, -ENXIO)) /* Handle gracefully if cgroup or oom_kill attribute don't exist */
3918 c = 0;
3919 else if (r < 0)
afcfaa69 3920 return log_unit_debug_errno(u, r, "Failed to read oom_kill field of memory.events cgroup attribute: %m");
fc594dee
LP
3921 else {
3922 r = safe_atou64(oom_kill, &c);
3923 if (r < 0)
3924 return log_unit_debug_errno(u, r, "Failed to parse oom_kill field: %m");
3925 }
afcfaa69 3926
9cc54544
LP
3927 increased = c > crt->oom_kill_last;
3928 crt->oom_kill_last = c;
afcfaa69
LP
3929
3930 if (!increased)
3931 return 0;
3932
c2503e35
RH
3933 log_unit_struct(u, LOG_NOTICE,
3934 "MESSAGE_ID=" SD_MESSAGE_UNIT_OUT_OF_MEMORY_STR,
3935 LOG_UNIT_INVOCATION_ID(u),
3936 LOG_UNIT_MESSAGE(u, "A process of this unit has been killed by the OOM killer."));
afcfaa69 3937
38c41427 3938 unit_notify_cgroup_oom(u, /* ManagedOOM= */ false);
afcfaa69
LP
3939
3940 return 1;
3941}
3942
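/* A minimal sketch of the keyed-attribute read above: memory.events is a flat "key value" file, and
 * the OOM kill counter is the value of its "oom_kill" line. The helper name is an assumption for
 * illustration; cg_get_keyed_attribute() is the generic version of this used by PID 1. */

#include <stdio.h>

static int example_read_oom_kill(const char *cgroup_dir, unsigned long long *ret) {
        char path[512], line[128];
        FILE *f;

        snprintf(path, sizeof(path), "%s/memory.events", cgroup_dir);

        f = fopen(path, "re");
        if (!f)
                return -1;

        while (fgets(line, sizeof(line), f))
                if (sscanf(line, "oom_kill %llu", ret) == 1) {
                        fclose(f);
                        return 0;
                }

        fclose(f);
        return -1; /* key not present, e.g. on cgroup v1 */
}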
3943static int on_cgroup_oom_event(sd_event_source *s, void *userdata) {
99534007 3944 Manager *m = ASSERT_PTR(userdata);
afcfaa69
LP
3945 Unit *u;
3946 int r;
3947
3948 assert(s);
afcfaa69
LP
3949
3950 u = m->cgroup_oom_queue;
3951 if (!u)
3952 return 0;
3953
3954 assert(u->in_cgroup_oom_queue);
3955 u->in_cgroup_oom_queue = false;
3956 LIST_REMOVE(cgroup_oom_queue, m->cgroup_oom_queue, u);
3957
3958 if (m->cgroup_oom_queue) {
3959 /* More stuff queued, let's make sure we remain enabled */
3960 r = sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);
3961 if (r < 0)
3962 log_debug_errno(r, "Failed to reenable cgroup oom event source, ignoring: %m");
3963 }
3964
3965 (void) unit_check_oom(u);
935f8042
LP
3966 unit_add_to_gc_queue(u);
3967
afcfaa69
LP
3968 return 0;
3969}
3970
3971static void unit_add_to_cgroup_oom_queue(Unit *u) {
3972 int r;
3973
3974 assert(u);
3975
3976 if (u->in_cgroup_oom_queue)
3977 return;
9cc54544
LP
3978
3979 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
3980 if (!crt || !crt->cgroup_path)
afcfaa69
LP
3981 return;
3982
3983 LIST_PREPEND(cgroup_oom_queue, u->manager->cgroup_oom_queue, u);
3984 u->in_cgroup_oom_queue = true;
3985
3986 /* Trigger the defer event */
3987 if (!u->manager->cgroup_oom_event_source) {
3988 _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;
3989
3990 r = sd_event_add_defer(u->manager->event, &s, on_cgroup_oom_event, u->manager);
3991 if (r < 0) {
3992 log_error_errno(r, "Failed to create cgroup oom event source: %m");
3993 return;
3994 }
3995
d42b61d2 3996 r = sd_event_source_set_priority(s, EVENT_PRIORITY_CGROUP_OOM);
afcfaa69
LP
3997 if (r < 0) {
3998 log_error_errno(r, "Failed to set priority of cgroup oom event source: %m");
3999 return;
4000 }
4001
4002 (void) sd_event_source_set_description(s, "cgroup-oom");
4003 u->manager->cgroup_oom_event_source = TAKE_PTR(s);
4004 }
4005
4006 r = sd_event_source_set_enabled(u->manager->cgroup_oom_event_source, SD_EVENT_ONESHOT);
4007 if (r < 0)
4008 log_error_errno(r, "Failed to enable cgroup oom event source: %m");
4009}
4010
d9e45bc3
MS
4011static int unit_check_cgroup_events(Unit *u) {
4012 char *values[2] = {};
4013 int r;
4014
4015 assert(u);
4016
9cc54544
LP
4017 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
4018 if (!crt || !crt->cgroup_path)
869f52f2
DS
4019 return 0;
4020
9cc54544
LP
4021 r = cg_get_keyed_attribute_graceful(
4022 SYSTEMD_CGROUP_CONTROLLER,
4023 crt->cgroup_path,
4024 "cgroup.events",
4025 STRV_MAKE("populated", "frozen"),
4026 values);
d9e45bc3
MS
4027 if (r < 0)
4028 return r;
4029
4030 /* The cgroup.events notifications can be merged together so act as we saw the given state for the
4031 * first time. The functions we call to handle given state are idempotent, which makes them
4032 * effectively remember the previous state. */
4033 if (values[0]) {
4034 if (streq(values[0], "1"))
4035 unit_remove_from_cgroup_empty_queue(u);
4036 else
4037 unit_add_to_cgroup_empty_queue(u);
4038 }
4039
16b6af6a
AV
4040 /* Disregard freezer state changes due to operations not initiated by us.
4041 * See: https://github.com/systemd/systemd/pull/13512/files#r416469963 and
4042 * https://github.com/systemd/systemd/pull/13512#issuecomment-573007207 */
4043 if (values[1] && IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_FREEZING_BY_PARENT, FREEZER_THAWING)) {
d9e45bc3
MS
4044 if (streq(values[1], "0"))
4045 unit_thawed(u);
4046 else
4047 unit_frozen(u);
4048 }
4049
4050 free(values[0]);
4051 free(values[1]);
4052
4053 return 0;
4054}
4055
efdb0237 4056static int on_cgroup_inotify_event(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
99534007 4057 Manager *m = ASSERT_PTR(userdata);
efdb0237
LP
4058
4059 assert(s);
4060 assert(fd >= 0);
efdb0237
LP
4061
4062 for (;;) {
4063 union inotify_event_buffer buffer;
efdb0237
LP
4064 ssize_t l;
4065
4066 l = read(fd, &buffer, sizeof(buffer));
4067 if (l < 0) {
8add30a0 4068 if (ERRNO_IS_TRANSIENT(errno))
efdb0237
LP
4069 return 0;
4070
4071 return log_error_errno(errno, "Failed to read control group inotify events: %m");
4072 }
4073
00adc340 4074 FOREACH_INOTIFY_EVENT_WARN(e, buffer, l) {
efdb0237
LP
4075 Unit *u;
4076
4077 if (e->wd < 0)
4078 /* Queue overflow has no watch descriptor */
4079 continue;
4080
4081 if (e->mask & IN_IGNORED)
4082 /* The watch was just removed */
4083 continue;
4084
afcfaa69
LP
4085 /* Note that inotify might deliver events for a watch even after it was removed,
4086 * because it was queued before the removal. Let's ignore this here safely. */
4087
0bb814c2 4088 u = hashmap_get(m->cgroup_control_inotify_wd_unit, INT_TO_PTR(e->wd));
afcfaa69 4089 if (u)
d9e45bc3 4090 unit_check_cgroup_events(u);
efdb0237 4091
afcfaa69
LP
4092 u = hashmap_get(m->cgroup_memory_inotify_wd_unit, INT_TO_PTR(e->wd));
4093 if (u)
4094 unit_add_to_cgroup_oom_queue(u);
efdb0237
LP
4095 }
4096 }
8e274523
LP
4097}
4098
17f14955
RG
4099static int cg_bpf_mask_supported(CGroupMask *ret) {
4100 CGroupMask mask = 0;
4101 int r;
4102
4103 /* BPF-based firewall */
4104 r = bpf_firewall_supported();
ad13559e
YW
4105 if (r < 0)
4106 return r;
17f14955
RG
4107 if (r > 0)
4108 mask |= CGROUP_MASK_BPF_FIREWALL;
4109
084c7007
RG
4110 /* BPF-based device access control */
4111 r = bpf_devices_supported();
ad13559e
YW
4112 if (r < 0)
4113 return r;
084c7007
RG
4114 if (r > 0)
4115 mask |= CGROUP_MASK_BPF_DEVICES;
4116
506ea51b
JK
4117 /* BPF pinned prog */
4118 r = bpf_foreign_supported();
ad13559e
YW
4119 if (r < 0)
4120 return r;
506ea51b
JK
4121 if (r > 0)
4122 mask |= CGROUP_MASK_BPF_FOREIGN;
4123
a8e5eb17 4124 /* BPF-based bind{4|6} hooks */
cd09a5f3 4125 r = bpf_socket_bind_supported();
ad13559e
YW
4126 if (r < 0)
4127 return r;
a8e5eb17
JK
4128 if (r > 0)
4129 mask |= CGROUP_MASK_BPF_SOCKET_BIND;
4130
6f50d4f7 4131 /* BPF-based cgroup_skb/{egress|ingress} hooks */
62e22490 4132 r = bpf_restrict_ifaces_supported();
ad13559e
YW
4133 if (r < 0)
4134 return r;
6f50d4f7
MV
4135 if (r > 0)
4136 mask |= CGROUP_MASK_BPF_RESTRICT_NETWORK_INTERFACES;
4137
17f14955
RG
4138 *ret = mask;
4139 return 0;
4140}
4141
8e274523 4142int manager_setup_cgroup(Manager *m) {
9444b1f2 4143 _cleanup_free_ char *path = NULL;
10bd3e2e 4144 const char *scope_path;
b4cccbc1 4145 int r, all_unified;
17f14955 4146 CGroupMask mask;
efdb0237 4147 char *e;
8e274523
LP
4148
4149 assert(m);
4150
35d2e7ec 4151 /* 1. Determine hierarchy */
efdb0237 4152 m->cgroup_root = mfree(m->cgroup_root);
9444b1f2 4153 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root);
4154 if (r < 0)
4155 return log_error_errno(r, "Cannot determine cgroup we are running in: %m");
8e274523 4156
efdb0237
LP
4157 /* Chop off the init scope, if we are already located in it */
4158 e = endswith(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
0d8c31ff 4159
efdb0237
LP
4160 /* LEGACY: Also chop off the system slice if we are in
4161 * it. This is to support live upgrades from older systemd
4162 * versions where PID 1 was moved there. Also see
4163 * cg_get_root_path(). */
463d0d15 4164 if (!e && MANAGER_IS_SYSTEM(m)) {
9444b1f2 4165 e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE);
15c60e99 4166 if (!e)
efdb0237 4167 e = endswith(m->cgroup_root, "/system"); /* even more legacy */
0baf24dd 4168 }
efdb0237
LP
4169 if (e)
4170 *e = 0;
7ccfb64a 4171
7546145e
LP
4172 /* And make sure to store away the root value without trailing slash, even for the root dir, so that we can
4173 * easily prepend it everywhere. */
4174 delete_trailing_chars(m->cgroup_root, "/");
8e274523 4175
35d2e7ec 4176 /* 2. Show data */
9444b1f2 4177 r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
4178 if (r < 0)
4179 return log_error_errno(r, "Cannot find cgroup mount point: %m");
8e274523 4180
d4d99bc6 4181 r = cg_unified();
4182 if (r < 0)
4183 return log_error_errno(r, "Couldn't determine if we are running in the unified hierarchy: %m");
5da38d07 4184
b4cccbc1 4185 all_unified = cg_all_unified();
4186 if (all_unified < 0)
4187 return log_error_errno(all_unified, "Couldn't determine whether we are in all unified mode: %m");
4188 if (all_unified > 0)
efdb0237 4189 log_debug("Unified cgroup hierarchy is located at %s.", path);
b4cccbc1 4190 else {
c22800e4 4191 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
4192 if (r < 0)
4193 return log_error_errno(r, "Failed to determine whether systemd's own controller is in unified mode: %m");
4194 if (r > 0)
4195 log_debug("Unified cgroup hierarchy is located at %s. Controllers are on legacy hierarchies.", path);
4196 else
4197 log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER_LEGACY ". File system hierarchy is at %s.", path);
4198 }
efdb0237 4199
09e24654 4200 /* 3. Allocate cgroup empty defer event source */
5dcadb4c 4201 m->cgroup_empty_event_source = sd_event_source_disable_unref(m->cgroup_empty_event_source);
09e24654
LP
4202 r = sd_event_add_defer(m->event, &m->cgroup_empty_event_source, on_cgroup_empty_event, m);
4203 if (r < 0)
4204 return log_error_errno(r, "Failed to create cgroup empty event source: %m");
4205
cbe83389
LP
4206 /* Schedule cgroup empty checks early, but after having processed service notification messages or
4207 * SIGCHLD signals, so that a cgroup running empty is always just the last safety net of
4208 * notification, and we collected the metadata the notification and SIGCHLD stuff offers first. */
d42b61d2 4209 r = sd_event_source_set_priority(m->cgroup_empty_event_source, EVENT_PRIORITY_CGROUP_EMPTY);
09e24654
LP
4210 if (r < 0)
4211 return log_error_errno(r, "Failed to set priority of cgroup empty event source: %m");
4212
4213 r = sd_event_source_set_enabled(m->cgroup_empty_event_source, SD_EVENT_OFF);
4214 if (r < 0)
4215 return log_error_errno(r, "Failed to disable cgroup empty event source: %m");
4216
4217 (void) sd_event_source_set_description(m->cgroup_empty_event_source, "cgroup-empty");
4218
4219 /* 4. Install notifier inotify object, or agent */
10bd3e2e 4220 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0) {
c6c18be3 4221
09e24654 4222 /* In the unified hierarchy we can get cgroup empty notifications via inotify. */
efdb0237 4223
5dcadb4c 4224 m->cgroup_inotify_event_source = sd_event_source_disable_unref(m->cgroup_inotify_event_source);
10bd3e2e 4225 safe_close(m->cgroup_inotify_fd);
efdb0237 4226
10bd3e2e
LP
4227 m->cgroup_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
4228 if (m->cgroup_inotify_fd < 0)
4229 return log_error_errno(errno, "Failed to create control group inotify object: %m");
efdb0237 4230
10bd3e2e
LP
4231 r = sd_event_add_io(m->event, &m->cgroup_inotify_event_source, m->cgroup_inotify_fd, EPOLLIN, on_cgroup_inotify_event, m);
4232 if (r < 0)
4233 return log_error_errno(r, "Failed to watch control group inotify object: %m");
efdb0237 4234
cbe83389
LP
4235 /* Process cgroup empty notifications early. Note that when this event is dispatched it'll
4236 * just add the unit to a cgroup empty queue, hence let's run earlier than that. Also see
4237 * handling of cgroup agent notifications, for the classic cgroup hierarchy support. */
d42b61d2 4238 r = sd_event_source_set_priority(m->cgroup_inotify_event_source, EVENT_PRIORITY_CGROUP_INOTIFY);
10bd3e2e
LP
4239 if (r < 0)
4240 return log_error_errno(r, "Failed to set priority of inotify event source: %m");
efdb0237 4241
10bd3e2e 4242 (void) sd_event_source_set_description(m->cgroup_inotify_event_source, "cgroup-inotify");
efdb0237 4243
611c4f8a 4244 } else if (MANAGER_IS_SYSTEM(m) && manager_owns_host_root_cgroup(m) && !MANAGER_IS_TEST_RUN(m)) {
efdb0237 4245
10bd3e2e
LP
 4246 /* On the legacy hierarchy we only get notifications via cgroup agents. (This isn't really reliable,
 4247 * since no events are generated when control groups that still have children run empty.) */
8e274523 4248
ce906769 4249 r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, SYSTEMD_CGROUPS_AGENT_PATH);
23bbb0de 4250 if (r < 0)
10bd3e2e
LP
4251 log_warning_errno(r, "Failed to install release agent, ignoring: %m");
4252 else if (r > 0)
4253 log_debug("Installed release agent.");
4254 else if (r == 0)
4255 log_debug("Release agent already installed.");
4256 }
efdb0237 4257
09e24654 4258 /* 5. Make sure we are in the special "init.scope" unit in the root slice. */
10bd3e2e
LP
4259 scope_path = strjoina(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
4260 r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
aa77e234
MS
4261 if (r >= 0) {
4262 /* Also, move all other userspace processes remaining in the root cgroup into that scope. */
4263 r = cg_migrate(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
4264 if (r < 0)
4265 log_warning_errno(r, "Couldn't move remaining userspace processes, ignoring: %m");
c6c18be3 4266
aa77e234
MS
4267 /* 6. And pin it, so that it cannot be unmounted */
4268 safe_close(m->pin_cgroupfs_fd);
4269 m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
4270 if (m->pin_cgroupfs_fd < 0)
4271 return log_error_errno(errno, "Failed to open pin file: %m");
0d8c31ff 4272
638cece4 4273 } else if (!MANAGER_IS_TEST_RUN(m))
aa77e234 4274 return log_error_errno(r, "Failed to create %s control group: %m", scope_path);
10bd3e2e 4275
09e24654 4276 /* 7. Always enable hierarchical support if it exists... */
638cece4 4277 if (!all_unified && !MANAGER_IS_TEST_RUN(m))
10bd3e2e 4278 (void) cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");
c6c18be3 4279
17f14955 4280 /* 8. Figure out which controllers are supported */
0fa7b500 4281 r = cg_mask_supported_subtree(m->cgroup_root, &m->cgroup_supported);
efdb0237
LP
4282 if (r < 0)
4283 return log_error_errno(r, "Failed to determine supported controllers: %m");
17f14955
RG
4284
4285 /* 9. Figure out which bpf-based pseudo-controllers are supported */
4286 r = cg_bpf_mask_supported(&mask);
4287 if (r < 0)
4288 return log_error_errno(r, "Failed to determine supported bpf-based pseudo-controllers: %m");
4289 m->cgroup_supported |= mask;
4290
4291 /* 10. Log which controllers are supported */
e8616626
ZJS
4292 for (CGroupController c = 0; c < _CGROUP_CONTROLLER_MAX; c++)
4293 log_debug("Controller '%s' supported: %s", cgroup_controller_to_string(c),
4294 yes_no(m->cgroup_supported & CGROUP_CONTROLLER_TO_MASK(c)));
9156e799 4295
a32360f1 4296 return 0;
8e274523
LP
4297}
4298
c6c18be3 4299void manager_shutdown_cgroup(Manager *m, bool delete) {
8e274523
LP
4300 assert(m);
4301
9444b1f2
LP
4302 /* We can't really delete the group, since we are in it. But
4303 * let's trim it. */
5dd2f5ff 4304 if (delete && m->cgroup_root && !FLAGS_SET(m->test_run_flags, MANAGER_TEST_RUN_MINIMAL))
efdb0237
LP
4305 (void) cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);
4306
5dcadb4c 4307 m->cgroup_empty_event_source = sd_event_source_disable_unref(m->cgroup_empty_event_source);
09e24654 4308
0bb814c2 4309 m->cgroup_control_inotify_wd_unit = hashmap_free(m->cgroup_control_inotify_wd_unit);
afcfaa69 4310 m->cgroup_memory_inotify_wd_unit = hashmap_free(m->cgroup_memory_inotify_wd_unit);
efdb0237 4311
5dcadb4c 4312 m->cgroup_inotify_event_source = sd_event_source_disable_unref(m->cgroup_inotify_event_source);
efdb0237 4313 m->cgroup_inotify_fd = safe_close(m->cgroup_inotify_fd);
8e274523 4314
03e334a1 4315 m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd);
c6c18be3 4316
efdb0237 4317 m->cgroup_root = mfree(m->cgroup_root);
8e274523
LP
4318}
4319
4ad49000 4320Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) {
acb14d31 4321 char *p;
4ad49000 4322 Unit *u;
acb14d31
LP
4323
4324 assert(m);
4325 assert(cgroup);
acb14d31 4326
4ad49000
LP
4327 u = hashmap_get(m->cgroup_unit, cgroup);
4328 if (u)
4329 return u;
acb14d31 4330
2f82562b 4331 p = strdupa_safe(cgroup);
acb14d31
LP
4332 for (;;) {
4333 char *e;
4334
4335 e = strrchr(p, '/');
efdb0237
LP
4336 if (!e || e == p)
4337 return hashmap_get(m->cgroup_unit, SPECIAL_ROOT_SLICE);
acb14d31
LP
4338
4339 *e = 0;
4340
4ad49000
LP
4341 u = hashmap_get(m->cgroup_unit, p);
4342 if (u)
4343 return u;
acb14d31
LP
4344 }
4345}
4346
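/* A minimal sketch of the prefix walk in manager_get_unit_by_cgroup() above, on a plain string: a
 * path like "/system.slice/foo.service/sub" is shortened one component at a time until a match (here
 * just printed) or the root is reached. The helper name is an assumption for illustration only. */

#include <stdio.h>
#include <string.h>

static void example_walk_cgroup_path_upwards(const char *cgroup) {
        char p[256];

        snprintf(p, sizeof(p), "%s", cgroup);

        for (;;) {
                char *e;

                printf("candidate: %s\n", p);

                e = strrchr(p, '/');
                if (!e || e == p)
                        break;  /* nothing left but the root slice */

                *e = 0;         /* drop the last component and try again */
        }
}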
d70dfe1b 4347Unit *manager_get_unit_by_pidref_cgroup(Manager *m, const PidRef *pid) {
4ad49000 4348 _cleanup_free_ char *cgroup = NULL;
8e274523 4349
8c47c732
LP
4350 assert(m);
4351
a9062242 4352 if (cg_pidref_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup) < 0)
b3ac818b
LP
4353 return NULL;
4354
4355 return manager_get_unit_by_cgroup(m, cgroup);
4356}
4357
d70dfe1b 4358Unit *manager_get_unit_by_pidref_watching(Manager *m, const PidRef *pid) {
62a76913 4359 Unit *u, **array;
b3ac818b
LP
4360
4361 assert(m);
4362
495e75ed
LP
4363 if (!pidref_is_set(pid))
4364 return NULL;
62a76913 4365
495e75ed
LP
4366 u = hashmap_get(m->watch_pids, pid);
4367 if (u)
4368 return u;
4369
4370 array = hashmap_get(m->watch_pids_more, pid);
4371 if (array)
4372 return array[0];
4373
4374 return NULL;
4375}
4376
d70dfe1b 4377Unit *manager_get_unit_by_pidref(Manager *m, const PidRef *pid) {
495e75ed
LP
4378 Unit *u;
4379
4380 assert(m);
4381
4382 /* Note that a process might be owned by multiple units, we return only one here, which is good
4383 * enough for most cases, though not strictly correct. We prefer the one reported by cgroup
4384 * membership, as that's the most relevant one as children of the process will be assigned to that
4385 * one, too, before all else. */
4386
4387 if (!pidref_is_set(pid))
8c47c732
LP
4388 return NULL;
4389
a7a87769 4390 if (pidref_is_self(pid))
efdb0237 4391 return hashmap_get(m->units, SPECIAL_INIT_SCOPE);
495e75ed
LP
4392 if (pid->pid == 1)
4393 return NULL;
efdb0237 4394
495e75ed 4395 u = manager_get_unit_by_pidref_cgroup(m, pid);
5fe8876b
LP
4396 if (u)
4397 return u;
4398
495e75ed 4399 u = manager_get_unit_by_pidref_watching(m, pid);
5fe8876b
LP
4400 if (u)
4401 return u;
4402
62a76913 4403 return NULL;
6dde1f33 4404}
4fbf50b3 4405
495e75ed
LP
4406Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {
4407 assert(m);
4408
4409 if (!pid_is_valid(pid))
4410 return NULL;
4411
4412 return manager_get_unit_by_pidref(m, &PIDREF_MAKE_FROM_PID(pid));
4413}
4414
4ad49000
LP
4415int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
4416 Unit *u;
4fbf50b3 4417
4ad49000
LP
4418 assert(m);
4419 assert(cgroup);
4fbf50b3 4420
09e24654
LP
4421 /* Called on the legacy hierarchy whenever we get an explicit cgroup notification from the cgroup agent process
4422 * or from the --system instance */
4423
d8fdc620
LP
4424 log_debug("Got cgroup empty notification for: %s", cgroup);
4425
4ad49000 4426 u = manager_get_unit_by_cgroup(m, cgroup);
5ad096b3
LP
4427 if (!u)
4428 return 0;
b56c28c3 4429
09e24654
LP
4430 unit_add_to_cgroup_empty_queue(u);
4431 return 1;
5ad096b3
LP
4432}
4433
93ff34e4 4434int unit_get_memory_available(Unit *u, uint64_t *ret) {
8db929a1 4435 uint64_t available = UINT64_MAX, current = 0;
93ff34e4
LB
4436
4437 assert(u);
4438 assert(ret);
4439
4440 /* If data from cgroups can be accessed, try to find out how much more memory a unit can
4441 * claim before hitting the configured cgroup limits (if any). Consider both MemoryHigh
4442 * and MemoryMax, and also any slice the unit might be nested below. */
4443
727cea76 4444 do {
8db929a1 4445 uint64_t unit_available, unit_limit = UINT64_MAX;
727cea76 4446 CGroupContext *unit_context;
93ff34e4
LB
4447
4448 /* No point in continuing if we can't go any lower */
4449 if (available == 0)
4450 break;
4451
727cea76
MK
4452 unit_context = unit_get_cgroup_context(u);
4453 if (!unit_context)
4454 return -ENODATA;
93ff34e4 4455
9cc54544
LP
4456 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
4457 if (!crt || !crt->cgroup_path)
93ff34e4
LB
4458 continue;
4459
8db929a1
MK
4460 (void) unit_get_memory_current(u, &current);
4461 /* in case of error, the previous value of 'current' propagates as a lower bound */
4462
727cea76
MK
4463 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
4464 unit_limit = physical_memory();
4465 else if (unit_context->memory_max == UINT64_MAX && unit_context->memory_high == UINT64_MAX)
93ff34e4 4466 continue;
727cea76 4467 unit_limit = MIN3(unit_limit, unit_context->memory_max, unit_context->memory_high);
93ff34e4 4468
8db929a1 4469 unit_available = LESS_BY(unit_limit, current);
727cea76
MK
4470 available = MIN(unit_available, available);
4471 } while ((u = UNIT_GET_SLICE(u)));
93ff34e4
LB
4472
4473 *ret = available;
4474
4475 return 0;
4476}
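A worked sketch of the computation above under stated assumptions: a two-level ancestry (a service plus its parent slice) with made-up limits and usage; the available figure is the smallest headroom found anywhere along the chain.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define LESS_BY(a, b) ((a) > (b) ? (a) - (b) : 0)
#define MIN(a, b)     ((a) < (b) ? (a) : (b))

int main(void) {
        /* Hypothetical limit and current usage per level, innermost first. */
        struct { uint64_t limit, current; } chain[] = {
                {  512ull * 1024 * 1024,  200ull * 1024 * 1024 }, /* foo.service */
                { 2048ull * 1024 * 1024, 1900ull * 1024 * 1024 }, /* parent slice */
        };

        uint64_t available = UINT64_MAX;
        for (size_t i = 0; i < sizeof(chain) / sizeof(chain[0]); i++)
                available = MIN(available, LESS_BY(chain[i].limit, chain[i].current));

        /* 148 MiB here: the slice headroom is smaller than the service's own headroom. */
        printf("available: %" PRIu64 " bytes\n", available);
        return 0;
}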
4477
5ad096b3 4478int unit_get_memory_current(Unit *u, uint64_t *ret) {
5ad096b3
LP
4479 int r;
4480
9824ab1f
MY
4481 // FIXME: Merge this into unit_get_memory_accounting after support for cgroup v1 is dropped
4482
5ad096b3
LP
4483 assert(u);
4484 assert(ret);
4485
2e4025c0 4486 if (!UNIT_CGROUP_BOOL(u, memory_accounting))
cf3b4be1
LP
4487 return -ENODATA;
4488
9cc54544
LP
4489 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
4490 if (!crt || !crt->cgroup_path)
5ad096b3
LP
4491 return -ENODATA;
4492
1f73aa00 4493 /* The root cgroup doesn't expose this information; let's get it from /proc instead */
611c4f8a 4494 if (unit_has_host_root_cgroup(u))
c482724a 4495 return procfs_memory_get_used(ret);
1f73aa00 4496
9cc54544 4497 if ((crt->cgroup_realized_mask & CGROUP_MASK_MEMORY) == 0)
5ad096b3
LP
4498 return -ENODATA;
4499
b4cccbc1
LP
4500 r = cg_all_unified();
4501 if (r < 0)
4502 return r;
5ad096b3 4503
9cc54544 4504 return cg_get_attribute_as_uint64("memory", crt->cgroup_path, r > 0 ? "memory.current" : "memory.usage_in_bytes", ret);
5ad096b3
LP
4505}
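On the unified hierarchy the call above reduces to parsing one integer out of the unit's memory.current file. A minimal standalone sketch; the cgroup path is illustrative only.

#include <errno.h>
#include <inttypes.h>
#include <stdio.h>

/* Read one uint64 attribute such as memory.current; returns 0 or a negative errno. */
static int read_u64_attribute(const char *path, uint64_t *ret) {
        FILE *f = fopen(path, "re");
        if (!f)
                return -errno;

        uint64_t v;
        int n = fscanf(f, "%" SCNu64, &v);
        fclose(f);
        if (n != 1)
                return -EINVAL;

        *ret = v;
        return 0;
}

int main(void) {
        uint64_t bytes;

        /* Hypothetical cgroup path, assuming cgroup v2 mounted at /sys/fs/cgroup. */
        if (read_u64_attribute("/sys/fs/cgroup/system.slice/foo.service/memory.current", &bytes) >= 0)
                printf("memory.current: %" PRIu64 " bytes\n", bytes);
        return 0;
}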
4506
9824ab1f
MY
4507int unit_get_memory_accounting(Unit *u, CGroupMemoryAccountingMetric metric, uint64_t *ret) {
4508
4509 static const char* const attributes_table[_CGROUP_MEMORY_ACCOUNTING_METRIC_MAX] = {
4510 [CGROUP_MEMORY_PEAK] = "memory.peak",
4511 [CGROUP_MEMORY_SWAP_CURRENT] = "memory.swap.current",
4512 [CGROUP_MEMORY_SWAP_PEAK] = "memory.swap.peak",
4513 [CGROUP_MEMORY_ZSWAP_CURRENT] = "memory.zswap.current",
4514 };
4515
4516 uint64_t bytes;
f17b07f4 4517 bool updated = false;
6c71db76
FS
4518 int r;
4519
4520 assert(u);
9824ab1f
MY
4521 assert(metric >= 0);
4522 assert(metric < _CGROUP_MEMORY_ACCOUNTING_METRIC_MAX);
6c71db76 4523
37533c94
FS
4524 if (!UNIT_CGROUP_BOOL(u, memory_accounting))
4525 return -ENODATA;
4526
9cc54544
LP
4527 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
4528 if (!crt)
4529 return -ENODATA;
4530 if (!crt->cgroup_path)
f17b07f4 4531 /* If the cgroup is already gone, we try to find the last cached value. */
a8aed6a9 4532 goto finish;
6c71db76
FS
4533
4534 /* The root cgroup doesn't expose this information. */
4535 if (unit_has_host_root_cgroup(u))
4536 return -ENODATA;
4537
9cc54544 4538 if (!FLAGS_SET(crt->cgroup_realized_mask, CGROUP_MASK_MEMORY))
6c71db76
FS
4539 return -ENODATA;
4540
4541 r = cg_all_unified();
4542 if (r < 0)
4543 return r;
9824ab1f 4544 if (r == 0)
6c71db76
FS
4545 return -ENODATA;
4546
9cc54544 4547 r = cg_get_attribute_as_uint64("memory", crt->cgroup_path, attributes_table[metric], &bytes);
f17b07f4 4548 if (r < 0 && r != -ENODATA)
9824ab1f 4549 return r;
f17b07f4 4550 updated = r >= 0;
6c71db76 4551
a8aed6a9
MY
4552finish:
4553 if (metric <= _CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST) {
9cc54544 4554 uint64_t *last = &crt->memory_accounting_last[metric];
6c71db76 4555
a8aed6a9
MY
4556 if (updated)
4557 *last = bytes;
4558 else if (*last != UINT64_MAX)
4559 bytes = *last;
4560 else
4561 return -ENODATA;
f17b07f4 4562
a8aed6a9 4563 } else if (!updated)
f17b07f4 4564 return -ENODATA;
6c71db76 4565
6c71db76
FS
4566 if (ret)
4567 *ret = bytes;
4568
4569 return 0;
4570}
4571
03a7b521 4572int unit_get_tasks_current(Unit *u, uint64_t *ret) {
03a7b521
LP
4573 assert(u);
4574 assert(ret);
4575
2e4025c0 4576 if (!UNIT_CGROUP_BOOL(u, tasks_accounting))
cf3b4be1
LP
4577 return -ENODATA;
4578
9cc54544
LP
4579 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
4580 if (!crt || !crt->cgroup_path)
03a7b521
LP
4581 return -ENODATA;
4582
c36a69f4 4583 /* The root cgroup doesn't expose this information; let's get it from /proc instead */
611c4f8a 4584 if (unit_has_host_root_cgroup(u))
c36a69f4
LP
4585 return procfs_tasks_get_current(ret);
4586
9cc54544 4587 if ((crt->cgroup_realized_mask & CGROUP_MASK_PIDS) == 0)
1f73aa00
LP
4588 return -ENODATA;
4589
9cc54544 4590 return cg_get_attribute_as_uint64("pids", crt->cgroup_path, "pids.current", ret);
03a7b521
LP
4591}
4592
5ad096b3 4593static int unit_get_cpu_usage_raw(Unit *u, nsec_t *ret) {
5ad096b3
LP
4594 uint64_t ns;
4595 int r;
4596
4597 assert(u);
4598 assert(ret);
4599
9cc54544
LP
4600 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
4601 if (!crt || !crt->cgroup_path)
5ad096b3
LP
4602 return -ENODATA;
4603
1f73aa00 4604 /* The root cgroup doesn't expose this information; let's get it from /proc instead */
611c4f8a 4605 if (unit_has_host_root_cgroup(u))
1f73aa00
LP
4606 return procfs_cpu_get_usage(ret);
4607
f98c2585 4608 /* Requisite controllers for CPU accounting are not enabled */
9cc54544 4609 if ((get_cpu_accounting_mask() & ~crt->cgroup_realized_mask) != 0)
f98c2585
CD
4610 return -ENODATA;
4611
92a99304
LP
4612 r = cg_all_unified();
4613 if (r < 0)
4614 return r;
b4cccbc1 4615 if (r > 0) {
66ebf6c0
TH
4616 _cleanup_free_ char *val = NULL;
4617 uint64_t us;
5ad096b3 4618
9cc54544 4619 r = cg_get_keyed_attribute("cpu", crt->cgroup_path, "cpu.stat", STRV_MAKE("usage_usec"), &val);
b734a4ff
LP
4620 if (IN_SET(r, -ENOENT, -ENXIO))
4621 return -ENODATA;
d742f4b5
LP
4622 if (r < 0)
4623 return r;
66ebf6c0
TH
4624
4625 r = safe_atou64(val, &us);
4626 if (r < 0)
4627 return r;
4628
4629 ns = us * NSEC_PER_USEC;
613328c3 4630 } else
9cc54544 4631 return cg_get_attribute_as_uint64("cpuacct", crt->cgroup_path, "cpuacct.usage", ret);
5ad096b3
LP
4632
4633 *ret = ns;
4634 return 0;
4635}
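On cgroup v2 the raw counter is the usage_usec field of cpu.stat, scaled to nanoseconds. A standalone sketch of that keyed-attribute read (cpu.stat is one "key value" pair per line; the path is illustrative).

#include <inttypes.h>
#include <stdio.h>

int main(void) {
        FILE *f = fopen("/sys/fs/cgroup/system.slice/foo.service/cpu.stat", "re");
        if (!f)
                return 1;

        char line[256];
        uint64_t ns = 0;
        while (fgets(line, sizeof(line), f)) {
                uint64_t us;

                /* Only the "usage_usec <value>" line matches; user_usec/system_usec are skipped. */
                if (sscanf(line, "usage_usec %" SCNu64, &us) == 1) {
                        ns = us * 1000; /* NSEC_PER_USEC */
                        break;
                }
        }
        fclose(f);

        printf("cpu usage: %" PRIu64 " ns\n", ns);
        return 0;
}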
4636
4637int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
4638 nsec_t ns;
4639 int r;
4640
fe700f46
LP
4641 assert(u);
4642
4643 /* Retrieve the current CPU usage counter. This will subtract the CPU counter taken when the unit was
4644 * started. If the cgroup has been removed already, returns the last cached value. To cache the value, simply
4645 * call this function with a NULL return value. */
4646
9cc54544
LP
4647 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
4648 if (!crt || !crt->cgroup_path)
4649 return -ENODATA;
4650
2e4025c0 4651 if (!UNIT_CGROUP_BOOL(u, cpu_accounting))
cf3b4be1
LP
4652 return -ENODATA;
4653
5ad096b3 4654 r = unit_get_cpu_usage_raw(u, &ns);
9cc54544 4655 if (r == -ENODATA && crt->cpu_usage_last != NSEC_INFINITY) {
fe700f46
LP
4656 /* If we can't get the CPU usage anymore (because the cgroup was already removed, for example), use our
4657 * cached value. */
4658
4659 if (ret)
9cc54544 4660 *ret = crt->cpu_usage_last;
fe700f46
LP
4661 return 0;
4662 }
5ad096b3
LP
4663 if (r < 0)
4664 return r;
4665
9cc54544
LP
4666 if (ns > crt->cpu_usage_base)
4667 ns -= crt->cpu_usage_base;
5ad096b3
LP
4668 else
4669 ns = 0;
4670
9cc54544 4671 crt->cpu_usage_last = ns;
fe700f46
LP
4672 if (ret)
4673 *ret = ns;
4674
5ad096b3
LP
4675 return 0;
4676}
4677
906c06f6
DM
4678int unit_get_ip_accounting(
4679 Unit *u,
4680 CGroupIPAccountingMetric metric,
4681 uint64_t *ret) {
4682
6b659ed8 4683 uint64_t value;
906c06f6
DM
4684 int fd, r;
4685
4686 assert(u);
4687 assert(metric >= 0);
4688 assert(metric < _CGROUP_IP_ACCOUNTING_METRIC_MAX);
4689 assert(ret);
4690
2e4025c0 4691 if (!UNIT_CGROUP_BOOL(u, ip_accounting))
cf3b4be1
LP
4692 return -ENODATA;
4693
9cc54544
LP
4694 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
4695 if (!crt || !crt->cgroup_path)
4696 return -ENODATA;
4697
906c06f6 4698 fd = IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_INGRESS_PACKETS) ?
9cc54544
LP
4699 crt->ip_accounting_ingress_map_fd :
4700 crt->ip_accounting_egress_map_fd;
906c06f6
DM
4701 if (fd < 0)
4702 return -ENODATA;
4703
4704 if (IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
6b659ed8 4705 r = bpf_firewall_read_accounting(fd, &value, NULL);
906c06f6 4706 else
6b659ed8
LP
4707 r = bpf_firewall_read_accounting(fd, NULL, &value);
4708 if (r < 0)
4709 return r;
4710
4711 /* Add in additional metrics from a previous runtime. Note that when reexecing/reloading the daemon we compile
4712 * all BPF programs and maps anew, but serialize the old counters. When deserializing we store them in the
4713 * ip_accounting_extra[] field, and add them in here transparently. */
4714
9cc54544 4715 *ret = value + crt->ip_accounting_extra[metric];
906c06f6
DM
4716
4717 return r;
4718}
4719
4fb0d2dc
MK
4720static uint64_t unit_get_effective_limit_one(Unit *u, CGroupLimitType type) {
4721 CGroupContext *cc;
4722
4723 assert(u);
4724 assert(UNIT_HAS_CGROUP_CONTEXT(u));
4725
93f8e88d
MK
4726 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
4727 switch (type) {
4728 case CGROUP_LIMIT_MEMORY_MAX:
4729 case CGROUP_LIMIT_MEMORY_HIGH:
4730 return physical_memory();
4731 case CGROUP_LIMIT_TASKS_MAX:
4732 return system_tasks_max();
4733 default:
4734 assert_not_reached();
4735 }
4736
c658ad79 4737 cc = ASSERT_PTR(unit_get_cgroup_context(u));
4fb0d2dc
MK
4738 switch (type) {
4739 /* Note: on legacy/hybrid hierarchies memory_max stays CGROUP_LIMIT_MAX unless configured
4740 * explicitly. Effective value of MemoryLimit= (cgroup v1) is not implemented. */
4741 case CGROUP_LIMIT_MEMORY_MAX:
4742 return cc->memory_max;
4743 case CGROUP_LIMIT_MEMORY_HIGH:
4744 return cc->memory_high;
4745 case CGROUP_LIMIT_TASKS_MAX:
4746 return cgroup_tasks_max_resolve(&cc->tasks_max);
4747 default:
4748 assert_not_reached();
4749 }
4750}
4751
4752int unit_get_effective_limit(Unit *u, CGroupLimitType type, uint64_t *ret) {
4753 uint64_t infimum;
4754
4755 assert(u);
4756 assert(ret);
4757 assert(type >= 0);
4758 assert(type < _CGROUP_LIMIT_TYPE_MAX);
4759
4760 if (!UNIT_HAS_CGROUP_CONTEXT(u))
4761 return -EINVAL;
4762
4763 infimum = unit_get_effective_limit_one(u, type);
4764 for (Unit *slice = UNIT_GET_SLICE(u); slice; slice = UNIT_GET_SLICE(slice))
4765 infimum = MIN(infimum, unit_get_effective_limit_one(slice, type));
4766
4767 *ret = infimum;
4768 return 0;
4769}
4770
fbe14fc9
LP
4771static int unit_get_io_accounting_raw(Unit *u, uint64_t ret[static _CGROUP_IO_ACCOUNTING_METRIC_MAX]) {
4772 static const char *const field_names[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
4773 [CGROUP_IO_READ_BYTES] = "rbytes=",
4774 [CGROUP_IO_WRITE_BYTES] = "wbytes=",
4775 [CGROUP_IO_READ_OPERATIONS] = "rios=",
4776 [CGROUP_IO_WRITE_OPERATIONS] = "wios=",
4777 };
4778 uint64_t acc[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {};
4779 _cleanup_free_ char *path = NULL;
4780 _cleanup_fclose_ FILE *f = NULL;
4781 int r;
4782
4783 assert(u);
4784
9cc54544
LP
4785 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
4786 if (!crt || !crt->cgroup_path)
fbe14fc9
LP
4787 return -ENODATA;
4788
4789 if (unit_has_host_root_cgroup(u))
4790 return -ENODATA; /* TODO: return useful data for the top-level cgroup */
4791
4792 r = cg_all_unified();
4793 if (r < 0)
4794 return r;
82133326 4795 if (r == 0)
fbe14fc9
LP
4796 return -ENODATA;
4797
9cc54544 4798 if (!FLAGS_SET(crt->cgroup_realized_mask, CGROUP_MASK_IO))
fbe14fc9
LP
4799 return -ENODATA;
4800
9cc54544 4801 r = cg_get_path("io", crt->cgroup_path, "io.stat", &path);
fbe14fc9
LP
4802 if (r < 0)
4803 return r;
4804
4805 f = fopen(path, "re");
4806 if (!f)
4807 return -errno;
4808
4809 for (;;) {
4810 _cleanup_free_ char *line = NULL;
4811 const char *p;
4812
4813 r = read_line(f, LONG_LINE_MAX, &line);
4814 if (r < 0)
4815 return r;
4816 if (r == 0)
4817 break;
4818
4819 p = line;
4820 p += strcspn(p, WHITESPACE); /* Skip over device major/minor */
4821 p += strspn(p, WHITESPACE); /* Skip over following whitespace */
4822
4823 for (;;) {
4824 _cleanup_free_ char *word = NULL;
4825
4826 r = extract_first_word(&p, &word, NULL, EXTRACT_RETAIN_ESCAPE);
4827 if (r < 0)
4828 return r;
4829 if (r == 0)
4830 break;
4831
4832 for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++) {
4833 const char *x;
4834
4835 x = startswith(word, field_names[i]);
4836 if (x) {
4837 uint64_t w;
4838
4839 r = safe_atou64(x, &w);
4840 if (r < 0)
4841 return r;
4842
4843 /* Sum up the stats of all devices */
4844 acc[i] += w;
4845 break;
4846 }
4847 }
4848 }
4849 }
4850
4851 memcpy(ret, acc, sizeof(acc));
4852 return 0;
4853}
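Each io.stat line is a device major:minor followed by key=value fields (rbytes=, wbytes=, rios=, wios=, ...), and the loop above sums every field across devices. A minimal sketch of the same parse over hard-coded sample lines; the numbers are made up.

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

int main(void) {
        /* Two hypothetical io.stat lines in the kernel's "MAJ:MIN key=value ..." layout. */
        char lines[][80] = {
                "8:0 rbytes=1048576 wbytes=524288 rios=30 wios=12",
                "8:16 rbytes=2097152 wbytes=0 rios=55 wios=0",
        };

        uint64_t rbytes = 0, wbytes = 0, rios = 0, wios = 0;

        for (size_t i = 0; i < sizeof(lines) / sizeof(lines[0]); i++) {
                char *save = NULL;

                /* The leading major:minor token matches none of the fields and is ignored. */
                for (char *w = strtok_r(lines[i], " ", &save); w; w = strtok_r(NULL, " ", &save)) {
                        uint64_t v;

                        if (sscanf(w, "rbytes=%" SCNu64, &v) == 1)
                                rbytes += v;
                        else if (sscanf(w, "wbytes=%" SCNu64, &v) == 1)
                                wbytes += v;
                        else if (sscanf(w, "rios=%" SCNu64, &v) == 1)
                                rios += v;
                        else if (sscanf(w, "wios=%" SCNu64, &v) == 1)
                                wios += v;
                }
        }

        printf("rbytes=%" PRIu64 " wbytes=%" PRIu64 " rios=%" PRIu64 " wios=%" PRIu64 "\n",
               rbytes, wbytes, rios, wios);
        return 0;
}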
4854
4855int unit_get_io_accounting(
4856 Unit *u,
4857 CGroupIOAccountingMetric metric,
4858 bool allow_cache,
4859 uint64_t *ret) {
4860
4861 uint64_t raw[_CGROUP_IO_ACCOUNTING_METRIC_MAX];
4862 int r;
4863
4864 /* Retrieve an IO accounting parameter. This will subtract the counter taken when the unit was started. */
4865
4866 if (!UNIT_CGROUP_BOOL(u, io_accounting))
4867 return -ENODATA;
4868
9cc54544
LP
4869 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
4870 if (!crt || !crt->cgroup_path)
4871 return -ENODATA;
4872
4873 if (allow_cache && crt->io_accounting_last[metric] != UINT64_MAX)
fbe14fc9
LP
4874 goto done;
4875
4876 r = unit_get_io_accounting_raw(u, raw);
9cc54544 4877 if (r == -ENODATA && crt->io_accounting_last[metric] != UINT64_MAX)
fbe14fc9
LP
4878 goto done;
4879 if (r < 0)
4880 return r;
4881
4882 for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++) {
4883 /* Saturated subtraction */
9cc54544
LP
4884 if (raw[i] > crt->io_accounting_base[i])
4885 crt->io_accounting_last[i] = raw[i] - crt->io_accounting_base[i];
fbe14fc9 4886 else
9cc54544 4887 crt->io_accounting_last[i] = 0;
fbe14fc9
LP
4888 }
4889
4890done:
4891 if (ret)
9cc54544 4892 *ret = crt->io_accounting_last[metric];
fbe14fc9
LP
4893
4894 return 0;
4895}
4896
906c06f6 4897int unit_reset_cpu_accounting(Unit *u) {
5ad096b3
LP
4898 int r;
4899
4900 assert(u);
4901
9cc54544
LP
4902 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
4903 if (!crt || !crt->cgroup_path)
4904 return 0;
4905
4906 crt->cpu_usage_last = NSEC_INFINITY;
fe700f46 4907
9cc54544 4908 r = unit_get_cpu_usage_raw(u, &crt->cpu_usage_base);
5ad096b3 4909 if (r < 0) {
9cc54544 4910 crt->cpu_usage_base = 0;
5ad096b3 4911 return r;
b56c28c3 4912 }
2633eb83 4913
4ad49000 4914 return 0;
4fbf50b3
LP
4915}
4916
d4bdc202
MY
4917void unit_reset_memory_accounting_last(Unit *u) {
4918 assert(u);
4919
9cc54544
LP
4920 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
4921 if (!crt || !crt->cgroup_path)
4922 return;
4923
85471164 4924 FOREACH_ELEMENT(i, crt->memory_accounting_last)
d4bdc202
MY
4925 *i = UINT64_MAX;
4926}
4927
906c06f6 4928int unit_reset_ip_accounting(Unit *u) {
cbd2abbb 4929 int r = 0;
906c06f6
DM
4930
4931 assert(u);
4932
9cc54544
LP
4933 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
4934 if (!crt || !crt->cgroup_path)
4935 return 0;
4936
4937 if (crt->ip_accounting_ingress_map_fd >= 0)
4938 RET_GATHER(r, bpf_firewall_reset_accounting(crt->ip_accounting_ingress_map_fd));
906c06f6 4939
9cc54544
LP
4940 if (crt->ip_accounting_egress_map_fd >= 0)
4941 RET_GATHER(r, bpf_firewall_reset_accounting(crt->ip_accounting_egress_map_fd));
906c06f6 4942
9cc54544 4943 zero(crt->ip_accounting_extra);
6b659ed8 4944
cbd2abbb 4945 return r;
906c06f6
DM
4946}
4947
d4bdc202
MY
4948void unit_reset_io_accounting_last(Unit *u) {
4949 assert(u);
4950
9cc54544
LP
4951 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
4952 if (!crt || !crt->cgroup_path)
4953 return;
4954
4955 FOREACH_ARRAY(i, crt->io_accounting_last, _CGROUP_IO_ACCOUNTING_METRIC_MAX)
d4bdc202
MY
4956 *i = UINT64_MAX;
4957}
4958
fbe14fc9
LP
4959int unit_reset_io_accounting(Unit *u) {
4960 int r;
4961
4962 assert(u);
4963
9cc54544
LP
4964 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
4965 if (!crt || !crt->cgroup_path)
4966 return 0;
4967
d4bdc202 4968 unit_reset_io_accounting_last(u);
fbe14fc9 4969
9cc54544 4970 r = unit_get_io_accounting_raw(u, crt->io_accounting_base);
fbe14fc9 4971 if (r < 0) {
9cc54544 4972 zero(crt->io_accounting_base);
fbe14fc9
LP
4973 return r;
4974 }
4975
4976 return 0;
4977}
4978
9b2559a1 4979int unit_reset_accounting(Unit *u) {
cbd2abbb 4980 int r = 0;
9b2559a1
LP
4981
4982 assert(u);
4983
cbd2abbb
MY
4984 RET_GATHER(r, unit_reset_cpu_accounting(u));
4985 RET_GATHER(r, unit_reset_io_accounting(u));
4986 RET_GATHER(r, unit_reset_ip_accounting(u));
d4bdc202 4987 unit_reset_memory_accounting_last(u);
9b2559a1 4988
cbd2abbb 4989 return r;
9b2559a1
LP
4990}
4991
e7ab4d1a
LP
4992void unit_invalidate_cgroup(Unit *u, CGroupMask m) {
4993 assert(u);
4994
4995 if (!UNIT_HAS_CGROUP_CONTEXT(u))
4996 return;
4997
9cc54544
LP
4998 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
4999 if (!crt)
5000 return;
5001
e7ab4d1a
LP
5002 if (m == 0)
5003 return;
5004
538b4852
TH
5005 /* always invalidate compat pairs together */
5006 if (m & (CGROUP_MASK_IO | CGROUP_MASK_BLKIO))
5007 m |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;
5008
7cce4fb7
LP
5009 if (m & (CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT))
5010 m |= CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT;
5011
9cc54544 5012 if (FLAGS_SET(crt->cgroup_invalidated_mask, m)) /* NOP? */
e7ab4d1a
LP
5013 return;
5014
9cc54544 5015 crt->cgroup_invalidated_mask |= m;
91a6073e 5016 unit_add_to_cgroup_realize_queue(u);
e7ab4d1a
LP
5017}
5018
906c06f6
DM
5019void unit_invalidate_cgroup_bpf(Unit *u) {
5020 assert(u);
5021
5022 if (!UNIT_HAS_CGROUP_CONTEXT(u))
5023 return;
5024
9cc54544
LP
5025 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
5026 if (!crt)
906c06f6
DM
5027 return;
5028
9cc54544
LP
5029 if (crt->cgroup_invalidated_mask & CGROUP_MASK_BPF_FIREWALL) /* NOP? */
5030 return;
5031
5032 crt->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
91a6073e 5033 unit_add_to_cgroup_realize_queue(u);
906c06f6
DM
5034
5035 /* If we are a slice unit, we also need to compile a new BPF program for all our children, as the IP access
5036 * list of our children includes our own. */
5037 if (u->type == UNIT_SLICE) {
5038 Unit *member;
906c06f6 5039
d219a2b0 5040 UNIT_FOREACH_DEPENDENCY(member, u, UNIT_ATOM_SLICE_OF)
15ed3c3a 5041 unit_invalidate_cgroup_bpf(member);
906c06f6
DM
5042 }
5043}
5044
869f52f2
DS
5045void unit_cgroup_catchup(Unit *u) {
5046 assert(u);
5047
5048 if (!UNIT_HAS_CGROUP_CONTEXT(u))
5049 return;
5050
5051 /* We dropped the inotify watch during reexec/reload, so we need to
5052 * check these as they may have changed.
5053 * Note that (currently) the kernel doesn't actually update cgroup
5054 * file modification times, so we can't just serialize and then check
5055 * the mtime for file(s) we are interested in. */
5056 (void) unit_check_cgroup_events(u);
5057 unit_add_to_cgroup_oom_queue(u);
5058}
5059
1d9cc876
LP
5060bool unit_cgroup_delegate(Unit *u) {
5061 CGroupContext *c;
5062
5063 assert(u);
5064
5065 if (!UNIT_VTABLE(u)->can_delegate)
5066 return false;
5067
5068 c = unit_get_cgroup_context(u);
5069 if (!c)
5070 return false;
5071
5072 return c->delegate;
5073}
5074
e7ab4d1a 5075void manager_invalidate_startup_units(Manager *m) {
e7ab4d1a
LP
5076 Unit *u;
5077
5078 assert(m);
5079
90e74a66 5080 SET_FOREACH(u, m->startup_units)
9dfb6a3a 5081 unit_invalidate_cgroup(u, CGROUP_MASK_CPU|CGROUP_MASK_IO|CGROUP_MASK_BLKIO|CGROUP_MASK_CPUSET);
e7ab4d1a
LP
5082}
5083
16b6af6a
AV
5084static int unit_cgroup_freezer_kernel_state(Unit *u, FreezerState *ret) {
5085 _cleanup_free_ char *val = NULL;
5086 FreezerState s;
5087 int r;
d9e45bc3
MS
5088
5089 assert(u);
16b6af6a 5090 assert(ret);
d9e45bc3 5091
9cc54544
LP
5092 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
5093 if (!crt || !crt->cgroup_path)
5094 return -EOWNERDEAD;
5095
5096 r = cg_get_keyed_attribute(
5097 SYSTEMD_CGROUP_CONTROLLER,
5098 crt->cgroup_path,
5099 "cgroup.events",
5100 STRV_MAKE("frozen"),
5101 &val);
16b6af6a
AV
5102 if (IN_SET(r, -ENOENT, -ENXIO))
5103 return -ENODATA;
5104 if (r < 0)
5105 return r;
9a1e90ae 5106
16b6af6a
AV
5107 if (streq(val, "0"))
5108 s = FREEZER_RUNNING;
5109 else if (streq(val, "1"))
5110 s = FREEZER_FROZEN;
5111 else {
4e494e6a 5112 log_unit_debug(u, "Unexpected cgroup frozen state: %s", val);
16b6af6a
AV
5113 s = _FREEZER_STATE_INVALID;
5114 }
a14137d9 5115
16b6af6a
AV
5116 *ret = s;
5117 return 0;
5118}
d9e45bc3 5119
16b6af6a
AV
5120int unit_cgroup_freezer_action(Unit *u, FreezerAction action) {
5121 _cleanup_free_ char *path = NULL;
5122 FreezerState target, current, next;
5123 int r;
a14137d9 5124
16b6af6a
AV
5125 assert(u);
5126 assert(IN_SET(action, FREEZER_FREEZE, FREEZER_PARENT_FREEZE,
5127 FREEZER_THAW, FREEZER_PARENT_THAW));
5128
9cc54544 5129 if (!cg_freezer_supported())
16b6af6a 5130 return 0;
a14137d9 5131
16b6af6a 5132 unit_next_freezer_state(u, action, &next, &target);
d9e45bc3 5133
7923e949
AV
5134 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
5135 if (!crt || !crt->cgroup_realized) {
5136 /* No realized cgroup = nothing to freeze */
5137 u->freezer_state = freezer_state_finish(next);
5138 return 0;
5139 }
5140
16b6af6a 5141 r = unit_cgroup_freezer_kernel_state(u, &current);
d9e45bc3 5142 if (r < 0)
16b6af6a 5143 return r;
d9e45bc3 5144
16b6af6a
AV
5145 if (current == target)
5146 next = freezer_state_finish(next);
5147 else if (IN_SET(next, FREEZER_FROZEN, FREEZER_FROZEN_BY_PARENT, FREEZER_RUNNING)) {
5148 /* We're transitioning into a finished state, which implies that the cgroup's
5149 * current state already matches the target and thus we'd return 0. But reality
5150 * shows otherwise. This indicates that our freezer_state tracking has diverged
5151 * from the real state of the cgroup, which can happen if someone meddles with the
5152 * cgroup from underneath us. This really shouldn't happen during normal operation,
5153 * though. So, let's warn about it and fix up the state to be valid. */
5154
5155 log_unit_warning(u, "Unit wants to transition to %s freezer state but cgroup is unexpectedly %s, fixing up.",
5156 freezer_state_to_string(next), freezer_state_to_string(current) ?: "(invalid)");
5157
5158 if (next == FREEZER_FROZEN)
5159 next = FREEZER_FREEZING;
5160 else if (next == FREEZER_FROZEN_BY_PARENT)
5161 next = FREEZER_FREEZING_BY_PARENT;
5162 else if (next == FREEZER_RUNNING)
5163 next = FREEZER_THAWING;
5164 }
d9e45bc3 5165
9cc54544 5166 r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, crt->cgroup_path, "cgroup.freeze", &path);
d9e45bc3
MS
5167 if (r < 0)
5168 return r;
5169
16b6af6a
AV
5170 log_unit_debug(u, "Unit freezer state was %s, now %s.",
5171 freezer_state_to_string(u->freezer_state),
5172 freezer_state_to_string(next));
d9e45bc3 5173
16b6af6a 5174 r = write_string_file(path, one_zero(target == FREEZER_FROZEN), WRITE_STRING_FILE_DISABLE_BUFFER);
d9e45bc3
MS
5175 if (r < 0)
5176 return r;
5177
16b6af6a
AV
5178 u->freezer_state = next;
5179 return target != current;
d9e45bc3
MS
5180}
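Mechanically, the action above writes "1" (freeze) or "0" (thaw) into cgroup.freeze; the write returns immediately, and completion only becomes visible once the frozen field of cgroup.events flips. A hedged standalone sketch with an illustrative cgroup path; PID 1 itself watches cgroup.events via inotify rather than polling, but polling keeps the sketch short.

#include <stdio.h>
#include <unistd.h>

#define CG "/sys/fs/cgroup/system.slice/foo.service"

/* Returns 1 if the "frozen" key in cgroup.events is 1, 0 if it is 0, -1 on error. */
static int read_frozen(void) {
        FILE *f = fopen(CG "/cgroup.events", "re");
        if (!f)
                return -1;

        char line[64];
        int frozen = -1;
        while (fgets(line, sizeof(line), f))
                if (sscanf(line, "frozen %d", &frozen) == 1)
                        break;
        fclose(f);
        return frozen;
}

int main(void) {
        FILE *f = fopen(CG "/cgroup.freeze", "we");
        if (!f)
                return 1;
        fputs("1\n", f); /* "1" requests a freeze, "0" a thaw */
        fclose(f);

        int state;
        while ((state = read_frozen()) == 0)
                usleep(10 * 1000); /* wait for the kernel to report completion */

        if (state < 0)
                return 1;

        puts("frozen");
        return 0;
}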
5181
047f5d63
PH
5182int unit_get_cpuset(Unit *u, CPUSet *cpus, const char *name) {
5183 _cleanup_free_ char *v = NULL;
5184 int r;
5185
5186 assert(u);
5187 assert(cpus);
5188
9cc54544
LP
5189 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
5190 if (!crt || !crt->cgroup_path)
047f5d63
PH
5191 return -ENODATA;
5192
9cc54544 5193 if ((crt->cgroup_realized_mask & CGROUP_MASK_CPUSET) == 0)
047f5d63
PH
5194 return -ENODATA;
5195
5196 r = cg_all_unified();
5197 if (r < 0)
5198 return r;
5199 if (r == 0)
5200 return -ENODATA;
48fd01e5 5201
9cc54544 5202 r = cg_get_attribute("cpuset", crt->cgroup_path, name, &v);
047f5d63
PH
5203 if (r == -ENOENT)
5204 return -ENODATA;
5205 if (r < 0)
5206 return r;
5207
5208 return parse_cpu_set_full(v, cpus, false, NULL, NULL, 0, NULL);
5209}
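The value handed to parse_cpu_set_full() uses the kernel's CPU list syntax: comma-separated single CPUs and dash ranges, e.g. "0-3,8". A small standalone parser for that syntax; the sample value is made up.

#include <stdio.h>
#include <string.h>

int main(void) {
        char list[] = "0-3,8"; /* hypothetical cpuset.cpus.effective content */
        char *save = NULL;

        for (char *range = strtok_r(list, ",", &save); range; range = strtok_r(NULL, ",", &save)) {
                unsigned lo, hi;

                if (sscanf(range, "%u-%u", &lo, &hi) == 2)
                        ;        /* "a-b" range */
                else if (sscanf(range, "%u", &lo) == 1)
                        hi = lo; /* single CPU */
                else
                        continue;

                for (unsigned cpu = lo; cpu <= hi; cpu++)
                        printf("cpu %u\n", cpu);
        }
        return 0;
}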
5210
9cc54544
LP
5211CGroupRuntime *cgroup_runtime_new(void) {
5212 _cleanup_(cgroup_runtime_freep) CGroupRuntime *crt = NULL;
5213
5214 crt = new(CGroupRuntime, 1);
5215 if (!crt)
5216 return NULL;
5217
5218 *crt = (CGroupRuntime) {
5219 .cpu_usage_last = NSEC_INFINITY,
5220
5221 .cgroup_control_inotify_wd = -1,
5222 .cgroup_memory_inotify_wd = -1,
5223
5224 .ip_accounting_ingress_map_fd = -EBADF,
5225 .ip_accounting_egress_map_fd = -EBADF,
5226
5227 .ipv4_allow_map_fd = -EBADF,
5228 .ipv6_allow_map_fd = -EBADF,
5229 .ipv4_deny_map_fd = -EBADF,
5230 .ipv6_deny_map_fd = -EBADF,
5231
5232 .cgroup_invalidated_mask = _CGROUP_MASK_ALL,
5233 };
5234
85471164 5235 FOREACH_ELEMENT(i, crt->memory_accounting_last)
9cc54544 5236 *i = UINT64_MAX;
85471164 5237 FOREACH_ELEMENT(i, crt->io_accounting_base)
9cc54544 5238 *i = UINT64_MAX;
85471164 5239 FOREACH_ELEMENT(i, crt->io_accounting_last)
9cc54544 5240 *i = UINT64_MAX;
85471164 5241 FOREACH_ELEMENT(i, crt->ip_accounting_extra)
9cc54544
LP
5242 *i = UINT64_MAX;
5243
5244 return TAKE_PTR(crt);
5245}
5246
5247CGroupRuntime *cgroup_runtime_free(CGroupRuntime *crt) {
5248 if (!crt)
5249 return NULL;
5250
5251 fdset_free(crt->initial_socket_bind_link_fds);
5252#if BPF_FRAMEWORK
5253 bpf_link_free(crt->ipv4_socket_bind_link);
5254 bpf_link_free(crt->ipv6_socket_bind_link);
5255#endif
5256 hashmap_free(crt->bpf_foreign_by_key);
5257
5258 bpf_program_free(crt->bpf_device_control_installed);
5259
5260#if BPF_FRAMEWORK
5261 bpf_link_free(crt->restrict_ifaces_ingress_bpf_link);
5262 bpf_link_free(crt->restrict_ifaces_egress_bpf_link);
5263#endif
33b93371 5264 fdset_free(crt->initial_restrict_ifaces_link_fds);
9cc54544
LP
5265
5266 safe_close(crt->ipv4_allow_map_fd);
5267 safe_close(crt->ipv6_allow_map_fd);
5268 safe_close(crt->ipv4_deny_map_fd);
5269 safe_close(crt->ipv6_deny_map_fd);
5270
5271 bpf_program_free(crt->ip_bpf_ingress);
5272 bpf_program_free(crt->ip_bpf_ingress_installed);
5273 bpf_program_free(crt->ip_bpf_egress);
5274 bpf_program_free(crt->ip_bpf_egress_installed);
5275
5276 set_free(crt->ip_bpf_custom_ingress);
5277 set_free(crt->ip_bpf_custom_ingress_installed);
5278 set_free(crt->ip_bpf_custom_egress);
5279 set_free(crt->ip_bpf_custom_egress_installed);
5280
9cc54544
LP
5281 free(crt->cgroup_path);
5282
5283 return mfree(crt);
5284}
5285
5286static const char* const ip_accounting_metric_field_table[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
5287 [CGROUP_IP_INGRESS_BYTES] = "ip-accounting-ingress-bytes",
5288 [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
5289 [CGROUP_IP_EGRESS_BYTES] = "ip-accounting-egress-bytes",
5290 [CGROUP_IP_EGRESS_PACKETS] = "ip-accounting-egress-packets",
5291};
5292
5293DEFINE_PRIVATE_STRING_TABLE_LOOKUP(ip_accounting_metric_field, CGroupIPAccountingMetric);
5294
5295static const char* const io_accounting_metric_field_base_table[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
5296 [CGROUP_IO_READ_BYTES] = "io-accounting-read-bytes-base",
5297 [CGROUP_IO_WRITE_BYTES] = "io-accounting-write-bytes-base",
5298 [CGROUP_IO_READ_OPERATIONS] = "io-accounting-read-operations-base",
5299 [CGROUP_IO_WRITE_OPERATIONS] = "io-accounting-write-operations-base",
5300};
5301
5302DEFINE_PRIVATE_STRING_TABLE_LOOKUP(io_accounting_metric_field_base, CGroupIOAccountingMetric);
5303
5304static const char* const io_accounting_metric_field_last_table[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
5305 [CGROUP_IO_READ_BYTES] = "io-accounting-read-bytes-last",
5306 [CGROUP_IO_WRITE_BYTES] = "io-accounting-write-bytes-last",
5307 [CGROUP_IO_READ_OPERATIONS] = "io-accounting-read-operations-last",
5308 [CGROUP_IO_WRITE_OPERATIONS] = "io-accounting-write-operations-last",
5309};
5310
5311DEFINE_PRIVATE_STRING_TABLE_LOOKUP(io_accounting_metric_field_last, CGroupIOAccountingMetric);
5312
5313static const char* const memory_accounting_metric_field_last_table[_CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST + 1] = {
5314 [CGROUP_MEMORY_PEAK] = "memory-accounting-peak",
5315 [CGROUP_MEMORY_SWAP_PEAK] = "memory-accounting-swap-peak",
5316};
5317
5318DEFINE_PRIVATE_STRING_TABLE_LOOKUP(memory_accounting_metric_field_last, CGroupMemoryAccountingMetric);
5319
5320static int serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
5321 _cleanup_free_ char *s = NULL;
5322 int r;
5323
5324 assert(f);
5325 assert(key);
5326
5327 if (mask == 0)
5328 return 0;
5329
5330 r = cg_mask_to_string(mask, &s);
5331 if (r < 0)
5332 return log_error_errno(r, "Failed to format cgroup mask: %m");
5333
5334 return serialize_item(f, key, s);
5335}
5336
5337int cgroup_runtime_serialize(Unit *u, FILE *f, FDSet *fds) {
5338 int r;
5339
5340 assert(u);
5341 assert(f);
5342 assert(fds);
5343
5344 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
5345 if (!crt)
5346 return 0;
5347
5348 (void) serialize_item_format(f, "cpu-usage-base", "%" PRIu64, crt->cpu_usage_base);
5349 if (crt->cpu_usage_last != NSEC_INFINITY)
5350 (void) serialize_item_format(f, "cpu-usage-last", "%" PRIu64, crt->cpu_usage_last);
5351
5352 if (crt->managed_oom_kill_last > 0)
5353 (void) serialize_item_format(f, "managed-oom-kill-last", "%" PRIu64, crt->managed_oom_kill_last);
5354
5355 if (crt->oom_kill_last > 0)
5356 (void) serialize_item_format(f, "oom-kill-last", "%" PRIu64, crt->oom_kill_last);
5357
5358 for (CGroupMemoryAccountingMetric metric = 0; metric <= _CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST; metric++) {
5359 uint64_t v;
5360
5361 r = unit_get_memory_accounting(u, metric, &v);
5362 if (r >= 0)
5363 (void) serialize_item_format(f, memory_accounting_metric_field_last_to_string(metric), "%" PRIu64, v);
5364 }
5365
5366 for (CGroupIPAccountingMetric m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
5367 uint64_t v;
5368
5369 r = unit_get_ip_accounting(u, m, &v);
5370 if (r >= 0)
5371 (void) serialize_item_format(f, ip_accounting_metric_field_to_string(m), "%" PRIu64, v);
5372 }
5373
5374 for (CGroupIOAccountingMetric im = 0; im < _CGROUP_IO_ACCOUNTING_METRIC_MAX; im++) {
5375 (void) serialize_item_format(f, io_accounting_metric_field_base_to_string(im), "%" PRIu64, crt->io_accounting_base[im]);
5376
5377 if (crt->io_accounting_last[im] != UINT64_MAX)
5378 (void) serialize_item_format(f, io_accounting_metric_field_last_to_string(im), "%" PRIu64, crt->io_accounting_last[im]);
5379 }
5380
5381 if (crt->cgroup_path)
5382 (void) serialize_item(f, "cgroup", crt->cgroup_path);
5383 if (crt->cgroup_id != 0)
5384 (void) serialize_item_format(f, "cgroup-id", "%" PRIu64, crt->cgroup_id);
5385
5386 (void) serialize_bool(f, "cgroup-realized", crt->cgroup_realized);
5387 (void) serialize_cgroup_mask(f, "cgroup-realized-mask", crt->cgroup_realized_mask);
5388 (void) serialize_cgroup_mask(f, "cgroup-enabled-mask", crt->cgroup_enabled_mask);
5389 (void) serialize_cgroup_mask(f, "cgroup-invalidated-mask", crt->cgroup_invalidated_mask);
5390
5391 (void) bpf_socket_bind_serialize(u, f, fds);
5392
5393 (void) bpf_program_serialize_attachment(f, fds, "ip-bpf-ingress-installed", crt->ip_bpf_ingress_installed);
5394 (void) bpf_program_serialize_attachment(f, fds, "ip-bpf-egress-installed", crt->ip_bpf_egress_installed);
5395 (void) bpf_program_serialize_attachment(f, fds, "bpf-device-control-installed", crt->bpf_device_control_installed);
5396 (void) bpf_program_serialize_attachment_set(f, fds, "ip-bpf-custom-ingress-installed", crt->ip_bpf_custom_ingress_installed);
5397 (void) bpf_program_serialize_attachment_set(f, fds, "ip-bpf-custom-egress-installed", crt->ip_bpf_custom_egress_installed);
5398
5399 (void) bpf_restrict_ifaces_serialize(u, f, fds);
5400
5401 return 0;
5402}
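Everything above lands in the manager's serialization stream as one key=value line per item (that is what the serialize_item*() helpers emit), so a unit's cgroup state might look roughly like the hypothetical excerpt below; all values are invented for illustration.

cpu-usage-base=183000000
cgroup=/system.slice/foo.service
cgroup-id=4711
cgroup-realized=yes
cgroup-realized-mask=cpu memory pids
cgroup-invalidated-mask=bpf-firewall
io-accounting-read-bytes-base=1048576
io-accounting-write-bytes-base=524288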
5403
5404#define MATCH_DESERIALIZE(u, key, l, v, parse_func, target) \
5405 ({ \
5406 bool _deserialize_matched = streq(l, key); \
5407 if (_deserialize_matched) { \
5408 CGroupRuntime *crt = unit_setup_cgroup_runtime(u); \
5409 if (!crt) \
5410 log_oom_debug(); \
5411 else { \
5412 int _deserialize_r = parse_func(v); \
5413 if (_deserialize_r < 0) \
5414 log_unit_debug_errno(u, _deserialize_r, \
5415 "Failed to parse \"%s=%s\", ignoring.", l, v); \
5416 else \
5417 crt->target = _deserialize_r; \
5418 } \
5419 } \
5420 _deserialize_matched; \
5421 })
5422
5423#define MATCH_DESERIALIZE_IMMEDIATE(u, key, l, v, parse_func, target) \
5424 ({ \
5425 bool _deserialize_matched = streq(l, key); \
5426 if (_deserialize_matched) { \
5427 CGroupRuntime *crt = unit_setup_cgroup_runtime(u); \
5428 if (!crt) \
5429 log_oom_debug(); \
5430 else { \
5431 int _deserialize_r = parse_func(v, &crt->target); \
5432 if (_deserialize_r < 0) \
5433 log_unit_debug_errno(u, _deserialize_r, \
5434 "Failed to parse \"%s=%s\", ignoring", l, v); \
5435 } \
5436 } \
5437 _deserialize_matched; \
5438 })
5439
5440#define MATCH_DESERIALIZE_METRIC(u, key, l, v, parse_func, target) \
5441 ({ \
5442 bool _deserialize_matched = streq(l, key); \
5443 if (_deserialize_matched) { \
5444 CGroupRuntime *crt = unit_setup_cgroup_runtime(u); \
5445 if (!crt) \
5446 log_oom_debug(); \
5447 else { \
5448 int _deserialize_r = parse_func(v); \
5449 if (_deserialize_r < 0) \
5450 log_unit_debug_errno(u, _deserialize_r, \
5451 "Failed to parse \"%s=%s\", ignoring.", l, v); \
5452 else \
5453 crt->target = _deserialize_r; \
5454 } \
5455 } \
5456 _deserialize_matched; \
5457 })
5458
5459int cgroup_runtime_deserialize_one(Unit *u, const char *key, const char *value, FDSet *fds) {
5460 int r;
5461
5462 assert(u);
5463 assert(value);
5464
5465 if (!UNIT_HAS_CGROUP_CONTEXT(u))
5466 return 0;
5467
5468 if (MATCH_DESERIALIZE_IMMEDIATE(u, "cpu-usage-base", key, value, safe_atou64, cpu_usage_base) ||
5469 MATCH_DESERIALIZE_IMMEDIATE(u, "cpuacct-usage-base", key, value, safe_atou64, cpu_usage_base))
5470 return 1;
5471
5472 if (MATCH_DESERIALIZE_IMMEDIATE(u, "cpu-usage-last", key, value, safe_atou64, cpu_usage_last))
5473 return 1;
5474
5475 if (MATCH_DESERIALIZE_IMMEDIATE(u, "managed-oom-kill-last", key, value, safe_atou64, managed_oom_kill_last))
5476 return 1;
5477
5478 if (MATCH_DESERIALIZE_IMMEDIATE(u, "oom-kill-last", key, value, safe_atou64, oom_kill_last))
5479 return 1;
5480
5481 if (streq(key, "cgroup")) {
5482 r = unit_set_cgroup_path(u, value);
5483 if (r < 0)
5484 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", value);
5485
5486 (void) unit_watch_cgroup(u);
5487 (void) unit_watch_cgroup_memory(u);
5488 return 1;
5489 }
5490
5491 if (MATCH_DESERIALIZE_IMMEDIATE(u, "cgroup-id", key, value, safe_atou64, cgroup_id))
5492 return 1;
5493
5494 if (MATCH_DESERIALIZE(u, "cgroup-realized", key, value, parse_boolean, cgroup_realized))
5495 return 1;
5496
5497 if (MATCH_DESERIALIZE_IMMEDIATE(u, "cgroup-realized-mask", key, value, cg_mask_from_string, cgroup_realized_mask))
5498 return 1;
5499
5500 if (MATCH_DESERIALIZE_IMMEDIATE(u, "cgroup-enabled-mask", key, value, cg_mask_from_string, cgroup_enabled_mask))
5501 return 1;
5502
5503 if (MATCH_DESERIALIZE_IMMEDIATE(u, "cgroup-invalidated-mask", key, value, cg_mask_from_string, cgroup_invalidated_mask))
5504 return 1;
5505
5506 if (STR_IN_SET(key, "ipv4-socket-bind-bpf-link-fd", "ipv6-socket-bind-bpf-link-fd")) {
5507 int fd;
5508
5509 fd = deserialize_fd(fds, value);
5510 if (fd >= 0)
5511 (void) bpf_socket_bind_add_initial_link_fd(u, fd);
5512
5513 return 1;
5514 }
5515
5516 if (STR_IN_SET(key,
5517 "ip-bpf-ingress-installed", "ip-bpf-egress-installed",
5518 "bpf-device-control-installed",
5519 "ip-bpf-custom-ingress-installed", "ip-bpf-custom-egress-installed")) {
5520
5521 CGroupRuntime *crt = unit_setup_cgroup_runtime(u);
5522 if (!crt)
5523 log_oom_debug();
5524 else {
5525 if (streq(key, "ip-bpf-ingress-installed"))
5526 (void) bpf_program_deserialize_attachment(value, fds, &crt->ip_bpf_ingress_installed);
5527
5528 if (streq(key, "ip-bpf-egress-installed"))
5529 (void) bpf_program_deserialize_attachment(value, fds, &crt->ip_bpf_egress_installed);
5530
5531 if (streq(key, "bpf-device-control-installed"))
5532 (void) bpf_program_deserialize_attachment(value, fds, &crt->bpf_device_control_installed);
5533
5534 if (streq(key, "ip-bpf-custom-ingress-installed"))
5535 (void) bpf_program_deserialize_attachment_set(value, fds, &crt->ip_bpf_custom_ingress_installed);
5536
5537 if (streq(key, "ip-bpf-custom-egress-installed"))
5538 (void) bpf_program_deserialize_attachment_set(value, fds, &crt->ip_bpf_custom_egress_installed);
5539 }
5540
5541 return 1;
5542 }
5543
5544 if (streq(key, "restrict-ifaces-bpf-fd")) {
5545 int fd;
5546
5547 fd = deserialize_fd(fds, value);
5548 if (fd >= 0)
5549 (void) bpf_restrict_ifaces_add_initial_link_fd(u, fd);
5550 return 1;
5551 }
5552
5553 CGroupMemoryAccountingMetric mm = memory_accounting_metric_field_last_from_string(key);
5554 if (mm >= 0) {
5555 uint64_t c;
5556
5557 r = safe_atou64(value, &c);
5558 if (r < 0)
5559 log_unit_debug(u, "Failed to parse memory accounting last value %s, ignoring.", value);
5560 else {
5561 CGroupRuntime *crt = unit_setup_cgroup_runtime(u);
5562 if (!crt)
5563 log_oom_debug();
5564 else
5565 crt->memory_accounting_last[mm] = c;
5566 }
5567
5568 return 1;
5569 }
5570
5571 CGroupIPAccountingMetric ipm = ip_accounting_metric_field_from_string(key);
5572 if (ipm >= 0) {
5573 uint64_t c;
5574
5575 r = safe_atou64(value, &c);
5576 if (r < 0)
5577 log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", value);
5578 else {
5579 CGroupRuntime *crt = unit_setup_cgroup_runtime(u);
5580 if (!crt)
5581 log_oom_debug();
5582 else
5583 crt->ip_accounting_extra[ipm] = c;
5584 }
5585
5586 return 1;
5587 }
5588
5589 CGroupIOAccountingMetric iom = io_accounting_metric_field_base_from_string(key);
5590 if (iom >= 0) {
5591 uint64_t c;
5592
5593 r = safe_atou64(value, &c);
5594 if (r < 0)
5595 log_unit_debug(u, "Failed to parse IO accounting base value %s, ignoring.", value);
5596 else {
5597 CGroupRuntime *crt = unit_setup_cgroup_runtime(u);
5598 if (!crt)
5599 log_oom_debug();
5600 else
5601 crt->io_accounting_base[iom] = c;
5602 }
5603
5604 return 1;
5605 }
5606
5607 iom = io_accounting_metric_field_last_from_string(key);
5608 if (iom >= 0) {
5609 uint64_t c;
5610
5611 r = safe_atou64(value, &c);
5612 if (r < 0)
5613 log_unit_debug(u, "Failed to parse IO accounting last value %s, ignoring.", value);
5614 else {
5615 CGroupRuntime *crt = unit_setup_cgroup_runtime(u);
5616 if (!crt)
5617 log_oom_debug();
5618 else
5619 crt->io_accounting_last[iom] = c;
5620 }
5621 return 1;
5622 }
5623
5624 return 0;
5625}
5626
4e806bfa
AZ
5627static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
5628 [CGROUP_DEVICE_POLICY_AUTO] = "auto",
5629 [CGROUP_DEVICE_POLICY_CLOSED] = "closed",
5630 [CGROUP_DEVICE_POLICY_STRICT] = "strict",
5631};
5632
4ad49000 5633DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);
d9e45bc3 5634
6bb00842 5635static const char* const cgroup_pressure_watch_table[_CGROUP_PRESSURE_WATCH_MAX] = {
16b6af6a 5636 [CGROUP_PRESSURE_WATCH_OFF] = "off",
6bb00842 5637 [CGROUP_PRESSURE_WATCH_AUTO] = "auto",
16b6af6a 5638 [CGROUP_PRESSURE_WATCH_ON] = "on",
6bb00842
LP
5639 [CGROUP_PRESSURE_WATCH_SKIP] = "skip",
5640};
5641
5642DEFINE_STRING_TABLE_LOOKUP_WITH_BOOLEAN(cgroup_pressure_watch, CGroupPressureWatch, CGROUP_PRESSURE_WATCH_ON);
435996e6
DDM
5643
5644static const char* const cgroup_ip_accounting_metric_table[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
5645 [CGROUP_IP_INGRESS_BYTES] = "IPIngressBytes",
5646 [CGROUP_IP_EGRESS_BYTES] = "IPEgressBytes",
5647 [CGROUP_IP_INGRESS_PACKETS] = "IPIngressPackets",
5648 [CGROUP_IP_EGRESS_PACKETS] = "IPEgressPackets",
5649};
5650
5651DEFINE_STRING_TABLE_LOOKUP(cgroup_ip_accounting_metric, CGroupIPAccountingMetric);
5652
5653static const char* const cgroup_io_accounting_metric_table[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
5654 [CGROUP_IO_READ_BYTES] = "IOReadBytes",
5655 [CGROUP_IO_WRITE_BYTES] = "IOWriteBytes",
5656 [CGROUP_IO_READ_OPERATIONS] = "IOReadOperations",
5657 [CGROUP_IO_WRITE_OPERATIONS] = "IOWriteOperations",
5658};
5659
5660DEFINE_STRING_TABLE_LOOKUP(cgroup_io_accounting_metric, CGroupIOAccountingMetric);
9824ab1f
MY
5661
5662static const char* const cgroup_memory_accounting_metric_table[_CGROUP_MEMORY_ACCOUNTING_METRIC_MAX] = {
5663 [CGROUP_MEMORY_PEAK] = "MemoryPeak",
5664 [CGROUP_MEMORY_SWAP_CURRENT] = "MemorySwapCurrent",
5665 [CGROUP_MEMORY_SWAP_PEAK] = "MemorySwapPeak",
5666 [CGROUP_MEMORY_ZSWAP_CURRENT] = "MemoryZSwapCurrent",
5667};
5668
5669DEFINE_STRING_TABLE_LOOKUP(cgroup_memory_accounting_metric, CGroupMemoryAccountingMetric);
4fb0d2dc 5670
8ad61489 5671static const char *const cgroup_effective_limit_type_table[_CGROUP_LIMIT_TYPE_MAX] = {
4fb0d2dc
MK
5672 [CGROUP_LIMIT_MEMORY_MAX] = "EffectiveMemoryMax",
5673 [CGROUP_LIMIT_MEMORY_HIGH] = "EffectiveMemoryHigh",
5674 [CGROUP_LIMIT_TASKS_MAX] = "EffectiveTasksMax",
5675};
5676
8ad61489 5677DEFINE_STRING_TABLE_LOOKUP(cgroup_effective_limit_type, CGroupLimitType);