/* SPDX-License-Identifier: LGPL-2.1+ */

#include <fcntl.h>
#include <fnmatch.h>

#include "alloc-util.h"
#include "blockdev-util.h"
#include "bpf-firewall.h"
#include "btrfs-util.h"
#include "bpf-devices.h"
#include "bus-error.h"
#include "cgroup-util.h"
#include "cgroup.h"
#include "fd-util.h"
#include "fileio.h"
#include "fs-util.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
#include "procfs-util.h"
#include "special.h"
#include "stat-util.h"
#include "stdio-util.h"
#include "string-table.h"
#include "string-util.h"
#include "virt.h"

#define CGROUP_CPU_QUOTA_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)
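
/* Note: this is the scheduling period the CPU quota logic below assumes, i.e. 100ms. A CPUQuotaPerSecUsec= value
 * is rescaled from "per second" to this period before it is written to cpu.max (unified) or cpu.cfs_quota_us
 * (legacy); see cgroup_apply_unified_cpu_quota() and cgroup_apply_legacy_cpu_quota(). */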

/* Returns the log level to use when cgroup attribute writes fail. When an attribute is missing or we have access
 * problems we downgrade to LOG_DEBUG. This is supposed to be nice to container managers and kernels which want to mask
 * out specific attributes from us. */
#define LOG_LEVEL_CGROUP_WRITE(r) (IN_SET(abs(r), ENOENT, EROFS, EACCES, EPERM) ? LOG_DEBUG : LOG_WARNING)

bool manager_owns_host_root_cgroup(Manager *m) {
        assert(m);

        /* Returns true if we are managing the root cgroup. Note that it isn't sufficient to just check whether the
         * group root path equals "/" since that will also be the case if CLONE_NEWCGROUP is in the mix. Since there
         * appears to be no nice way to detect whether we are in a CLONE_NEWCGROUP namespace, we instead just check if
         * we run in any kind of container virtualization. */

        if (MANAGER_IS_USER(m))
                return false;

        if (detect_container() > 0)
                return false;

        return empty_or_root(m->cgroup_root);
}

bool unit_has_host_root_cgroup(Unit *u) {
        assert(u);

        /* Returns whether this unit manages the root cgroup. This will return true if this unit is the root slice and
         * the manager manages the root cgroup. */

        if (!manager_owns_host_root_cgroup(u->manager))
                return false;

        return unit_has_name(u, SPECIAL_ROOT_SLICE);
}

static int set_attribute_and_warn(Unit *u, const char *controller, const char *attribute, const char *value) {
        int r;

        r = cg_set_attribute(controller, u->cgroup_path, attribute, value);
        if (r < 0)
                log_unit_full(u, LOG_LEVEL_CGROUP_WRITE(r), r, "Failed to set '%s' attribute on '%s' to '%.*s': %m",
                              strna(attribute), isempty(u->cgroup_path) ? "/" : u->cgroup_path, (int) strcspn(value, NEWLINE), value);
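        /* Note: the "%.*s" width of strcspn(value, NEWLINE) above trims the logged value at its first newline, since
         * the values written to cgroup attributes are newline-terminated and we only want the payload in the log. */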

        return r;
}

static void cgroup_compat_warn(void) {
        static bool cgroup_compat_warned = false;

        if (cgroup_compat_warned)
                return;

        log_warning("cgroup compatibility translation between legacy and unified hierarchy settings activated. "
                    "See cgroup-compat debug messages for details.");

        cgroup_compat_warned = true;
}

#define log_cgroup_compat(unit, fmt, ...) do {                                  \
                cgroup_compat_warn();                                           \
                log_unit_debug(unit, "cgroup-compat: " fmt, ##__VA_ARGS__);     \
        } while (false)

void cgroup_context_init(CGroupContext *c) {
        assert(c);

        /* Initialize everything to the kernel defaults. */

        *c = (CGroupContext) {
                .cpu_weight = CGROUP_WEIGHT_INVALID,
                .startup_cpu_weight = CGROUP_WEIGHT_INVALID,
                .cpu_quota_per_sec_usec = USEC_INFINITY,

                .cpu_shares = CGROUP_CPU_SHARES_INVALID,
                .startup_cpu_shares = CGROUP_CPU_SHARES_INVALID,

                .memory_high = CGROUP_LIMIT_MAX,
                .memory_max = CGROUP_LIMIT_MAX,
                .memory_swap_max = CGROUP_LIMIT_MAX,

                .memory_limit = CGROUP_LIMIT_MAX,

                .io_weight = CGROUP_WEIGHT_INVALID,
                .startup_io_weight = CGROUP_WEIGHT_INVALID,

                .blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID,
                .startup_blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID,

                .tasks_max = CGROUP_LIMIT_MAX,
        };
}
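
/* The *_INVALID and CGROUP_LIMIT_MAX/USEC_INFINITY sentinels above mean "not configured": the
 * cgroup_context_cpu_weight()/cgroup_context_cpu_shares() style getters below fall back to the kernel defaults
 * (e.g. CGROUP_WEIGHT_DEFAULT) when they see them, and limit values equal to CGROUP_LIMIT_MAX are written out as
 * "max" (unified) or "-1" (legacy), i.e. "no limit". */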

void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
        assert(c);
        assert(a);

        LIST_REMOVE(device_allow, c->device_allow, a);
        free(a->path);
        free(a);
}

void cgroup_context_free_io_device_weight(CGroupContext *c, CGroupIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->io_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_io_device_latency(CGroupContext *c, CGroupIODeviceLatency *l) {
        assert(c);
        assert(l);

        LIST_REMOVE(device_latencies, c->io_device_latencies, l);
        free(l->path);
        free(l);
}

void cgroup_context_free_io_device_limit(CGroupContext *c, CGroupIODeviceLimit *l) {
        assert(c);
        assert(l);

        LIST_REMOVE(device_limits, c->io_device_limits, l);
        free(l->path);
        free(l);
}

void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->blockio_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b) {
        assert(c);
        assert(b);

        LIST_REMOVE(device_bandwidths, c->blockio_device_bandwidths, b);
        free(b->path);
        free(b);
}

void cgroup_context_done(CGroupContext *c) {
        assert(c);

        while (c->io_device_weights)
                cgroup_context_free_io_device_weight(c, c->io_device_weights);

        while (c->io_device_latencies)
                cgroup_context_free_io_device_latency(c, c->io_device_latencies);

        while (c->io_device_limits)
                cgroup_context_free_io_device_limit(c, c->io_device_limits);

        while (c->blockio_device_weights)
                cgroup_context_free_blockio_device_weight(c, c->blockio_device_weights);

        while (c->blockio_device_bandwidths)
                cgroup_context_free_blockio_device_bandwidth(c, c->blockio_device_bandwidths);

        while (c->device_allow)
                cgroup_context_free_device_allow(c, c->device_allow);

        c->ip_address_allow = ip_address_access_free_all(c->ip_address_allow);
        c->ip_address_deny = ip_address_access_free_all(c->ip_address_deny);
}

void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) {
        CGroupIODeviceLimit *il;
        CGroupIODeviceWeight *iw;
        CGroupIODeviceLatency *l;
        CGroupBlockIODeviceBandwidth *b;
        CGroupBlockIODeviceWeight *w;
        CGroupDeviceAllow *a;
        IPAddressAccessItem *iaai;
        char u[FORMAT_TIMESPAN_MAX];

        assert(c);
        assert(f);

        prefix = strempty(prefix);

        fprintf(f,
                "%sCPUAccounting=%s\n"
                "%sIOAccounting=%s\n"
                "%sBlockIOAccounting=%s\n"
                "%sMemoryAccounting=%s\n"
                "%sTasksAccounting=%s\n"
                "%sIPAccounting=%s\n"
                "%sCPUWeight=%" PRIu64 "\n"
                "%sStartupCPUWeight=%" PRIu64 "\n"
                "%sCPUShares=%" PRIu64 "\n"
                "%sStartupCPUShares=%" PRIu64 "\n"
                "%sCPUQuotaPerSecSec=%s\n"
                "%sIOWeight=%" PRIu64 "\n"
                "%sStartupIOWeight=%" PRIu64 "\n"
                "%sBlockIOWeight=%" PRIu64 "\n"
                "%sStartupBlockIOWeight=%" PRIu64 "\n"
                "%sMemoryMin=%" PRIu64 "\n"
                "%sMemoryLow=%" PRIu64 "\n"
                "%sMemoryHigh=%" PRIu64 "\n"
                "%sMemoryMax=%" PRIu64 "\n"
                "%sMemorySwapMax=%" PRIu64 "\n"
                "%sMemoryLimit=%" PRIu64 "\n"
                "%sTasksMax=%" PRIu64 "\n"
                "%sDevicePolicy=%s\n"
                "%sDelegate=%s\n",
                prefix, yes_no(c->cpu_accounting),
                prefix, yes_no(c->io_accounting),
                prefix, yes_no(c->blockio_accounting),
                prefix, yes_no(c->memory_accounting),
                prefix, yes_no(c->tasks_accounting),
                prefix, yes_no(c->ip_accounting),
                prefix, c->cpu_weight,
                prefix, c->startup_cpu_weight,
                prefix, c->cpu_shares,
                prefix, c->startup_cpu_shares,
                prefix, format_timespan(u, sizeof(u), c->cpu_quota_per_sec_usec, 1),
                prefix, c->io_weight,
                prefix, c->startup_io_weight,
                prefix, c->blockio_weight,
                prefix, c->startup_blockio_weight,
                prefix, c->memory_min,
                prefix, c->memory_low,
                prefix, c->memory_high,
                prefix, c->memory_max,
                prefix, c->memory_swap_max,
                prefix, c->memory_limit,
                prefix, c->tasks_max,
                prefix, cgroup_device_policy_to_string(c->device_policy),
                prefix, yes_no(c->delegate));

        if (c->delegate) {
                _cleanup_free_ char *t = NULL;

                (void) cg_mask_to_string(c->delegate_controllers, &t);

                fprintf(f, "%sDelegateControllers=%s\n",
                        prefix,
                        strempty(t));
        }

        LIST_FOREACH(device_allow, a, c->device_allow)
                fprintf(f,
                        "%sDeviceAllow=%s %s%s%s\n",
                        prefix,
                        a->path,
                        a->r ? "r" : "", a->w ? "w" : "", a->m ? "m" : "");

        LIST_FOREACH(device_weights, iw, c->io_device_weights)
                fprintf(f,
                        "%sIODeviceWeight=%s %" PRIu64 "\n",
                        prefix,
                        iw->path,
                        iw->weight);

        LIST_FOREACH(device_latencies, l, c->io_device_latencies)
                fprintf(f,
                        "%sIODeviceLatencyTargetSec=%s %s\n",
                        prefix,
                        l->path,
                        format_timespan(u, sizeof(u), l->target_usec, 1));

        LIST_FOREACH(device_limits, il, c->io_device_limits) {
                char buf[FORMAT_BYTES_MAX];
                CGroupIOLimitType type;

                for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                        if (il->limits[type] != cgroup_io_limit_defaults[type])
                                fprintf(f,
                                        "%s%s=%s %s\n",
                                        prefix,
                                        cgroup_io_limit_type_to_string(type),
                                        il->path,
                                        format_bytes(buf, sizeof(buf), il->limits[type]));
        }

        LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                fprintf(f,
                        "%sBlockIODeviceWeight=%s %" PRIu64 "\n",
                        prefix,
                        w->path,
                        w->weight);

        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                char buf[FORMAT_BYTES_MAX];

                if (b->rbps != CGROUP_LIMIT_MAX)
                        fprintf(f,
                                "%sBlockIOReadBandwidth=%s %s\n",
                                prefix,
                                b->path,
                                format_bytes(buf, sizeof(buf), b->rbps));
                if (b->wbps != CGROUP_LIMIT_MAX)
                        fprintf(f,
                                "%sBlockIOWriteBandwidth=%s %s\n",
                                prefix,
                                b->path,
                                format_bytes(buf, sizeof(buf), b->wbps));
        }

        LIST_FOREACH(items, iaai, c->ip_address_allow) {
                _cleanup_free_ char *k = NULL;

                (void) in_addr_to_string(iaai->family, &iaai->address, &k);
                fprintf(f, "%sIPAddressAllow=%s/%u\n", prefix, strnull(k), iaai->prefixlen);
        }

        LIST_FOREACH(items, iaai, c->ip_address_deny) {
                _cleanup_free_ char *k = NULL;

                (void) in_addr_to_string(iaai->family, &iaai->address, &k);
                fprintf(f, "%sIPAddressDeny=%s/%u\n", prefix, strnull(k), iaai->prefixlen);
        }
}

int cgroup_add_device_allow(CGroupContext *c, const char *dev, const char *mode) {
        _cleanup_free_ CGroupDeviceAllow *a = NULL;
        _cleanup_free_ char *d = NULL;

        assert(c);
        assert(dev);
        assert(isempty(mode) || in_charset(mode, "rwm"));

        a = new(CGroupDeviceAllow, 1);
        if (!a)
                return -ENOMEM;

        d = strdup(dev);
        if (!d)
                return -ENOMEM;

        *a = (CGroupDeviceAllow) {
                .path = TAKE_PTR(d),
                .r = isempty(mode) || strchr(mode, 'r'),
                .w = isempty(mode) || strchr(mode, 'w'),
                .m = isempty(mode) || strchr(mode, 'm'),
        };

        LIST_PREPEND(device_allow, c->device_allow, a);
        TAKE_PTR(a);

        return 0;
}
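
/* Note the ownership dance above: both 'a' and 'd' are declared _cleanup_free_, so they are freed automatically on
 * the error paths; TAKE_PTR() then disarms that cleanup once ownership has safely moved into the structure and onto
 * the list. */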

static int lookup_block_device(const char *p, dev_t *ret) {
        struct stat st = {};
        int r;

        assert(p);
        assert(ret);

        r = device_path_parse_major_minor(p, &st.st_mode, &st.st_rdev);
        if (r == -ENODEV) { /* not a parsable device node, need to go to disk */
                if (stat(p, &st) < 0)
                        return log_warning_errno(errno, "Couldn't stat device '%s': %m", p);
        } else if (r < 0)
                return log_warning_errno(r, "Failed to parse major/minor from path '%s': %m", p);

        if (S_ISCHR(st.st_mode)) {
                log_warning("Device node '%s' is a character device, but block device needed.", p);
                return -ENOTBLK;
        } else if (S_ISBLK(st.st_mode))
                *ret = st.st_rdev;
        else if (major(st.st_dev) != 0)
                *ret = st.st_dev; /* If this is not a device node then use the block device this file is stored on */
        else {
                /* If this is btrfs, getting the backing block device is a bit harder */
                r = btrfs_get_block_device(p, ret);
                if (r < 0 && r != -ENOTTY)
                        return log_warning_errno(r, "Failed to determine block device backing btrfs file system '%s': %m", p);
                if (r == -ENOTTY) {
                        log_warning("'%s' is not a block device node, and file system block device cannot be determined or is not local.", p);
                        return -ENODEV;
                }
        }

        /* If this is a LUKS device, try to get the originating block device */
        (void) block_get_originating(*ret, ret);

        /* If this is a partition, try to get the originating block device */
        (void) block_get_whole_disk(*ret, ret);
        return 0;
}
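
/* To illustrate the resolution chain above (example values, not from the original source): a regular file living on
 * /dev/sda3 first yields the device backing its file system (st_dev), block_get_originating() would then map a
 * dm-crypt/LUKS device back to its origin, and block_get_whole_disk() finally strips the partition, so the dev_t
 * returned is that of the whole disk, sda. */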

static int whitelist_device(BPFProgram *prog, const char *path, const char *node, const char *acc) {
        struct stat st = {};
        int r;

        assert(path);
        assert(acc);

        /* Some special handling for /dev/block/%u:%u, /dev/char/%u:%u, /run/systemd/inaccessible/chr and
         * /run/systemd/inaccessible/blk paths. Instead of stat()ing these we parse out the major/minor directly. This
         * means clients can use these paths without the device node actually being around. */
        r = device_path_parse_major_minor(node, &st.st_mode, &st.st_rdev);
        if (r < 0) {
                if (r != -ENODEV)
                        return log_warning_errno(r, "Couldn't parse major/minor from device path '%s': %m", node);

                if (stat(node, &st) < 0)
                        return log_warning_errno(errno, "Couldn't stat device %s: %m", node);

                if (!S_ISCHR(st.st_mode) && !S_ISBLK(st.st_mode)) {
                        log_warning("%s is not a device.", node);
                        return -ENODEV;
                }
        }

        if (cg_all_unified() > 0) {
                if (!prog)
                        return 0;

                return cgroup_bpf_whitelist_device(prog, S_ISCHR(st.st_mode) ? BPF_DEVCG_DEV_CHAR : BPF_DEVCG_DEV_BLOCK,
                                                   major(st.st_rdev), minor(st.st_rdev), acc);

        } else {
                char buf[2+DECIMAL_STR_MAX(dev_t)*2+2+4];

                sprintf(buf,
                        "%c %u:%u %s",
                        S_ISCHR(st.st_mode) ? 'c' : 'b',
                        major(st.st_rdev), minor(st.st_rdev),
                        acc);

                /* Changing the devices list of a populated cgroup might result in EINVAL, hence ignore EINVAL here. */

                r = cg_set_attribute("devices", path, "devices.allow", buf);
                if (r < 0)
                        return log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES, -EPERM) ? LOG_DEBUG : LOG_WARNING,
                                              r, "Failed to set devices.allow on %s: %m", path);

                return 0;
        }
}
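
/* Example (illustrative): whitelisting /dev/null with access "rwm" on the legacy hierarchy writes the string
 * "c 1:3 rwm" to devices.allow, /dev/null being character device major 1, minor 3. On the unified hierarchy the same
 * tuple is compiled into the BPF device program instead. */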

static int whitelist_major(BPFProgram *prog, const char *path, const char *name, char type, const char *acc) {
        _cleanup_fclose_ FILE *f = NULL;
        char buf[2+DECIMAL_STR_MAX(unsigned)+3+4];
        bool good = false;
        unsigned maj;
        int r;

        assert(path);
        assert(acc);
        assert(IN_SET(type, 'b', 'c'));

        if (streq(name, "*")) {
                /* If the name is a wildcard, then apply this list to all devices of this type */

                if (cg_all_unified() > 0) {
                        if (!prog)
                                return 0;

                        (void) cgroup_bpf_whitelist_class(prog, type == 'c' ? BPF_DEVCG_DEV_CHAR : BPF_DEVCG_DEV_BLOCK, acc);
                } else {
                        xsprintf(buf, "%c *:* %s", type, acc);

                        r = cg_set_attribute("devices", path, "devices.allow", buf);
                        if (r < 0)
                                log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                               "Failed to set devices.allow on %s: %m", path);
                        return 0;
                }
        }

        if (safe_atou(name, &maj) >= 0 && DEVICE_MAJOR_VALID(maj)) {
                /* The name is numeric and suitable as a major. In that case, let's take it as the major, and create
                 * the entry directly. */

                if (cg_all_unified() > 0) {
                        if (!prog)
                                return 0;

                        (void) cgroup_bpf_whitelist_major(prog,
                                                          type == 'c' ? BPF_DEVCG_DEV_CHAR : BPF_DEVCG_DEV_BLOCK,
                                                          maj, acc);
                } else {
                        xsprintf(buf, "%c %u:* %s", type, maj, acc);

                        r = cg_set_attribute("devices", path, "devices.allow", buf);
                        if (r < 0)
                                log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                               "Failed to set devices.allow on %s: %m", path);
                }

                return 0;
        }

        f = fopen("/proc/devices", "re");
        if (!f)
                return log_warning_errno(errno, "Cannot open /proc/devices to resolve %s (%c): %m", name, type);

        for (;;) {
                _cleanup_free_ char *line = NULL;
                char *w, *p;

                r = read_line(f, LONG_LINE_MAX, &line);
                if (r < 0)
                        return log_warning_errno(r, "Failed to read /proc/devices: %m");
                if (r == 0)
                        break;

                if (type == 'c' && streq(line, "Character devices:")) {
                        good = true;
                        continue;
                }

                if (type == 'b' && streq(line, "Block devices:")) {
                        good = true;
                        continue;
                }

                if (isempty(line)) {
                        good = false;
                        continue;
                }

                if (!good)
                        continue;

                p = strstrip(line);

                w = strpbrk(p, WHITESPACE);
                if (!w)
                        continue;
                *w = 0;

                r = safe_atou(p, &maj);
                if (r < 0)
                        continue;
                if (maj <= 0)
                        continue;

                w++;
                w += strspn(w, WHITESPACE);

                if (fnmatch(name, w, 0) != 0)
                        continue;

                if (cg_all_unified() > 0) {
                        if (!prog)
                                continue;

                        (void) cgroup_bpf_whitelist_major(prog,
                                                          type == 'c' ? BPF_DEVCG_DEV_CHAR : BPF_DEVCG_DEV_BLOCK,
                                                          maj, acc);
                } else {
                        sprintf(buf,
                                "%c %u:* %s",
                                type,
                                maj,
                                acc);

                        /* Changing the devices list of a populated cgroup might result in EINVAL, hence ignore EINVAL
                         * here. */

                        r = cg_set_attribute("devices", path, "devices.allow", buf);
                        if (r < 0)
                                log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES, -EPERM) ? LOG_DEBUG : LOG_WARNING,
                                               r, "Failed to set devices.allow on %s: %m", path);
                }
        }

        return 0;
}
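
/* For reference, the format parsed above looks like this (typical excerpt; exact entries vary per system):
 *
 * Character devices:
 *   1 mem
 *   5 /dev/tty
 *  10 misc
 *
 * Block devices:
 *   8 sd
 * 259 blkext
 *
 * e.g. whitelist_major(prog, path, "sd", 'b', acc) matches the "sd" entry via fnmatch() and whitelists major 8. */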

static bool cgroup_context_has_cpu_weight(CGroupContext *c) {
        return c->cpu_weight != CGROUP_WEIGHT_INVALID ||
                c->startup_cpu_weight != CGROUP_WEIGHT_INVALID;
}

static bool cgroup_context_has_cpu_shares(CGroupContext *c) {
        return c->cpu_shares != CGROUP_CPU_SHARES_INVALID ||
                c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID;
}

static uint64_t cgroup_context_cpu_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->startup_cpu_weight;
        else if (c->cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->cpu_weight;
        else
                return CGROUP_WEIGHT_DEFAULT;
}

static uint64_t cgroup_context_cpu_shares(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->startup_cpu_shares;
        else if (c->cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->cpu_shares;
        else
                return CGROUP_CPU_SHARES_DEFAULT;
}

static void cgroup_apply_unified_cpu_weight(Unit *u, uint64_t weight) {
        char buf[DECIMAL_STR_MAX(uint64_t) + 2];

        xsprintf(buf, "%" PRIu64 "\n", weight);
        (void) set_attribute_and_warn(u, "cpu", "cpu.weight", buf);
}

static void cgroup_apply_unified_cpu_quota(Unit *u, usec_t quota) {
        char buf[(DECIMAL_STR_MAX(usec_t) + 1) * 2 + 1];

        if (quota != USEC_INFINITY)
                xsprintf(buf, USEC_FMT " " USEC_FMT "\n",
                         quota * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC, CGROUP_CPU_QUOTA_PERIOD_USEC);
        else
                xsprintf(buf, "max " USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);
        (void) set_attribute_and_warn(u, "cpu", "cpu.max", buf);
}
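
/* Worked example: CPUQuota=50% is stored as cpu_quota_per_sec_usec == 500000. Rescaled to the 100ms period this
 * becomes 500000 * 100000 / 1000000 == 50000, so "50000 100000" is written to cpu.max; the legacy path below writes
 * the same two numbers to cpu.cfs_quota_us and cpu.cfs_period_us respectively. */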

static void cgroup_apply_legacy_cpu_shares(Unit *u, uint64_t shares) {
        char buf[DECIMAL_STR_MAX(uint64_t) + 2];

        xsprintf(buf, "%" PRIu64 "\n", shares);
        (void) set_attribute_and_warn(u, "cpu", "cpu.shares", buf);
}

static void cgroup_apply_legacy_cpu_quota(Unit *u, usec_t quota) {
        char buf[DECIMAL_STR_MAX(usec_t) + 2];

        xsprintf(buf, USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);
        (void) set_attribute_and_warn(u, "cpu", "cpu.cfs_period_us", buf);

        if (quota != USEC_INFINITY) {
                xsprintf(buf, USEC_FMT "\n", quota * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC);
                (void) set_attribute_and_warn(u, "cpu", "cpu.cfs_quota_us", buf);
        } else
                (void) set_attribute_and_warn(u, "cpu", "cpu.cfs_quota_us", "-1\n");
}

static uint64_t cgroup_cpu_shares_to_weight(uint64_t shares) {
        return CLAMP(shares * CGROUP_WEIGHT_DEFAULT / CGROUP_CPU_SHARES_DEFAULT,
                     CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}

static uint64_t cgroup_cpu_weight_to_shares(uint64_t weight) {
        return CLAMP(weight * CGROUP_CPU_SHARES_DEFAULT / CGROUP_WEIGHT_DEFAULT,
                     CGROUP_CPU_SHARES_MIN, CGROUP_CPU_SHARES_MAX);
}
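
/* These map linearly through the two defaults (CGROUP_CPU_SHARES_DEFAULT == 1024, CGROUP_WEIGHT_DEFAULT == 100) and
 * clamp to the respective valid ranges. For example, CPUShares=2048 translates to a cpu.weight of
 * 2048 * 100 / 1024 == 200; the conversion is lossy at the extremes since the two ranges differ in span. */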

static bool cgroup_context_has_io_config(CGroupContext *c) {
        return c->io_accounting ||
                c->io_weight != CGROUP_WEIGHT_INVALID ||
                c->startup_io_weight != CGROUP_WEIGHT_INVALID ||
                c->io_device_weights ||
                c->io_device_latencies ||
                c->io_device_limits;
}

static bool cgroup_context_has_blockio_config(CGroupContext *c) {
        return c->blockio_accounting ||
                c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
                c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
                c->blockio_device_weights ||
                c->blockio_device_bandwidths;
}

static uint64_t cgroup_context_io_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_io_weight != CGROUP_WEIGHT_INVALID)
                return c->startup_io_weight;
        else if (c->io_weight != CGROUP_WEIGHT_INVALID)
                return c->io_weight;
        else
                return CGROUP_WEIGHT_DEFAULT;
}

static uint64_t cgroup_context_blkio_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
                return c->startup_blockio_weight;
        else if (c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
                return c->blockio_weight;
        else
                return CGROUP_BLKIO_WEIGHT_DEFAULT;
}

static uint64_t cgroup_weight_blkio_to_io(uint64_t blkio_weight) {
        return CLAMP(blkio_weight * CGROUP_WEIGHT_DEFAULT / CGROUP_BLKIO_WEIGHT_DEFAULT,
                     CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}

static uint64_t cgroup_weight_io_to_blkio(uint64_t io_weight) {
        return CLAMP(io_weight * CGROUP_BLKIO_WEIGHT_DEFAULT / CGROUP_WEIGHT_DEFAULT,
                     CGROUP_BLKIO_WEIGHT_MIN, CGROUP_BLKIO_WEIGHT_MAX);
}
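
/* Same linear-mapping idea as for CPU above: the unified "io" weight range (1..10000, default 100) and the legacy
 * "blkio" range (10..1000, default 500) are bridged through their defaults, so e.g. BlockIOWeight=1000 becomes an
 * IOWeight of 1000 * 100 / 500 == 200. */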

static void cgroup_apply_io_device_weight(Unit *u, const char *dev_path, uint64_t io_weight) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), io_weight);
        (void) set_attribute_and_warn(u, "io", "io.weight", buf);
}

static void cgroup_apply_blkio_device_weight(Unit *u, const char *dev_path, uint64_t blkio_weight) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), blkio_weight);
        (void) set_attribute_and_warn(u, "blkio", "blkio.weight_device", buf);
}

static void cgroup_apply_io_device_latency(Unit *u, const char *dev_path, usec_t target) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+7+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        if (target != USEC_INFINITY)
                xsprintf(buf, "%u:%u target=%" PRIu64 "\n", major(dev), minor(dev), target);
        else
                xsprintf(buf, "%u:%u target=max\n", major(dev), minor(dev));

        (void) set_attribute_and_warn(u, "io", "io.latency", buf);
}

static void cgroup_apply_io_device_limit(Unit *u, const char *dev_path, uint64_t *limits) {
        char limit_bufs[_CGROUP_IO_LIMIT_TYPE_MAX][DECIMAL_STR_MAX(uint64_t)];
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+(6+DECIMAL_STR_MAX(uint64_t)+1)*4];
        CGroupIOLimitType type;
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                if (limits[type] != cgroup_io_limit_defaults[type])
                        xsprintf(limit_bufs[type], "%" PRIu64, limits[type]);
                else
                        xsprintf(limit_bufs[type], "%s", limits[type] == CGROUP_LIMIT_MAX ? "max" : "0");

        xsprintf(buf, "%u:%u rbps=%s wbps=%s riops=%s wiops=%s\n", major(dev), minor(dev),
                 limit_bufs[CGROUP_IO_RBPS_MAX], limit_bufs[CGROUP_IO_WBPS_MAX],
                 limit_bufs[CGROUP_IO_RIOPS_MAX], limit_bufs[CGROUP_IO_WIOPS_MAX]);
        (void) set_attribute_and_warn(u, "io", "io.max", buf);
}
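
/* Example of the resulting io.max payload (illustrative): limiting reads on /dev/sda (8:0) to 1MB/s while leaving
 * the other knobs at their defaults yields "8:0 rbps=1000000 wbps=max riops=max wiops=max". */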

static void cgroup_apply_blkio_device_limit(Unit *u, const char *dev_path, uint64_t rbps, uint64_t wbps) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), rbps);
        (void) set_attribute_and_warn(u, "blkio", "blkio.throttle.read_bps_device", buf);

        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), wbps);
        (void) set_attribute_and_warn(u, "blkio", "blkio.throttle.write_bps_device", buf);
}

static bool cgroup_context_has_unified_memory_config(CGroupContext *c) {
        return c->memory_min > 0 || c->memory_low > 0 || c->memory_high != CGROUP_LIMIT_MAX ||
                c->memory_max != CGROUP_LIMIT_MAX || c->memory_swap_max != CGROUP_LIMIT_MAX;
}

static void cgroup_apply_unified_memory_limit(Unit *u, const char *file, uint64_t v) {
        char buf[DECIMAL_STR_MAX(uint64_t) + 1] = "max\n";

        if (v != CGROUP_LIMIT_MAX)
                xsprintf(buf, "%" PRIu64 "\n", v);

        (void) set_attribute_and_warn(u, "memory", file, buf);
}

static void cgroup_apply_firewall(Unit *u) {
        assert(u);

        /* Best-effort: let's apply IP firewalling and/or accounting if that's enabled */

        if (bpf_firewall_compile(u) < 0)
                return;

        (void) bpf_firewall_install(u);
}

static void cgroup_context_apply(
                Unit *u,
                CGroupMask apply_mask,
                ManagerState state) {

        const char *path;
        CGroupContext *c;
        bool is_host_root, is_local_root;
        int r;

        assert(u);

        /* Nothing to do? Exit early! */
        if (apply_mask == 0)
                return;

        /* Some cgroup attributes are not supported on the host root cgroup, hence silently ignore them here. And other
         * attributes should only be managed for cgroups further down the tree. */
        is_local_root = unit_has_name(u, SPECIAL_ROOT_SLICE);
        is_host_root = unit_has_host_root_cgroup(u);

        assert_se(c = unit_get_cgroup_context(u));
        assert_se(path = u->cgroup_path);

        if (is_local_root) /* Make sure we don't try to display messages with an empty path. */
                path = "/";

        /* We generally ignore errors caused by read-only mounted cgroup trees (assuming we are running in a container
         * in that case), and missing cgroups, i.e. EROFS and ENOENT. */

        /* In fully unified mode these attributes don't exist on the host cgroup root. On legacy the weights exist, but
         * setting the weight makes very little sense on the host root cgroup, as there are no other cgroups at this
         * level. The quota exists there too, but any attempt to write to it is refused with EINVAL. Inside of
         * containers we want to leave control of these to the container manager (and if cgroupsv2 delegation is used
         * we couldn't even write to them if we wanted to). */
        if ((apply_mask & CGROUP_MASK_CPU) && !is_local_root) {

                if (cg_all_unified() > 0) {
                        uint64_t weight;

                        if (cgroup_context_has_cpu_weight(c))
                                weight = cgroup_context_cpu_weight(c, state);
                        else if (cgroup_context_has_cpu_shares(c)) {
                                uint64_t shares;

                                shares = cgroup_context_cpu_shares(c, state);
                                weight = cgroup_cpu_shares_to_weight(shares);

                                log_cgroup_compat(u, "Applying [Startup]CPUShares=%" PRIu64 " as [Startup]CPUWeight=%" PRIu64 " on %s",
                                                  shares, weight, path);
                        } else
                                weight = CGROUP_WEIGHT_DEFAULT;

                        cgroup_apply_unified_cpu_weight(u, weight);
                        cgroup_apply_unified_cpu_quota(u, c->cpu_quota_per_sec_usec);

                } else {
                        uint64_t shares;

                        if (cgroup_context_has_cpu_weight(c)) {
                                uint64_t weight;

                                weight = cgroup_context_cpu_weight(c, state);
                                shares = cgroup_cpu_weight_to_shares(weight);

                                log_cgroup_compat(u, "Applying [Startup]CPUWeight=%" PRIu64 " as [Startup]CPUShares=%" PRIu64 " on %s",
                                                  weight, shares, path);
                        } else if (cgroup_context_has_cpu_shares(c))
                                shares = cgroup_context_cpu_shares(c, state);
                        else
                                shares = CGROUP_CPU_SHARES_DEFAULT;

                        cgroup_apply_legacy_cpu_shares(u, shares);
                        cgroup_apply_legacy_cpu_quota(u, c->cpu_quota_per_sec_usec);
                }
        }

        /* The 'io' controller attributes are not exported on the host's root cgroup (it being a pure cgroupsv2
         * controller), and in case of containers we want to leave control of these attributes to the container manager
         * (and we couldn't access that stuff anyway, even if we tried, if proper delegation is used). */
        if ((apply_mask & CGROUP_MASK_IO) && !is_local_root) {
                char buf[8+DECIMAL_STR_MAX(uint64_t)+1];
                bool has_io, has_blockio;
                uint64_t weight;

                has_io = cgroup_context_has_io_config(c);
                has_blockio = cgroup_context_has_blockio_config(c);

                if (has_io)
                        weight = cgroup_context_io_weight(c, state);
                else if (has_blockio) {
                        uint64_t blkio_weight;

                        blkio_weight = cgroup_context_blkio_weight(c, state);
                        weight = cgroup_weight_blkio_to_io(blkio_weight);

                        log_cgroup_compat(u, "Applying [Startup]BlockIOWeight %" PRIu64 " as [Startup]IOWeight %" PRIu64,
                                          blkio_weight, weight);
                } else
                        weight = CGROUP_WEIGHT_DEFAULT;

                xsprintf(buf, "default %" PRIu64 "\n", weight);
                (void) set_attribute_and_warn(u, "io", "io.weight", buf);

                if (has_io) {
                        CGroupIODeviceLatency *latency;
                        CGroupIODeviceLimit *limit;
                        CGroupIODeviceWeight *w;

                        LIST_FOREACH(device_weights, w, c->io_device_weights)
                                cgroup_apply_io_device_weight(u, w->path, w->weight);

                        LIST_FOREACH(device_limits, limit, c->io_device_limits)
                                cgroup_apply_io_device_limit(u, limit->path, limit->limits);

                        LIST_FOREACH(device_latencies, latency, c->io_device_latencies)
                                cgroup_apply_io_device_latency(u, latency->path, latency->target_usec);

                } else if (has_blockio) {
                        CGroupBlockIODeviceWeight *w;
                        CGroupBlockIODeviceBandwidth *b;

                        LIST_FOREACH(device_weights, w, c->blockio_device_weights) {
                                weight = cgroup_weight_blkio_to_io(w->weight);

                                log_cgroup_compat(u, "Applying BlockIODeviceWeight %" PRIu64 " as IODeviceWeight %" PRIu64 " for %s",
                                                  w->weight, weight, w->path);

                                cgroup_apply_io_device_weight(u, w->path, weight);
                        }

                        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                                uint64_t limits[_CGROUP_IO_LIMIT_TYPE_MAX];
                                CGroupIOLimitType type;

                                for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                                        limits[type] = cgroup_io_limit_defaults[type];

                                limits[CGROUP_IO_RBPS_MAX] = b->rbps;
                                limits[CGROUP_IO_WBPS_MAX] = b->wbps;

                                log_cgroup_compat(u, "Applying BlockIO{Read|Write}Bandwidth %" PRIu64 " %" PRIu64 " as IO{Read|Write}BandwidthMax for %s",
                                                  b->rbps, b->wbps, b->path);

                                cgroup_apply_io_device_limit(u, b->path, limits);
                        }
                }
        }

        if (apply_mask & CGROUP_MASK_BLKIO) {
                bool has_io, has_blockio;

                has_io = cgroup_context_has_io_config(c);
                has_blockio = cgroup_context_has_blockio_config(c);

                /* Applying a 'weight' never makes sense for the host root cgroup, and for containers this should be
                 * left to our container manager, too. */
                if (!is_local_root) {
                        char buf[DECIMAL_STR_MAX(uint64_t)+1];
                        uint64_t weight;

                        if (has_io) {
                                uint64_t io_weight;

                                io_weight = cgroup_context_io_weight(c, state);
                                weight = cgroup_weight_io_to_blkio(io_weight);

                                log_cgroup_compat(u, "Applying [Startup]IOWeight %" PRIu64 " as [Startup]BlockIOWeight %" PRIu64,
                                                  io_weight, weight);
                        } else if (has_blockio)
                                weight = cgroup_context_blkio_weight(c, state);
                        else
                                weight = CGROUP_BLKIO_WEIGHT_DEFAULT;

                        xsprintf(buf, "%" PRIu64 "\n", weight);
                        (void) set_attribute_and_warn(u, "blkio", "blkio.weight", buf);

                        if (has_io) {
                                CGroupIODeviceWeight *w;

                                LIST_FOREACH(device_weights, w, c->io_device_weights) {
                                        weight = cgroup_weight_io_to_blkio(w->weight);

                                        log_cgroup_compat(u, "Applying IODeviceWeight %" PRIu64 " as BlockIODeviceWeight %" PRIu64 " for %s",
                                                          w->weight, weight, w->path);

                                        cgroup_apply_blkio_device_weight(u, w->path, weight);
                                }
                        } else if (has_blockio) {
                                CGroupBlockIODeviceWeight *w;

                                LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                                        cgroup_apply_blkio_device_weight(u, w->path, w->weight);
                        }
                }

                /* The bandwidth limits are something that makes sense to apply to the host's root but not to container
                 * roots, as there we want the container manager to handle them. */
                if (is_host_root || !is_local_root) {
                        if (has_io) {
                                CGroupIODeviceLimit *l;

                                LIST_FOREACH(device_limits, l, c->io_device_limits) {
                                        log_cgroup_compat(u, "Applying IO{Read|Write}Bandwidth %" PRIu64 " %" PRIu64 " as BlockIO{Read|Write}BandwidthMax for %s",
                                                          l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX], l->path);

                                        cgroup_apply_blkio_device_limit(u, l->path, l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX]);
                                }
                        } else if (has_blockio) {
                                CGroupBlockIODeviceBandwidth *b;

                                LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths)
                                        cgroup_apply_blkio_device_limit(u, b->path, b->rbps, b->wbps);
                        }
                }
        }

        /* In unified mode 'memory' attributes do not exist on the root cgroup. In legacy mode 'memory.limit_in_bytes'
         * exists on the root cgroup, but any writes to it are refused with EINVAL. And if we run in a container we
         * want to leave control to the container manager (and if proper cgroupsv2 delegation is used we couldn't even
         * write to this if we wanted to.) */
        if ((apply_mask & CGROUP_MASK_MEMORY) && !is_local_root) {

                if (cg_all_unified() > 0) {
                        uint64_t max, swap_max = CGROUP_LIMIT_MAX;

                        if (cgroup_context_has_unified_memory_config(c)) {
                                max = c->memory_max;
                                swap_max = c->memory_swap_max;
                        } else {
                                max = c->memory_limit;

                                if (max != CGROUP_LIMIT_MAX)
                                        log_cgroup_compat(u, "Applying MemoryLimit=%" PRIu64 " as MemoryMax=", max);
                        }

                        cgroup_apply_unified_memory_limit(u, "memory.min", c->memory_min);
                        cgroup_apply_unified_memory_limit(u, "memory.low", c->memory_low);
                        cgroup_apply_unified_memory_limit(u, "memory.high", c->memory_high);
                        cgroup_apply_unified_memory_limit(u, "memory.max", max);
                        cgroup_apply_unified_memory_limit(u, "memory.swap.max", swap_max);

                } else {
                        char buf[DECIMAL_STR_MAX(uint64_t) + 1];
                        uint64_t val;

                        if (cgroup_context_has_unified_memory_config(c)) {
                                val = c->memory_max;
                                log_cgroup_compat(u, "Applying MemoryMax=%" PRIu64 " as MemoryLimit=", val);
                        } else
                                val = c->memory_limit;

                        if (val == CGROUP_LIMIT_MAX)
                                strncpy(buf, "-1\n", sizeof(buf));
                        else
                                xsprintf(buf, "%" PRIu64 "\n", val);

                        (void) set_attribute_and_warn(u, "memory", "memory.limit_in_bytes", buf);
                }
        }

        /* On cgroupsv2 we can apply BPF everywhere. On cgroupsv1 we apply it everywhere except for the root of
         * containers, where we leave this to the manager. */
        if ((apply_mask & (CGROUP_MASK_DEVICES | CGROUP_MASK_BPF_DEVICES)) &&
            (is_host_root || cg_all_unified() > 0 || !is_local_root)) {
                _cleanup_(bpf_program_unrefp) BPFProgram *prog = NULL;
                CGroupDeviceAllow *a;

                if (cg_all_unified() > 0) {
                        r = cgroup_init_device_bpf(&prog, c->device_policy, c->device_allow);
                        if (r < 0)
                                log_unit_warning_errno(u, r, "Failed to initialize device control bpf program: %m");
                } else {
                        /* Changing the devices list of a populated cgroup might result in EINVAL, hence ignore EINVAL
                         * here. */

                        if (c->device_allow || c->device_policy != CGROUP_AUTO)
                                r = cg_set_attribute("devices", path, "devices.deny", "a");
                        else
                                r = cg_set_attribute("devices", path, "devices.allow", "a");
                        if (r < 0)
                                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES, -EPERM) ? LOG_DEBUG : LOG_WARNING, r,
                                              "Failed to reset devices.allow/devices.deny: %m");
                }

                if (c->device_policy == CGROUP_CLOSED ||
                    (c->device_policy == CGROUP_AUTO && c->device_allow)) {
                        static const char auto_devices[] =
                                "/dev/null\0" "rwm\0"
                                "/dev/zero\0" "rwm\0"
                                "/dev/full\0" "rwm\0"
                                "/dev/random\0" "rwm\0"
                                "/dev/urandom\0" "rwm\0"
                                "/dev/tty\0" "rwm\0"
                                "/dev/ptmx\0" "rwm\0"
                                /* Allow /run/systemd/inaccessible/{chr,blk} devices for mapping InaccessiblePaths */
                                "/run/systemd/inaccessible/chr\0" "rwm\0"
                                "/run/systemd/inaccessible/blk\0" "rwm\0";

                        const char *x, *y;

                        NULSTR_FOREACH_PAIR(x, y, auto_devices)
                                (void) whitelist_device(prog, path, x, y);

                        /* PTS (/dev/pts) devices may not be duplicated, but accessed */
                        (void) whitelist_major(prog, path, "pts", 'c', "rw");
                }

                LIST_FOREACH(device_allow, a, c->device_allow) {
                        char acc[4], *val;
                        unsigned k = 0;

                        if (a->r)
                                acc[k++] = 'r';
                        if (a->w)
                                acc[k++] = 'w';
                        if (a->m)
                                acc[k++] = 'm';

                        if (k == 0)
                                continue;

                        acc[k++] = 0;

                        if (path_startswith(a->path, "/dev/"))
                                (void) whitelist_device(prog, path, a->path, acc);
                        else if ((val = startswith(a->path, "block-")))
                                (void) whitelist_major(prog, path, val, 'b', acc);
                        else if ((val = startswith(a->path, "char-")))
                                (void) whitelist_major(prog, path, val, 'c', acc);
                        else
                                log_unit_debug(u, "Ignoring device '%s' while writing cgroup attribute.", a->path);
                }

                r = cgroup_apply_device_bpf(u, prog, c->device_policy, c->device_allow);
                if (r < 0) {
                        static bool warned = false;

                        log_full_errno(warned ? LOG_DEBUG : LOG_WARNING, r,
                                       "Unit %s configures device ACL, but the local system doesn't seem to support the BPF-based device controller.\n"
                                       "Proceeding WITHOUT applying ACL (all devices will be accessible)!\n"
                                       "(This warning is only shown for the first loaded unit using device ACL.)", u->id);

                        warned = true;
                }
        }

        if (apply_mask & CGROUP_MASK_PIDS) {

                if (is_host_root) {
                        /* So, the "pids" controller does not expose anything on the root cgroup, in order not to
                         * replicate knobs exposed elsewhere needlessly. We abstract this away here however, and when
                         * the knobs of the root cgroup are modified propagate this to the relevant sysctls. There's a
                         * non-obvious asymmetry however: unlike the cgroup properties we don't really want to take
                         * exclusive ownership of the sysctls, but we still want to honour things if the user sets
                         * limits. Hence we employ sort of a one-way strategy: when the user sets a bounded limit
                         * through us it counts. When the user afterwards unsets it again (i.e. sets it to unbounded)
                         * it also counts. But if the user never set a limit through us (i.e. we are the default of
                         * "unbounded") we leave things unmodified. For this we manage a global boolean that we turn on
                         * the first time we set a limit. Note that this boolean is flushed out on manager reload,
                         * which is desirable so that there's an official way to release control of the sysctl from
                         * systemd: set the limit to unbounded and reload. */

                        if (c->tasks_max != CGROUP_LIMIT_MAX) {
                                u->manager->sysctl_pid_max_changed = true;
                                r = procfs_tasks_set_limit(c->tasks_max);
                        } else if (u->manager->sysctl_pid_max_changed)
                                r = procfs_tasks_set_limit(TASKS_MAX);
                        else
                                r = 0;
                        if (r < 0)
                                log_unit_full(u, LOG_LEVEL_CGROUP_WRITE(r), r,
                                              "Failed to write to tasks limit sysctls: %m");
                }

                /* The attribute itself is not available on the host root cgroup, and in the container case we want to
                 * leave it for the container manager. */
                if (!is_local_root) {
                        if (c->tasks_max != CGROUP_LIMIT_MAX) {
                                char buf[DECIMAL_STR_MAX(uint64_t) + 2];

                                sprintf(buf, "%" PRIu64 "\n", c->tasks_max);
                                (void) set_attribute_and_warn(u, "pids", "pids.max", buf);
                        } else
                                (void) set_attribute_and_warn(u, "pids", "pids.max", "max\n");
                }
        }

        if (apply_mask & CGROUP_MASK_BPF_FIREWALL)
                cgroup_apply_firewall(u);
}

static bool unit_get_needs_bpf_firewall(Unit *u) {
        CGroupContext *c;
        Unit *p;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        if (c->ip_accounting ||
            c->ip_address_allow ||
            c->ip_address_deny)
                return true;

        /* If any parent slice has an IP access list defined, it applies too */
        for (p = UNIT_DEREF(u->slice); p; p = UNIT_DEREF(p->slice)) {
                c = unit_get_cgroup_context(p);
                if (!c)
                        return false;

                if (c->ip_address_allow ||
                    c->ip_address_deny)
                        return true;
        }

        return false;
}
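
/* In other words, the need for the BPF firewall is inherited downwards: e.g. if system.slice sets IPAddressDeny=any,
 * every unit placed below it returns true here, even if the unit itself configures no IP access lists of its own. */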

static CGroupMask cgroup_context_get_mask(CGroupContext *c) {
        CGroupMask mask = 0;

        /* Figure out which controllers we need, based on the cgroup context object */

        if (c->cpu_accounting)
                mask |= get_cpu_accounting_mask();

        if (cgroup_context_has_cpu_weight(c) ||
            cgroup_context_has_cpu_shares(c) ||
            c->cpu_quota_per_sec_usec != USEC_INFINITY)
                mask |= CGROUP_MASK_CPU;

        if (cgroup_context_has_io_config(c) || cgroup_context_has_blockio_config(c))
                mask |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;

        if (c->memory_accounting ||
            c->memory_limit != CGROUP_LIMIT_MAX ||
            cgroup_context_has_unified_memory_config(c))
                mask |= CGROUP_MASK_MEMORY;

        if (c->device_allow ||
            c->device_policy != CGROUP_AUTO)
                mask |= CGROUP_MASK_DEVICES | CGROUP_MASK_BPF_DEVICES;

        if (c->tasks_accounting ||
            c->tasks_max != CGROUP_LIMIT_MAX)
                mask |= CGROUP_MASK_PIDS;

        return CGROUP_MASK_EXTEND_JOINED(mask);
}
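
/* Example: a unit that only sets TasksMax=512 and MemoryMax=1G ends up with CGROUP_MASK_PIDS|CGROUP_MASK_MEMORY here,
 * possibly widened by CGROUP_MASK_EXTEND_JOINED() to cover controllers that are mounted jointly on the legacy
 * hierarchy (such as cpu+cpuacct). */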

static CGroupMask unit_get_bpf_mask(Unit *u) {
        CGroupMask mask = 0;

        /* Figure out which controllers we need, based on the cgroup context, possibly taking into account children
         * too. */

        if (unit_get_needs_bpf_firewall(u))
                mask |= CGROUP_MASK_BPF_FIREWALL;

        return mask;
}

CGroupMask unit_get_own_mask(Unit *u) {
        CGroupContext *c;

        /* Returns the mask of controllers the unit needs for itself. If a unit is not properly loaded, return an empty
         * mask, as we shouldn't reflect it in the cgroup hierarchy then. */

        if (u->load_state != UNIT_LOADED)
                return 0;

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        return cgroup_context_get_mask(c) | unit_get_bpf_mask(u) | unit_get_delegate_mask(u);
}

CGroupMask unit_get_delegate_mask(Unit *u) {
        CGroupContext *c;

        /* If delegation is turned on, then turn on selected controllers, unless we are on the legacy hierarchy and the
         * process we fork into is known to drop privileges, and hence shouldn't get access to the controllers.
         *
         * Note that on the unified hierarchy it is safe to delegate controllers to unprivileged services. */

        if (!unit_cgroup_delegate(u))
                return 0;

        if (cg_all_unified() <= 0) {
                ExecContext *e;

                e = unit_get_exec_context(u);
                if (e && !exec_context_maintains_privileges(e))
                        return 0;
        }

        assert_se(c = unit_get_cgroup_context(u));
        return CGROUP_MASK_EXTEND_JOINED(c->delegate_controllers);
}

CGroupMask unit_get_members_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's children require, merged */

        if (u->cgroup_members_mask_valid)
                return u->cgroup_members_mask; /* Use cached value if possible */

        u->cgroup_members_mask = 0;

        if (u->type == UNIT_SLICE) {
                void *v;
                Unit *member;
                Iterator i;

                HASHMAP_FOREACH_KEY(v, member, u->dependencies[UNIT_BEFORE], i) {

                        if (member == u)
                                continue;

                        if (UNIT_DEREF(member->slice) != u)
                                continue;

                        u->cgroup_members_mask |= unit_get_subtree_mask(member); /* note that this calls ourselves again, for the children */
                }
        }

        u->cgroup_members_mask_valid = true;
        return u->cgroup_members_mask;
}

CGroupMask unit_get_siblings_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's siblings require, i.e. the members mask of the unit's
         * parent slice if there is one. */

        if (UNIT_ISSET(u->slice))
                return unit_get_members_mask(UNIT_DEREF(u->slice));

        return unit_get_subtree_mask(u); /* we are the top-level slice */
}

CGroupMask unit_get_subtree_mask(Unit *u) {

        /* Returns the mask of this subtree, meaning of the group itself and its children. */

        return unit_get_own_mask(u) | unit_get_members_mask(u);
}

CGroupMask unit_get_target_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable for a specific cgroup, i.e. everything it needs
         * itself, plus all that its children need, plus all that its siblings need. This is primarily useful on the
         * legacy cgroup hierarchy, where we need to duplicate each cgroup in each hierarchy that shall be enabled for
         * it. */

        mask = unit_get_own_mask(u) | unit_get_members_mask(u) | unit_get_siblings_mask(u);
        mask &= u->manager->cgroup_supported;

        return mask;
}

CGroupMask unit_get_enable_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable for the children of a specific cgroup. This is
         * primarily useful for the unified cgroup hierarchy, where each cgroup controls which controllers are enabled
         * for its children. */

        mask = unit_get_members_mask(u);
        mask &= u->manager->cgroup_supported;

        return mask;
}
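
/* In short: unit_get_target_mask() is what a unit's own cgroup is realized with, while unit_get_enable_mask() is what
 * gets requested for its children, which on the unified hierarchy is presumably what cg_enable_everywhere() pushes
 * into the cgroup's cgroup.subtree_control (see unit_create_cgroup() below). */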

void unit_invalidate_cgroup_members_masks(Unit *u) {
        assert(u);

        /* Recursively invalidate the member masks cache all the way up the tree */
        u->cgroup_members_mask_valid = false;

        if (UNIT_ISSET(u->slice))
                unit_invalidate_cgroup_members_masks(UNIT_DEREF(u->slice));
}

const char *unit_get_realized_cgroup_path(Unit *u, CGroupMask mask) {

        /* Returns the realized cgroup path of the specified unit where all specified controllers are available. */

        while (u) {

                if (u->cgroup_path &&
                    u->cgroup_realized &&
                    FLAGS_SET(u->cgroup_realized_mask, mask))
                        return u->cgroup_path;

                u = UNIT_DEREF(u->slice);
        }

        return NULL;
}

static const char *migrate_callback(CGroupMask mask, void *userdata) {
        return unit_get_realized_cgroup_path(userdata, mask);
}

char *unit_default_cgroup_path(Unit *u) {
        _cleanup_free_ char *escaped = NULL, *slice = NULL;
        int r;

        assert(u);

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return strdup(u->manager->cgroup_root);

        if (UNIT_ISSET(u->slice) && !unit_has_name(UNIT_DEREF(u->slice), SPECIAL_ROOT_SLICE)) {
                r = cg_slice_to_path(UNIT_DEREF(u->slice)->id, &slice);
                if (r < 0)
                        return NULL;
        }

        escaped = cg_escape(u->id);
        if (!escaped)
                return NULL;

        if (slice)
                return strjoin(u->manager->cgroup_root, "/", slice, "/", escaped);
        else
                return strjoin(u->manager->cgroup_root, "/", escaped);
}
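
/* Example (assuming the host manager, where cgroup_root is the hierarchy root): a unit "foo.service" living in
 * "system.slice" gets "/system.slice/foo.service", and a nested slice like "system-getty.slice" is expanded by
 * cg_slice_to_path() into the path "system.slice/system-getty.slice". */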

int unit_set_cgroup_path(Unit *u, const char *path) {
        _cleanup_free_ char *p = NULL;
        int r;

        assert(u);

        if (path) {
                p = strdup(path);
                if (!p)
                        return -ENOMEM;
        } else
                p = NULL;

        if (streq_ptr(u->cgroup_path, p))
                return 0;

        if (p) {
                r = hashmap_put(u->manager->cgroup_unit, p, u);
                if (r < 0)
                        return r;
        }

        unit_release_cgroup(u);

        u->cgroup_path = TAKE_PTR(p);

        return 1;
}

int unit_watch_cgroup(Unit *u) {
        _cleanup_free_ char *events = NULL;
        int r;

        assert(u);

        if (!u->cgroup_path)
                return 0;

        if (u->cgroup_inotify_wd >= 0)
                return 0;

        /* Only applies to the unified hierarchy */
        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return log_error_errno(r, "Failed to determine whether the name=systemd hierarchy is unified: %m");
        if (r == 0)
                return 0;

        /* Don't watch the root slice, it's pointless. */
        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        r = hashmap_ensure_allocated(&u->manager->cgroup_inotify_wd_unit, &trivial_hash_ops);
        if (r < 0)
                return log_oom();

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events", &events);
        if (r < 0)
                return log_oom();

        u->cgroup_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, events, IN_MODIFY);
        if (u->cgroup_inotify_wd < 0) {

                if (errno == ENOENT) /* If the directory is already gone we don't need to track it, so this is not an
                                      * error */
                        return 0;

                return log_unit_error_errno(u, errno, "Failed to add inotify watch descriptor for control group %s: %m", u->cgroup_path);
        }

        r = hashmap_put(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd), u);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to add inotify watch descriptor to hash map: %m");

        return 0;
}
1550
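
/* For reference: on cgroupsv2 "cgroup.events" is a small keyed file (e.g. containing "populated 1") that the
 * kernel rewrites when the subtree gains or loses its last process; the IN_MODIFY watch above turns that
 * rewrite into an event, so no polling is needed. */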

int unit_pick_cgroup_path(Unit *u) {
        _cleanup_free_ char *path = NULL;
        int r;

        assert(u);

        if (u->cgroup_path)
                return 0;

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EINVAL;

        path = unit_default_cgroup_path(u);
        if (!path)
                return log_oom();

        r = unit_set_cgroup_path(u, path);
        if (r == -EEXIST)
                return log_unit_error_errno(u, r, "Control group %s exists already.", path);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to set unit's control group path to %s: %m", path);

        return 0;
}

static int unit_create_cgroup(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask) {

        bool created;
        int r;

        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* Figure out our cgroup path */
        r = unit_pick_cgroup_path(u);
        if (r < 0)
                return r;

        /* First, create our own group */
        r = cg_create_everywhere(u->manager->cgroup_supported, target_mask, u->cgroup_path);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to create cgroup %s: %m", u->cgroup_path);
        created = r;

        /* Start watching it */
        (void) unit_watch_cgroup(u);

        /* Preserve enabled controllers in delegated units, adjust others. */
        if (created || !u->cgroup_realized || !unit_cgroup_delegate(u)) {
                CGroupMask result_mask = 0;

                /* Enable all controllers we need */
                r = cg_enable_everywhere(u->manager->cgroup_supported, enable_mask, u->cgroup_path, &result_mask);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to enable/disable controllers on cgroup %s, ignoring: %m", u->cgroup_path);

                /* If we just turned off a controller, this might release the controller for our parent too, let's
                 * enqueue the parent for re-realization in that case again. */
                if (UNIT_ISSET(u->slice)) {
                        CGroupMask turned_off;

                        turned_off = (u->cgroup_realized ? u->cgroup_enabled_mask & ~result_mask : 0);
                        if (turned_off != 0) {
                                Unit *parent;

                                /* Force the parent to propagate the enable mask to the kernel again, by invalidating
                                 * the controller we just turned off. */

                                for (parent = UNIT_DEREF(u->slice); parent; parent = UNIT_DEREF(parent->slice))
                                        unit_invalidate_cgroup(parent, turned_off);
                        }
                }

                /* Remember what's actually enabled now */
                u->cgroup_enabled_mask = result_mask;
        }

        /* Keep track that this is now realized */
        u->cgroup_realized = true;
        u->cgroup_realized_mask = target_mask;

        if (u->type != UNIT_SLICE && !unit_cgroup_delegate(u)) {

                /* Then, possibly move things over, but not if
                 * subgroups may contain processes, which is the case
                 * for slice and delegation units. */
                r = cg_migrate_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->cgroup_path, migrate_callback, u);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to migrate cgroup processes to %s, ignoring: %m", u->cgroup_path);
        }

        return 0;
}

static int unit_attach_pid_to_cgroup_via_bus(Unit *u, pid_t pid, const char *suffix_path) {
        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
        char *pp;
        int r;

        assert(u);

        if (MANAGER_IS_SYSTEM(u->manager))
                return -EINVAL;

        if (!u->manager->system_bus)
                return -EIO;

        if (!u->cgroup_path)
                return -EINVAL;

        /* Determine this unit's cgroup path relative to our cgroup root */
        pp = path_startswith(u->cgroup_path, u->manager->cgroup_root);
        if (!pp)
                return -EINVAL;

        pp = strjoina("/", pp, suffix_path);
        path_simplify(pp, false);

        r = sd_bus_call_method(u->manager->system_bus,
                               "org.freedesktop.systemd1",
                               "/org/freedesktop/systemd1",
                               "org.freedesktop.systemd1.Manager",
                               "AttachProcessesToUnit",
                               &error, NULL,
                               "ssau",
                               NULL /* empty unit name means client's unit, i.e. us */, pp, 1, (uint32_t) pid);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to attach unit process " PID_FMT " via the bus: %s", pid, bus_error_message(&error, r));

        return 0;
}

int unit_attach_pids_to_cgroup(Unit *u, Set *pids, const char *suffix_path) {
        CGroupMask delegated_mask;
        const char *p;
        Iterator i;
        void *pidp;
        int r, q;

        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EINVAL;

        if (set_isempty(pids))
                return 0;

        r = unit_realize_cgroup(u);
        if (r < 0)
                return r;

        if (isempty(suffix_path))
                p = u->cgroup_path;
        else
                p = strjoina(u->cgroup_path, "/", suffix_path);

        delegated_mask = unit_get_delegate_mask(u);

        r = 0;
        SET_FOREACH(pidp, pids, i) {
                pid_t pid = PTR_TO_PID(pidp);
                CGroupController c;

                /* First, attach the PID to the main cgroup hierarchy */
                q = cg_attach(SYSTEMD_CGROUP_CONTROLLER, p, pid);
                if (q < 0) {
                        log_unit_debug_errno(u, q, "Couldn't move process " PID_FMT " to requested cgroup '%s': %m", pid, p);

                        if (MANAGER_IS_USER(u->manager) && IN_SET(q, -EPERM, -EACCES)) {
                                int z;

                                /* If we are in a user instance, and we can't move the process ourselves due to
                                 * permission problems, let's ask the system instance about it instead. Since it's more
                                 * privileged it might be able to move the process across the leaves of a subtree whose
                                 * top node is not owned by us. */

                                z = unit_attach_pid_to_cgroup_via_bus(u, pid, suffix_path);
                                if (z < 0)
                                        log_unit_debug_errno(u, z, "Couldn't move process " PID_FMT " to requested cgroup '%s' via the system bus either: %m", pid, p);
                                else
                                        continue; /* When attaching via the bus worked, we are fully done for this PID. */
                        }

                        if (r >= 0)
                                r = q; /* Remember first error */

                        continue;
                }

                q = cg_all_unified();
                if (q < 0)
                        return q;
                if (q > 0)
                        continue;

                /* In the legacy hierarchy, attach the process to the requested cgroup if possible, and if not to the
                 * innermost realized one */

                for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++) {
                        CGroupMask bit = CGROUP_CONTROLLER_TO_MASK(c);
                        const char *realized;

                        if (!(u->manager->cgroup_supported & bit))
                                continue;

                        /* If this controller is delegated and realized, honour the caller's request for the cgroup suffix. */
                        if (delegated_mask & u->cgroup_realized_mask & bit) {
                                q = cg_attach(cgroup_controller_to_string(c), p, pid);
                                if (q >= 0)
                                        continue; /* Success! */

                                log_unit_debug_errno(u, q, "Failed to attach PID " PID_FMT " to requested cgroup %s in controller %s, falling back to unit's cgroup: %m",
                                                     pid, p, cgroup_controller_to_string(c));
                        }

                        /* So this controller is either not delegated or not realized, or something else weird
                         * happened. In that case let's attach the PID at least to the closest cgroup up the
                         * tree that is realized. */
                        realized = unit_get_realized_cgroup_path(u, bit);
                        if (!realized)
                                continue; /* Not even realized in the root slice? Then let's not bother */

                        q = cg_attach(cgroup_controller_to_string(c), realized, pid);
                        if (q < 0)
                                log_unit_debug_errno(u, q, "Failed to attach PID " PID_FMT " to realized cgroup %s in controller %s, ignoring: %m",
                                                     pid, realized, cgroup_controller_to_string(c));
                }
        }

        return r;
}
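
/* A minimal usage sketch (hypothetical caller and names, assuming the unit is already loaded): move one
 * externally forked PID into a sub-cgroup of the unit:
 *
 *     _cleanup_set_free_ Set *pids = set_new(NULL);
 *     if (!pids || set_put(pids, PID_TO_PTR(worker_pid)) < 0)
 *             return log_oom();
 *     r = unit_attach_pids_to_cgroup(u, pids, "workers");
 *
 * On a pure cgroupsv2 system the single cg_attach() on the name=systemd hierarchy suffices; on legacy systems
 * the per-controller loop above repeats the attachment for every realized controller. */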

static void cgroup_xattr_apply(Unit *u) {
        char ids[SD_ID128_STRING_MAX];
        int r;

        assert(u);

        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        if (sd_id128_is_null(u->invocation_id))
                return;

        r = cg_set_xattr(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                         "trusted.invocation_id",
                         sd_id128_to_string(u->invocation_id, ids), 32,
                         0);
        if (r < 0)
                log_unit_debug_errno(u, r, "Failed to set invocation ID on control group %s, ignoring: %m", u->cgroup_path);
}
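
/* Consumers (journald, for example) can read the ID back with getxattr() on the cgroup directory; a sketch,
 * with a cgroupfs path made up for illustration:
 *
 *     char buf[33];
 *     ssize_t n = getxattr("/sys/fs/cgroup/system.slice/foo.service",
 *                          "trusted.invocation_id", buf, sizeof(buf) - 1);
 *
 * The value is the 32-character text form of the 128-bit ID, hence the fixed length 32 passed above. */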

static bool unit_has_mask_realized(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask) {

        assert(u);

        /* Returns true if this unit is fully realized. We check four things:
         *
         * 1. Whether the cgroup was created at all
         * 2. Whether the cgroup was created in all the hierarchies we need it to be created in (in case of cgroupsv1)
         * 3. Whether the cgroup has all the right controllers enabled (in case of cgroupsv2)
         * 4. Whether the invalidation mask is currently zero
         *
         * If you wonder why we mask the target realization and enable mask with CGROUP_MASK_V1/CGROUP_MASK_V2: note
         * that there are three sets of bitmasks: CGROUP_MASK_V1 (for real cgroupv1 controllers), CGROUP_MASK_V2 (for
         * real cgroupv2 controllers) and CGROUP_MASK_BPF (for BPF-based pseudo-controllers). Now, cgroup_realized_mask
         * only matters for cgroupsv1 controllers, and cgroup_enabled_mask is only used for cgroupsv2, so if the masks
         * differ in the other bits, we don't really care. (After all, cgroup_enabled_mask tracks which controllers are
         * enabled through cgroup.subtree_control, and since the BPF pseudo-controllers don't show up there, they
         * simply don't matter.) */

        return u->cgroup_realized &&
                ((u->cgroup_realized_mask ^ target_mask) & CGROUP_MASK_V1) == 0 &&
                ((u->cgroup_enabled_mask ^ enable_mask) & CGROUP_MASK_V2) == 0 &&
                u->cgroup_invalidated_mask == 0;
}

void unit_add_to_cgroup_realize_queue(Unit *u) {
        assert(u);

        if (u->in_cgroup_realize_queue)
                return;

        LIST_PREPEND(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
        u->in_cgroup_realize_queue = true;
}

static void unit_remove_from_cgroup_realize_queue(Unit *u) {
        assert(u);

        if (!u->in_cgroup_realize_queue)
                return;

        LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
        u->in_cgroup_realize_queue = false;
}

/* Check if necessary controllers and attributes for a unit are in place.
 *
 * If so, do nothing.
 * If not, create paths, move processes over, and set attributes.
 *
 * Returns 0 on success and < 0 on failure. */
static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
        CGroupMask target_mask, enable_mask;
        int r;

        assert(u);

        unit_remove_from_cgroup_realize_queue(u);

        target_mask = unit_get_target_mask(u);
        enable_mask = unit_get_enable_mask(u);

        if (unit_has_mask_realized(u, target_mask, enable_mask))
                return 0;

        /* First, realize parents */
        if (UNIT_ISSET(u->slice)) {
                r = unit_realize_cgroup_now(UNIT_DEREF(u->slice), state);
                if (r < 0)
                        return r;
        }

        /* And then do the real work */
        r = unit_create_cgroup(u, target_mask, enable_mask);
        if (r < 0)
                return r;

        /* Finally, apply the necessary attributes. */
        cgroup_context_apply(u, target_mask, state);
        cgroup_xattr_apply(u);

        /* Now, reset the invalidation mask */
        u->cgroup_invalidated_mask = 0;
        return 0;
}

unsigned manager_dispatch_cgroup_realize_queue(Manager *m) {
        ManagerState state;
        unsigned n = 0;
        Unit *i;
        int r;

        assert(m);

        state = manager_state(m);

        while ((i = m->cgroup_realize_queue)) {
                assert(i->in_cgroup_realize_queue);

                if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(i))) {
                        /* Maybe things changed, and the unit is not actually active anymore? */
                        unit_remove_from_cgroup_realize_queue(i);
                        continue;
                }

                r = unit_realize_cgroup_now(i, state);
                if (r < 0)
                        log_warning_errno(r, "Failed to realize cgroups for queued unit %s, ignoring: %m", i->id);

                n++;
        }

        return n;
}

static void unit_add_siblings_to_cgroup_realize_queue(Unit *u) {
        Unit *slice;

        /* This adds the siblings of the specified unit and the
         * siblings of all parent units to the cgroup queue. (But
         * neither the specified unit itself nor the parents.) */

        while ((slice = UNIT_DEREF(u->slice))) {
                Iterator i;
                Unit *m;
                void *v;

                HASHMAP_FOREACH_KEY(v, m, u->dependencies[UNIT_BEFORE], i) {
                        if (m == u)
                                continue;

                        /* Skip units that have a dependency on the slice
                         * but aren't actually in it. */
                        if (UNIT_DEREF(m->slice) != slice)
                                continue;

                        /* No point in doing cgroup application for units
                         * without active processes. */
                        if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(m)))
                                continue;

                        /* If the unit doesn't need any new controllers
                         * and has current ones realized, it doesn't need
                         * any changes. */
                        if (unit_has_mask_realized(m,
                                                   unit_get_target_mask(m),
                                                   unit_get_enable_mask(m)))
                                continue;

                        unit_add_to_cgroup_realize_queue(m);
                }

                u = slice;
        }
}

int unit_realize_cgroup(Unit *u) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* So, here's the deal: when realizing the cgroups for this
         * unit, we need to first create all parents, but there's more
         * actually: for the weight-based controllers we also need to
         * make sure that all our siblings (i.e. units that are in the
         * same slice as we are) have cgroups, too. Otherwise, things
         * would become very uneven as each of their processes would
         * get as much resources as all our group together. This call
         * will synchronously create the parent cgroups, but will
         * defer work on the siblings to the next event loop
         * iteration. */
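
        /* Illustration of that unevenness (made-up names): if a.service has a realized cgroup in b.slice but
         * sibling c.service does not, c.service's processes sit directly in b.slice's cgroup, where each of
         * them competes with a.service's entire group on equal terms under weight-based scheduling. Realizing
         * all siblings keeps the shares balanced. */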

        /* Add all sibling slices to the cgroup queue. */
        unit_add_siblings_to_cgroup_realize_queue(u);

        /* And realize this one now (and apply the values) */
        return unit_realize_cgroup_now(u, manager_state(u->manager));
}

void unit_release_cgroup(Unit *u) {
        assert(u);

        /* Forgets all cgroup details for this cgroup — but does *not* destroy the cgroup. This is hence OK to call
         * when we close down everything for reexecution, where we really want to leave the cgroup in place. */

        if (u->cgroup_path) {
                (void) hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
                u->cgroup_path = mfree(u->cgroup_path);
        }

        if (u->cgroup_inotify_wd >= 0) {
                if (inotify_rm_watch(u->manager->cgroup_inotify_fd, u->cgroup_inotify_wd) < 0)
                        log_unit_debug_errno(u, errno, "Failed to remove cgroup inotify watch %i for %s, ignoring: %m", u->cgroup_inotify_wd, u->id);

                (void) hashmap_remove(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd));
                u->cgroup_inotify_wd = -1;
        }
}

void unit_prune_cgroup(Unit *u) {
        int r;
        bool is_root_slice;

        assert(u);

        /* Removes the cgroup, if empty and possible, and stops watching it. */

        if (!u->cgroup_path)
                return;

        (void) unit_get_cpu_usage(u, NULL); /* Cache the last CPU usage value before we destroy the cgroup */

        is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);

        r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !is_root_slice);
        if (r < 0) {
                log_unit_debug_errno(u, r, "Failed to destroy cgroup %s, ignoring: %m", u->cgroup_path);
                return;
        }

        if (is_root_slice)
                return;

        unit_release_cgroup(u);

        u->cgroup_realized = false;
        u->cgroup_realized_mask = 0;
        u->cgroup_enabled_mask = 0;

        u->bpf_device_control_installed = bpf_program_unref(u->bpf_device_control_installed);
}

int unit_search_main_pid(Unit *u, pid_t *ret) {
        _cleanup_fclose_ FILE *f = NULL;
        pid_t pid = 0, npid, mypid;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENXIO;

        r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, &f);
        if (r < 0)
                return r;

        mypid = getpid_cached();
        while (cg_read_pid(f, &npid) > 0) {
                pid_t ppid;

                if (npid == pid)
                        continue;

                /* Ignore processes that aren't our kids */
                if (get_process_ppid(npid, &ppid) >= 0 && ppid != mypid)
                        continue;

                if (pid != 0)
                        /* Dang, there's more than one daemonized PID in this group, so we don't know what
                         * process is the main process. */
                        return -ENODATA;

                pid = npid;
        }

        *ret = pid;
        return 0;
}

static int unit_watch_pids_in_path(Unit *u, const char *path) {
        _cleanup_closedir_ DIR *d = NULL;
        _cleanup_fclose_ FILE *f = NULL;
        int ret = 0, r;

        assert(u);
        assert(path);

        r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f);
        if (r < 0)
                ret = r;
        else {
                pid_t pid;

                while ((r = cg_read_pid(f, &pid)) > 0) {
                        r = unit_watch_pid(u, pid);
                        if (r < 0 && ret >= 0)
                                ret = r;
                }

                if (r < 0 && ret >= 0)
                        ret = r;
        }

        r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d);
        if (r < 0) {
                if (ret >= 0)
                        ret = r;
        } else {
                char *fn;

                while ((r = cg_read_subgroup(d, &fn)) > 0) {
                        _cleanup_free_ char *p = NULL;

                        p = strjoin(path, "/", fn);
                        free(fn);

                        if (!p)
                                return -ENOMEM;

                        r = unit_watch_pids_in_path(u, p);
                        if (r < 0 && ret >= 0)
                                ret = r;
                }

                if (r < 0 && ret >= 0)
                        ret = r;
        }

        return ret;
}

int unit_synthesize_cgroup_empty_event(Unit *u) {
        int r;

        assert(u);

        /* Enqueue a synthetic cgroup empty event if this unit doesn't watch any PIDs anymore. This is compatibility
         * support for non-unified systems where notifications aren't reliable, and hence need to take whatever we can
         * get as notification source as soon as we stopped having any useful PIDs to watch for. */

        if (!u->cgroup_path)
                return -ENOENT;

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return r;
        if (r > 0) /* On unified we have reliable notifications, and don't need this */
                return 0;

        if (!set_isempty(u->pids))
                return 0;

        unit_add_to_cgroup_empty_queue(u);
        return 0;
}

int unit_watch_all_pids(Unit *u) {
        int r;

        assert(u);

        /* Adds all PIDs from our cgroup to the set of PIDs we
         * watch. This is a fallback logic for cases where we do not
         * get reliable cgroup empty notifications: we try to use
         * SIGCHLD as replacement. */

        if (!u->cgroup_path)
                return -ENOENT;

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return r;
        if (r > 0) /* On unified we can use proper notifications */
                return 0;

        return unit_watch_pids_in_path(u, u->cgroup_path);
}

static int on_cgroup_empty_event(sd_event_source *s, void *userdata) {
        Manager *m = userdata;
        Unit *u;
        int r;

        assert(s);
        assert(m);

        u = m->cgroup_empty_queue;
        if (!u)
                return 0;

        assert(u->in_cgroup_empty_queue);
        u->in_cgroup_empty_queue = false;
        LIST_REMOVE(cgroup_empty_queue, m->cgroup_empty_queue, u);

        if (m->cgroup_empty_queue) {
                /* More stuff queued, let's make sure we remain enabled */
                r = sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);
                if (r < 0)
                        log_debug_errno(r, "Failed to reenable cgroup empty event source, ignoring: %m");
        }

        unit_add_to_gc_queue(u);

        if (UNIT_VTABLE(u)->notify_cgroup_empty)
                UNIT_VTABLE(u)->notify_cgroup_empty(u);

        return 0;
}

void unit_add_to_cgroup_empty_queue(Unit *u) {
        int r;

        assert(u);

        /* Note that there are four different ways how cgroup empty events reach us:
         *
         * 1. On the unified hierarchy we get an inotify event on the cgroup
         *
         * 2. On the legacy hierarchy, when running in system mode, we get a datagram on the cgroup agent socket
         *
         * 3. On the legacy hierarchy, when running in user mode, we get a D-Bus signal on the system bus
         *
         * 4. On the legacy hierarchy, in service units we start watching all processes of the cgroup for SIGCHLD as
         *    soon as we get one SIGCHLD, to deal with unreliable cgroup notifications.
         *
         * Regardless which way we got the notification, we'll verify it here, and then add it to a separate
         * queue. This queue will be dispatched at a lower priority than the SIGCHLD handler, so that we always use
         * SIGCHLD if we can get it first, and only use the cgroup empty notifications if there's no SIGCHLD pending
         * (which might happen if the cgroup doesn't contain processes that are our own child, which is typically the
         * case for scope units). */

        if (u->in_cgroup_empty_queue)
                return;

        /* Let's verify that the cgroup is really empty */
        if (!u->cgroup_path)
                return;
        r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
        if (r < 0) {
                log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
                return;
        }
        if (r == 0)
                return;

        LIST_PREPEND(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
        u->in_cgroup_empty_queue = true;

        /* Trigger the defer event */
        r = sd_event_source_set_enabled(u->manager->cgroup_empty_event_source, SD_EVENT_ONESHOT);
        if (r < 0)
                log_debug_errno(r, "Failed to enable cgroup empty event source: %m");
}

static int on_cgroup_inotify_event(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *m = userdata;

        assert(s);
        assert(fd >= 0);
        assert(m);

        for (;;) {
                union inotify_event_buffer buffer;
                struct inotify_event *e;
                ssize_t l;

                l = read(fd, &buffer, sizeof(buffer));
                if (l < 0) {
                        if (IN_SET(errno, EINTR, EAGAIN))
                                return 0;

                        return log_error_errno(errno, "Failed to read control group inotify events: %m");
                }

                FOREACH_INOTIFY_EVENT(e, buffer, l) {
                        Unit *u;

                        if (e->wd < 0)
                                /* Queue overflow has no watch descriptor */
                                continue;

                        if (e->mask & IN_IGNORED)
                                /* The watch was just removed */
                                continue;

                        u = hashmap_get(m->cgroup_inotify_wd_unit, INT_TO_PTR(e->wd));
                        if (!u) /* Note that inotify might deliver
                                 * events for a watch even after it
                                 * was removed, because it was queued
                                 * before the removal. Let's ignore
                                 * this here safely. */
                                continue;

                        unit_add_to_cgroup_empty_queue(u);
                }
        }
}
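
/* A note on the buffer handling above: union inotify_event_buffer (a systemd helper type defined elsewhere in
 * the tree) is sized so that a single read() always yields at least one complete struct inotify_event
 * including its name field, and FOREACH_INOTIFY_EVENT then steps through the variable-length records using
 * each event's len member. */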

static int cg_bpf_mask_supported(CGroupMask *ret) {
        CGroupMask mask = 0;
        int r;

        /* BPF-based firewall */
        r = bpf_firewall_supported();
        if (r > 0)
                mask |= CGROUP_MASK_BPF_FIREWALL;

        /* BPF-based device access control */
        r = bpf_devices_supported();
        if (r > 0)
                mask |= CGROUP_MASK_BPF_DEVICES;

        *ret = mask;
        return 0;
}

int manager_setup_cgroup(Manager *m) {
        _cleanup_free_ char *path = NULL;
        const char *scope_path;
        CGroupController c;
        int r, all_unified;
        CGroupMask mask;
        char *e;

        assert(m);

        /* 1. Determine hierarchy */
        m->cgroup_root = mfree(m->cgroup_root);
        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root);
        if (r < 0)
                return log_error_errno(r, "Cannot determine cgroup we are running in: %m");

        /* Chop off the init scope, if we are already located in it */
        e = endswith(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);

        /* LEGACY: Also chop off the system slice if we are in
         * it. This is to support live upgrades from older systemd
         * versions where PID 1 was moved there. Also see
         * cg_get_root_path(). */
        if (!e && MANAGER_IS_SYSTEM(m)) {
                e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE);
                if (!e)
                        e = endswith(m->cgroup_root, "/system"); /* even more legacy */
        }
        if (e)
                *e = 0;

        /* And make sure to store away the root value without trailing slash, even for the root dir, so that we can
         * easily prepend it everywhere. */
        delete_trailing_chars(m->cgroup_root, "/");

        /* 2. Show data */
        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
        if (r < 0)
                return log_error_errno(r, "Cannot find cgroup mount point: %m");

        r = cg_unified_flush();
        if (r < 0)
                return log_error_errno(r, "Couldn't determine if we are running in the unified hierarchy: %m");

        all_unified = cg_all_unified();
        if (all_unified < 0)
                return log_error_errno(all_unified, "Couldn't determine whether we are in all unified mode: %m");
        if (all_unified > 0)
                log_debug("Unified cgroup hierarchy is located at %s.", path);
        else {
                r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
                if (r < 0)
                        return log_error_errno(r, "Failed to determine whether systemd's own controller is in unified mode: %m");
                if (r > 0)
                        log_debug("Unified cgroup hierarchy is located at %s. Controllers are on legacy hierarchies.", path);
                else
                        log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER_LEGACY ". File system hierarchy is at %s.", path);
        }

        /* 3. Allocate cgroup empty defer event source */
        m->cgroup_empty_event_source = sd_event_source_unref(m->cgroup_empty_event_source);
        r = sd_event_add_defer(m->event, &m->cgroup_empty_event_source, on_cgroup_empty_event, m);
        if (r < 0)
                return log_error_errno(r, "Failed to create cgroup empty event source: %m");

        r = sd_event_source_set_priority(m->cgroup_empty_event_source, SD_EVENT_PRIORITY_NORMAL-5);
        if (r < 0)
                return log_error_errno(r, "Failed to set priority of cgroup empty event source: %m");

        r = sd_event_source_set_enabled(m->cgroup_empty_event_source, SD_EVENT_OFF);
        if (r < 0)
                return log_error_errno(r, "Failed to disable cgroup empty event source: %m");

        (void) sd_event_source_set_description(m->cgroup_empty_event_source, "cgroup-empty");

        /* 4. Install notifier inotify object, or agent */
        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0) {

                /* In the unified hierarchy we can get cgroup empty notifications via inotify. */

                m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
                safe_close(m->cgroup_inotify_fd);

                m->cgroup_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
                if (m->cgroup_inotify_fd < 0)
                        return log_error_errno(errno, "Failed to create control group inotify object: %m");

                r = sd_event_add_io(m->event, &m->cgroup_inotify_event_source, m->cgroup_inotify_fd, EPOLLIN, on_cgroup_inotify_event, m);
                if (r < 0)
                        return log_error_errno(r, "Failed to watch control group inotify object: %m");

                /* Process cgroup empty notifications early, but after service notifications and SIGCHLD. Also
                 * see handling of cgroup agent notifications, for the classic cgroup hierarchy support. */
                r = sd_event_source_set_priority(m->cgroup_inotify_event_source, SD_EVENT_PRIORITY_NORMAL-4);
                if (r < 0)
                        return log_error_errno(r, "Failed to set priority of inotify event source: %m");

                (void) sd_event_source_set_description(m->cgroup_inotify_event_source, "cgroup-inotify");

        } else if (MANAGER_IS_SYSTEM(m) && manager_owns_host_root_cgroup(m) && !MANAGER_IS_TEST_RUN(m)) {

                /* On the legacy hierarchy we only get notifications via cgroup agents. (Which isn't really
                 * reliable, since no events are generated when control groups with children run empty.) */

                r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, SYSTEMD_CGROUP_AGENT_PATH);
                if (r < 0)
                        log_warning_errno(r, "Failed to install release agent, ignoring: %m");
                else if (r > 0)
                        log_debug("Installed release agent.");
                else if (r == 0)
                        log_debug("Release agent already installed.");
        }

        /* 5. Make sure we are in the special "init.scope" unit in the root slice. */
        scope_path = strjoina(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
        r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
        if (r >= 0) {
                /* Also, move all other userspace processes remaining in the root cgroup into that scope. */
                r = cg_migrate(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
                if (r < 0)
                        log_warning_errno(r, "Couldn't move remaining userspace processes, ignoring: %m");

                /* 6. And pin it, so that it cannot be unmounted */
                safe_close(m->pin_cgroupfs_fd);
                m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
                if (m->pin_cgroupfs_fd < 0)
                        return log_error_errno(errno, "Failed to open pin file: %m");

        } else if (!MANAGER_IS_TEST_RUN(m))
                return log_error_errno(r, "Failed to create %s control group: %m", scope_path);

        /* 7. Always enable hierarchical support if it exists... */
        if (!all_unified && !MANAGER_IS_TEST_RUN(m))
                (void) cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");

        /* 8. Figure out which controllers are supported */
        r = cg_mask_supported(&m->cgroup_supported);
        if (r < 0)
                return log_error_errno(r, "Failed to determine supported controllers: %m");

        /* 9. Figure out which bpf-based pseudo-controllers are supported */
        r = cg_bpf_mask_supported(&mask);
        if (r < 0)
                return log_error_errno(r, "Failed to determine supported bpf-based pseudo-controllers: %m");
        m->cgroup_supported |= mask;

        /* 10. Log which controllers are supported */
        for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++)
                log_debug("Controller '%s' supported: %s", cgroup_controller_to_string(c), yes_no(m->cgroup_supported & CGROUP_CONTROLLER_TO_MASK(c)));

        return 0;
}
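
/* Background for step 4's legacy branch above (a sketch of the flow, implemented outside this file): on
 * cgroupsv1 the kernel execs the binary named in the hierarchy's release_agent file with the path of the
 * emptied cgroup as its argument; systemd-cgroups-agent forwards that path to PID 1, which hands it to
 * manager_notify_cgroup_empty() below. */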

void manager_shutdown_cgroup(Manager *m, bool delete) {
        assert(m);

        /* We can't really delete the group, since we are in it. But
         * let's trim it. */
        if (delete && m->cgroup_root && m->test_run_flags != MANAGER_TEST_RUN_MINIMAL)
                (void) cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);

        m->cgroup_empty_event_source = sd_event_source_unref(m->cgroup_empty_event_source);

        m->cgroup_inotify_wd_unit = hashmap_free(m->cgroup_inotify_wd_unit);

        m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
        m->cgroup_inotify_fd = safe_close(m->cgroup_inotify_fd);

        m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd);

        m->cgroup_root = mfree(m->cgroup_root);
}

Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) {
        char *p;
        Unit *u;

        assert(m);
        assert(cgroup);

        u = hashmap_get(m->cgroup_unit, cgroup);
        if (u)
                return u;

        p = strdupa(cgroup);
        for (;;) {
                char *e;

                e = strrchr(p, '/');
                if (!e || e == p)
                        return hashmap_get(m->cgroup_unit, SPECIAL_ROOT_SLICE);

                *e = 0;

                u = hashmap_get(m->cgroup_unit, p);
                if (u)
                        return u;
        }
}
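
/* The walk above, illustrated with a hypothetical path: for "/system.slice/foo.service/sub" we first try the
 * full path, then "/system.slice/foo.service", then "/system.slice", and once the prefix is exhausted we fall
 * back to the root slice's unit. */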

Unit *manager_get_unit_by_pid_cgroup(Manager *m, pid_t pid) {
        _cleanup_free_ char *cgroup = NULL;

        assert(m);

        if (!pid_is_valid(pid))
                return NULL;

        if (cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup) < 0)
                return NULL;

        return manager_get_unit_by_cgroup(m, cgroup);
}

Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {
        Unit *u, **array;

        assert(m);

        /* Note that a process might be owned by multiple units, we return only one here, which is good enough for most
         * cases, though not strictly correct. We prefer the one reported by cgroup membership, as that's the most
         * relevant one as children of the process will be assigned to that one, too, before all else. */

        if (!pid_is_valid(pid))
                return NULL;

        if (pid == getpid_cached())
                return hashmap_get(m->units, SPECIAL_INIT_SCOPE);

        u = manager_get_unit_by_pid_cgroup(m, pid);
        if (u)
                return u;

        u = hashmap_get(m->watch_pids, PID_TO_PTR(pid));
        if (u)
                return u;

        array = hashmap_get(m->watch_pids, PID_TO_PTR(-pid));
        if (array)
                return array[0];

        return NULL;
}

int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
        Unit *u;

        assert(m);
        assert(cgroup);

        /* Called on the legacy hierarchy whenever we get an explicit cgroup notification from the cgroup agent process
         * or from the --system instance */

        log_debug("Got cgroup empty notification for: %s", cgroup);

        u = manager_get_unit_by_cgroup(m, cgroup);
        if (!u)
                return 0;

        unit_add_to_cgroup_empty_queue(u);
        return 1;
}

int unit_get_memory_current(Unit *u, uint64_t *ret) {
        _cleanup_free_ char *v = NULL;
        int r;

        assert(u);
        assert(ret);

        if (!UNIT_CGROUP_BOOL(u, memory_accounting))
                return -ENODATA;

        if (!u->cgroup_path)
                return -ENODATA;

        /* The root cgroup doesn't expose this information, let's get it from /proc instead */
        if (unit_has_host_root_cgroup(u))
                return procfs_memory_get_current(ret);

        if ((u->cgroup_realized_mask & CGROUP_MASK_MEMORY) == 0)
                return -ENODATA;

        r = cg_all_unified();
        if (r < 0)
                return r;
        if (r > 0)
                r = cg_get_attribute("memory", u->cgroup_path, "memory.current", &v);
        else
                r = cg_get_attribute("memory", u->cgroup_path, "memory.usage_in_bytes", &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        return safe_atou64(v, ret);
}

int unit_get_tasks_current(Unit *u, uint64_t *ret) {
        _cleanup_free_ char *v = NULL;
        int r;

        assert(u);
        assert(ret);

        if (!UNIT_CGROUP_BOOL(u, tasks_accounting))
                return -ENODATA;

        if (!u->cgroup_path)
                return -ENODATA;

        /* The root cgroup doesn't expose this information, let's get it from /proc instead */
        if (unit_has_host_root_cgroup(u))
                return procfs_tasks_get_current(ret);

        if ((u->cgroup_realized_mask & CGROUP_MASK_PIDS) == 0)
                return -ENODATA;

        r = cg_get_attribute("pids", u->cgroup_path, "pids.current", &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        return safe_atou64(v, ret);
}

static int unit_get_cpu_usage_raw(Unit *u, nsec_t *ret) {
        _cleanup_free_ char *v = NULL;
        uint64_t ns;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENODATA;

        /* The root cgroup doesn't expose this information, let's get it from /proc instead */
        if (unit_has_host_root_cgroup(u))
                return procfs_cpu_get_usage(ret);

        r = cg_all_unified();
        if (r < 0)
                return r;

        /* Requisite controllers for CPU accounting are not enabled */
        if ((get_cpu_accounting_mask() & ~u->cgroup_realized_mask) != 0)
                return -ENODATA;

        if (r > 0) {
                _cleanup_free_ char *val = NULL;
                uint64_t us;

                /* Check for the missing-attribute errors first, since returning on any negative value
                 * beforehand would make this check unreachable. */
                r = cg_get_keyed_attribute("cpu", u->cgroup_path, "cpu.stat", STRV_MAKE("usage_usec"), &val);
                if (IN_SET(r, -ENOENT, -ENXIO))
                        return -ENODATA;
                if (r < 0)
                        return r;

                r = safe_atou64(val, &us);
                if (r < 0)
                        return r;

                ns = us * NSEC_PER_USEC;
        } else {
                r = cg_get_attribute("cpuacct", u->cgroup_path, "cpuacct.usage", &v);
                if (r == -ENOENT)
                        return -ENODATA;
                if (r < 0)
                        return r;

                r = safe_atou64(v, &ns);
                if (r < 0)
                        return r;
        }

        *ret = ns;
        return 0;
}
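
/* Unit bookkeeping for the two branches above: cgroupsv2 reports "usage_usec" in microseconds, so the value is
 * scaled by NSEC_PER_USEC, while cgroupsv1's cpuacct.usage is already in nanoseconds and is used as-is. */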

int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
        nsec_t ns;
        int r;

        assert(u);

        /* Retrieve the current CPU usage counter. This will subtract the CPU counter taken when the unit was
         * started. If the cgroup has been removed already, returns the last cached value. To cache the value, simply
         * call this function with a NULL return value. */

        if (!UNIT_CGROUP_BOOL(u, cpu_accounting))
                return -ENODATA;

        r = unit_get_cpu_usage_raw(u, &ns);
        if (r == -ENODATA && u->cpu_usage_last != NSEC_INFINITY) {
                /* If we can't get the CPU usage anymore (because the cgroup was already removed, for example), use our
                 * cached value. */

                if (ret)
                        *ret = u->cpu_usage_last;
                return 0;
        }
        if (r < 0)
                return r;

        if (ns > u->cpu_usage_base)
                ns -= u->cpu_usage_base;
        else
                ns = 0;

        u->cpu_usage_last = ns;
        if (ret)
                *ret = ns;

        return 0;
}
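
/* Worked example with made-up numbers: if the raw counter stood at 5s when accounting was last reset
 * (cpu_usage_base) and reads 7s now, we report 2s; if the kernel counter was reset underneath us and now reads
 * below the base, we clamp to 0 rather than let the unsigned subtraction wrap around. */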

int unit_get_ip_accounting(
                Unit *u,
                CGroupIPAccountingMetric metric,
                uint64_t *ret) {

        uint64_t value;
        int fd, r;

        assert(u);
        assert(metric >= 0);
        assert(metric < _CGROUP_IP_ACCOUNTING_METRIC_MAX);
        assert(ret);

        if (!UNIT_CGROUP_BOOL(u, ip_accounting))
                return -ENODATA;

        fd = IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_INGRESS_PACKETS) ?
                u->ip_accounting_ingress_map_fd :
                u->ip_accounting_egress_map_fd;
        if (fd < 0)
                return -ENODATA;

        if (IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
                r = bpf_firewall_read_accounting(fd, &value, NULL);
        else
                r = bpf_firewall_read_accounting(fd, NULL, &value);
        if (r < 0)
                return r;

        /* Add in additional metrics from a previous runtime. Note that when reexecing/reloading the daemon we compile
         * all BPF programs and maps anew, but serialize the old counters. When deserializing we store them in the
         * ip_accounting_extra[] field, and add them in here transparently. */

        *ret = value + u->ip_accounting_extra[metric];

        return r;
}

int unit_reset_cpu_accounting(Unit *u) {
        nsec_t ns;
        int r;

        assert(u);

        u->cpu_usage_last = NSEC_INFINITY;

        r = unit_get_cpu_usage_raw(u, &ns);
        if (r < 0) {
                u->cpu_usage_base = 0;
                return r;
        }

        u->cpu_usage_base = ns;
        return 0;
}

int unit_reset_ip_accounting(Unit *u) {
        int r = 0, q = 0;

        assert(u);

        if (u->ip_accounting_ingress_map_fd >= 0)
                r = bpf_firewall_reset_accounting(u->ip_accounting_ingress_map_fd);

        if (u->ip_accounting_egress_map_fd >= 0)
                q = bpf_firewall_reset_accounting(u->ip_accounting_egress_map_fd);

        zero(u->ip_accounting_extra);

        return r < 0 ? r : q;
}

void unit_invalidate_cgroup(Unit *u, CGroupMask m) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return;

        if (m == 0)
                return;

        /* always invalidate compat pairs together */
        if (m & (CGROUP_MASK_IO | CGROUP_MASK_BLKIO))
                m |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;

        if (m & (CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT))
                m |= CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT;

        if (FLAGS_SET(u->cgroup_invalidated_mask, m)) /* NOP? */
                return;

        u->cgroup_invalidated_mask |= m;
        unit_add_to_cgroup_realize_queue(u);
}

void unit_invalidate_cgroup_bpf(Unit *u) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return;

        if (u->cgroup_invalidated_mask & CGROUP_MASK_BPF_FIREWALL) /* NOP? */
                return;

        u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
        unit_add_to_cgroup_realize_queue(u);

        /* If we are a slice unit, we also need to compile a new BPF program for all our children, as the IP access
         * list of our children includes our own. */
        if (u->type == UNIT_SLICE) {
                Unit *member;
                Iterator i;
                void *v;

                HASHMAP_FOREACH_KEY(v, member, u->dependencies[UNIT_BEFORE], i) {
                        if (member == u)
                                continue;

                        if (UNIT_DEREF(member->slice) != u)
                                continue;

                        unit_invalidate_cgroup_bpf(member);
                }
        }
}

bool unit_cgroup_delegate(Unit *u) {
        CGroupContext *c;

        assert(u);

        if (!UNIT_VTABLE(u)->can_delegate)
                return false;

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        return c->delegate;
}

void manager_invalidate_startup_units(Manager *m) {
        Iterator i;
        Unit *u;

        assert(m);

        SET_FOREACH(u, m->startup_units, i)
                unit_invalidate_cgroup(u, CGROUP_MASK_CPU|CGROUP_MASK_IO|CGROUP_MASK_BLKIO);
}

static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
        [CGROUP_AUTO] = "auto",
        [CGROUP_CLOSED] = "closed",
        [CGROUP_STRICT] = "strict",
};

DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);
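
/* DEFINE_STRING_TABLE_LOOKUP expands to cgroup_device_policy_to_string() and cgroup_device_policy_from_string(),
 * the helpers used when parsing and dumping DevicePolicy= settings. */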