/* SPDX-License-Identifier: LGPL-2.1+ */

#include <fcntl.h>
#include <fnmatch.h>

#include "sd-messages.h"

#include "alloc-util.h"
#include "blockdev-util.h"
#include "bpf-devices.h"
#include "bpf-firewall.h"
#include "btrfs-util.h"
#include "bus-error.h"
#include "cgroup-util.h"
#include "cgroup.h"
#include "fd-util.h"
#include "fileio.h"
#include "fs-util.h"
#include "nulstr-util.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
#include "procfs-util.h"
#include "special.h"
#include "stat-util.h"
#include "stdio-util.h"
#include "string-table.h"
#include "string-util.h"
#include "virt.h"

#define CGROUP_CPU_QUOTA_DEFAULT_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)

/* Returns the log level to use when cgroup attribute writes fail. When an attribute is missing or we have access
 * problems we downgrade to LOG_DEBUG. This is supposed to be nice to container managers and kernels which want to mask
 * out specific attributes from us. */
#define LOG_LEVEL_CGROUP_WRITE(r) (IN_SET(abs(r), ENOENT, EROFS, EACCES, EPERM) ? LOG_DEBUG : LOG_WARNING)

bool manager_owns_host_root_cgroup(Manager *m) {
        assert(m);

        /* Returns true if we are managing the root cgroup. Note that it isn't sufficient to just check whether the
         * group root path equals "/" since that will also be the case if CLONE_NEWCGROUP is in the mix. Since there
         * appears to be no nice way to detect whether we are in a CLONE_NEWCGROUP namespace, we instead just check
         * whether we run in any kind of container virtualization. */

        if (MANAGER_IS_USER(m))
                return false;

        if (detect_container() > 0)
                return false;

        return empty_or_root(m->cgroup_root);
}

bool unit_has_host_root_cgroup(Unit *u) {
        assert(u);

        /* Returns whether this unit manages the root cgroup. This will return true if this unit is the root slice and
         * the manager manages the root cgroup. */

        if (!manager_owns_host_root_cgroup(u->manager))
                return false;

        return unit_has_name(u, SPECIAL_ROOT_SLICE);
}

static int set_attribute_and_warn(Unit *u, const char *controller, const char *attribute, const char *value) {
        int r;

        r = cg_set_attribute(controller, u->cgroup_path, attribute, value);
        if (r < 0)
                log_unit_full(u, LOG_LEVEL_CGROUP_WRITE(r), r, "Failed to set '%s' attribute on '%s' to '%.*s': %m",
                              strna(attribute), isempty(u->cgroup_path) ? "/" : u->cgroup_path, (int) strcspn(value, NEWLINE), value);

        return r;
}

static void cgroup_compat_warn(void) {
        static bool cgroup_compat_warned = false;

        if (cgroup_compat_warned)
                return;

        log_warning("cgroup compatibility translation between legacy and unified hierarchy settings activated. "
                    "See cgroup-compat debug messages for details.");

        cgroup_compat_warned = true;
}

#define log_cgroup_compat(unit, fmt, ...) do {                                  \
                cgroup_compat_warn();                                           \
                log_unit_debug(unit, "cgroup-compat: " fmt, ##__VA_ARGS__);     \
        } while (false)

void cgroup_context_init(CGroupContext *c) {
        assert(c);

        /* Initialize everything to the kernel defaults. */

        *c = (CGroupContext) {
                .cpu_weight = CGROUP_WEIGHT_INVALID,
                .startup_cpu_weight = CGROUP_WEIGHT_INVALID,
                .cpu_quota_per_sec_usec = USEC_INFINITY,
                .cpu_quota_period_usec = USEC_INFINITY,

                .cpu_shares = CGROUP_CPU_SHARES_INVALID,
                .startup_cpu_shares = CGROUP_CPU_SHARES_INVALID,

                .memory_high = CGROUP_LIMIT_MAX,
                .memory_max = CGROUP_LIMIT_MAX,
                .memory_swap_max = CGROUP_LIMIT_MAX,

                .memory_limit = CGROUP_LIMIT_MAX,

                .io_weight = CGROUP_WEIGHT_INVALID,
                .startup_io_weight = CGROUP_WEIGHT_INVALID,

                .blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID,
                .startup_blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID,

                .tasks_max = CGROUP_LIMIT_MAX,
        };
}

void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
        assert(c);
        assert(a);

        LIST_REMOVE(device_allow, c->device_allow, a);
        free(a->path);
        free(a);
}

void cgroup_context_free_io_device_weight(CGroupContext *c, CGroupIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->io_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_io_device_latency(CGroupContext *c, CGroupIODeviceLatency *l) {
        assert(c);
        assert(l);

        LIST_REMOVE(device_latencies, c->io_device_latencies, l);
        free(l->path);
        free(l);
}

void cgroup_context_free_io_device_limit(CGroupContext *c, CGroupIODeviceLimit *l) {
        assert(c);
        assert(l);

        LIST_REMOVE(device_limits, c->io_device_limits, l);
        free(l->path);
        free(l);
}

void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->blockio_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b) {
        assert(c);
        assert(b);

        LIST_REMOVE(device_bandwidths, c->blockio_device_bandwidths, b);
        free(b->path);
        free(b);
}

void cgroup_context_done(CGroupContext *c) {
        assert(c);

        while (c->io_device_weights)
                cgroup_context_free_io_device_weight(c, c->io_device_weights);

        while (c->io_device_latencies)
                cgroup_context_free_io_device_latency(c, c->io_device_latencies);

        while (c->io_device_limits)
                cgroup_context_free_io_device_limit(c, c->io_device_limits);

        while (c->blockio_device_weights)
                cgroup_context_free_blockio_device_weight(c, c->blockio_device_weights);

        while (c->blockio_device_bandwidths)
                cgroup_context_free_blockio_device_bandwidth(c, c->blockio_device_bandwidths);

        while (c->device_allow)
                cgroup_context_free_device_allow(c, c->device_allow);

        c->ip_address_allow = ip_address_access_free_all(c->ip_address_allow);
        c->ip_address_deny = ip_address_access_free_all(c->ip_address_deny);
}

void cgroup_context_dump(CGroupContext *c, FILE *f, const char *prefix) {
        _cleanup_free_ char *disable_controllers_str = NULL;
        CGroupIODeviceLimit *il;
        CGroupIODeviceWeight *iw;
        CGroupIODeviceLatency *l;
        CGroupBlockIODeviceBandwidth *b;
        CGroupBlockIODeviceWeight *w;
        CGroupDeviceAllow *a;
        IPAddressAccessItem *iaai;
        char u[FORMAT_TIMESPAN_MAX];
        char v[FORMAT_TIMESPAN_MAX];

        assert(c);
        assert(f);

        prefix = strempty(prefix);

        (void) cg_mask_to_string(c->disable_controllers, &disable_controllers_str);

        fprintf(f,
                "%sCPUAccounting=%s\n"
                "%sIOAccounting=%s\n"
                "%sBlockIOAccounting=%s\n"
                "%sMemoryAccounting=%s\n"
                "%sTasksAccounting=%s\n"
                "%sIPAccounting=%s\n"
                "%sCPUWeight=%" PRIu64 "\n"
                "%sStartupCPUWeight=%" PRIu64 "\n"
                "%sCPUShares=%" PRIu64 "\n"
                "%sStartupCPUShares=%" PRIu64 "\n"
                "%sCPUQuotaPerSecSec=%s\n"
                "%sCPUQuotaPeriodSec=%s\n"
                "%sIOWeight=%" PRIu64 "\n"
                "%sStartupIOWeight=%" PRIu64 "\n"
                "%sBlockIOWeight=%" PRIu64 "\n"
                "%sStartupBlockIOWeight=%" PRIu64 "\n"
                "%sDefaultMemoryMin=%" PRIu64 "\n"
                "%sDefaultMemoryLow=%" PRIu64 "\n"
                "%sMemoryMin=%" PRIu64 "\n"
                "%sMemoryLow=%" PRIu64 "\n"
                "%sMemoryHigh=%" PRIu64 "\n"
                "%sMemoryMax=%" PRIu64 "\n"
                "%sMemorySwapMax=%" PRIu64 "\n"
                "%sMemoryLimit=%" PRIu64 "\n"
                "%sTasksMax=%" PRIu64 "\n"
                "%sDevicePolicy=%s\n"
                "%sDisableControllers=%s\n"
                "%sDelegate=%s\n",
                prefix, yes_no(c->cpu_accounting),
                prefix, yes_no(c->io_accounting),
                prefix, yes_no(c->blockio_accounting),
                prefix, yes_no(c->memory_accounting),
                prefix, yes_no(c->tasks_accounting),
                prefix, yes_no(c->ip_accounting),
                prefix, c->cpu_weight,
                prefix, c->startup_cpu_weight,
                prefix, c->cpu_shares,
                prefix, c->startup_cpu_shares,
                prefix, format_timespan(u, sizeof(u), c->cpu_quota_per_sec_usec, 1),
                prefix, format_timespan(v, sizeof(v), c->cpu_quota_period_usec, 1),
                prefix, c->io_weight,
                prefix, c->startup_io_weight,
                prefix, c->blockio_weight,
                prefix, c->startup_blockio_weight,
                prefix, c->default_memory_min,
                prefix, c->default_memory_low,
                prefix, c->memory_min,
                prefix, c->memory_low,
                prefix, c->memory_high,
                prefix, c->memory_max,
                prefix, c->memory_swap_max,
                prefix, c->memory_limit,
                prefix, c->tasks_max,
                prefix, cgroup_device_policy_to_string(c->device_policy),
                prefix, strnull(disable_controllers_str),
                prefix, yes_no(c->delegate));

        if (c->delegate) {
                _cleanup_free_ char *t = NULL;

                (void) cg_mask_to_string(c->delegate_controllers, &t);

                fprintf(f, "%sDelegateControllers=%s\n",
                        prefix,
                        strempty(t));
        }

        LIST_FOREACH(device_allow, a, c->device_allow)
                fprintf(f,
                        "%sDeviceAllow=%s %s%s%s\n",
                        prefix,
                        a->path,
                        a->r ? "r" : "", a->w ? "w" : "", a->m ? "m" : "");

        LIST_FOREACH(device_weights, iw, c->io_device_weights)
                fprintf(f,
                        "%sIODeviceWeight=%s %" PRIu64 "\n",
                        prefix,
                        iw->path,
                        iw->weight);

        LIST_FOREACH(device_latencies, l, c->io_device_latencies)
                fprintf(f,
                        "%sIODeviceLatencyTargetSec=%s %s\n",
                        prefix,
                        l->path,
                        format_timespan(u, sizeof(u), l->target_usec, 1));

        LIST_FOREACH(device_limits, il, c->io_device_limits) {
                char buf[FORMAT_BYTES_MAX];
                CGroupIOLimitType type;

                for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                        if (il->limits[type] != cgroup_io_limit_defaults[type])
                                fprintf(f,
                                        "%s%s=%s %s\n",
                                        prefix,
                                        cgroup_io_limit_type_to_string(type),
                                        il->path,
                                        format_bytes(buf, sizeof(buf), il->limits[type]));
        }

        LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                fprintf(f,
                        "%sBlockIODeviceWeight=%s %" PRIu64 "\n",
                        prefix,
                        w->path,
                        w->weight);

        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                char buf[FORMAT_BYTES_MAX];

                if (b->rbps != CGROUP_LIMIT_MAX)
                        fprintf(f,
                                "%sBlockIOReadBandwidth=%s %s\n",
                                prefix,
                                b->path,
                                format_bytes(buf, sizeof(buf), b->rbps));
                if (b->wbps != CGROUP_LIMIT_MAX)
                        fprintf(f,
                                "%sBlockIOWriteBandwidth=%s %s\n",
                                prefix,
                                b->path,
                                format_bytes(buf, sizeof(buf), b->wbps));
        }

        LIST_FOREACH(items, iaai, c->ip_address_allow) {
                _cleanup_free_ char *k = NULL;

                (void) in_addr_to_string(iaai->family, &iaai->address, &k);
                fprintf(f, "%sIPAddressAllow=%s/%u\n", prefix, strnull(k), iaai->prefixlen);
        }

        LIST_FOREACH(items, iaai, c->ip_address_deny) {
                _cleanup_free_ char *k = NULL;

                (void) in_addr_to_string(iaai->family, &iaai->address, &k);
                fprintf(f, "%sIPAddressDeny=%s/%u\n", prefix, strnull(k), iaai->prefixlen);
        }
}

int cgroup_add_device_allow(CGroupContext *c, const char *dev, const char *mode) {
        _cleanup_free_ CGroupDeviceAllow *a = NULL;
        _cleanup_free_ char *d = NULL;

        assert(c);
        assert(dev);
        assert(isempty(mode) || in_charset(mode, "rwm"));

        a = new(CGroupDeviceAllow, 1);
        if (!a)
                return -ENOMEM;

        d = strdup(dev);
        if (!d)
                return -ENOMEM;

        *a = (CGroupDeviceAllow) {
                .path = TAKE_PTR(d),
                .r = isempty(mode) || strchr(mode, 'r'),
                .w = isempty(mode) || strchr(mode, 'w'),
                .m = isempty(mode) || strchr(mode, 'm'),
        };

        LIST_PREPEND(device_allow, c->device_allow, a);
        TAKE_PTR(a);

        return 0;
}
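
/* Illustrative usage (not part of the original source): parsing a DeviceAllow=/dev/null rw
 * setting would end up as cgroup_add_device_allow(c, "/dev/null", "rw"), producing an entry
 * with .r and .w set but .m unset; an empty mode string grants all of "rwm". */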

#define UNIT_DEFINE_ANCESTOR_MEMORY_LOOKUP(entry)                       \
        uint64_t unit_get_ancestor_##entry(Unit *u) {                   \
                CGroupContext *c;                                       \
                                                                        \
                /* 1. Is entry set in this unit? If so, use that.       \
                 * 2. Is the default for this entry set in any          \
                 *    ancestor? If so, use that.                        \
                 * 3. Otherwise, return CGROUP_LIMIT_MIN. */            \
                                                                        \
                assert(u);                                              \
                                                                        \
                c = unit_get_cgroup_context(u);                         \
                if (c && c->entry##_set)                                \
                        return c->entry;                                \
                                                                        \
                while ((u = UNIT_DEREF(u->slice))) {                    \
                        c = unit_get_cgroup_context(u);                 \
                        if (c && c->default_##entry##_set)              \
                                return c->default_##entry;              \
                }                                                       \
                                                                        \
                /* We've reached the root, but nobody had the default   \
                 * for this entry set, so return the kernel default. */ \
                return CGROUP_LIMIT_MIN;                                \
        }

UNIT_DEFINE_ANCESTOR_MEMORY_LOOKUP(memory_low);
UNIT_DEFINE_ANCESTOR_MEMORY_LOOKUP(memory_min);
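
/* The instantiations above expand to unit_get_ancestor_memory_low() and unit_get_ancestor_memory_min();
 * the former is used further down when applying the "memory.low" attribute. */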

static void cgroup_xattr_apply(Unit *u) {
        char ids[SD_ID128_STRING_MAX];
        int r;

        assert(u);

        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        if (sd_id128_is_null(u->invocation_id))
                return;

        r = cg_set_xattr(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                         "trusted.invocation_id",
                         sd_id128_to_string(u->invocation_id, ids), 32,
                         0);
        if (r < 0)
                log_unit_debug_errno(u, r, "Failed to set invocation ID on control group %s, ignoring: %m", u->cgroup_path);
}

static int lookup_block_device(const char *p, dev_t *ret) {
        dev_t rdev, dev = 0;
        mode_t mode;
        int r;

        assert(p);
        assert(ret);

        r = device_path_parse_major_minor(p, &mode, &rdev);
        if (r == -ENODEV) { /* not a parsable device node, need to go to disk */
                struct stat st;

                if (stat(p, &st) < 0)
                        return log_warning_errno(errno, "Couldn't stat device '%s': %m", p);

                rdev = (dev_t) st.st_rdev;
                dev = (dev_t) st.st_dev;
                mode = st.st_mode;
        } else if (r < 0)
                return log_warning_errno(r, "Failed to parse major/minor from path '%s': %m", p);

        if (S_ISCHR(mode)) {
                log_warning("Device node '%s' is a character device, but block device needed.", p);
                return -ENOTBLK;
        } else if (S_ISBLK(mode))
                *ret = rdev;
        else if (major(dev) != 0)
                *ret = dev; /* If this is not a device node then use the block device this file is stored on */
        else {
                /* If this is btrfs, getting the backing block device is a bit harder */
                r = btrfs_get_block_device(p, ret);
                if (r < 0 && r != -ENOTTY)
                        return log_warning_errno(r, "Failed to determine block device backing btrfs file system '%s': %m", p);
                if (r == -ENOTTY) {
                        log_warning("'%s' is not a block device node, and file system block device cannot be determined or is not local.", p);
                        return -ENODEV;
                }
        }

        /* If this is a LUKS device, try to get the originating block device */
        (void) block_get_originating(*ret, ret);

        /* If this is a partition, try to get the originating block device */
        (void) block_get_whole_disk(*ret, ret);
        return 0;
}
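
/* Example (illustrative only): a path like "/dev/sda1" resolves to the whole disk behind the partition,
 * while a regular file resolves to the block device backing the file system it lives on, with btrfs, LUKS
 * and partition indirection peeled away by the calls above. */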

static int whitelist_device(BPFProgram *prog, const char *path, const char *node, const char *acc) {
        dev_t rdev;
        mode_t mode;
        int r;

        assert(path);
        assert(acc);

        /* Some special handling for /dev/block/%u:%u, /dev/char/%u:%u, /run/systemd/inaccessible/chr and
         * /run/systemd/inaccessible/blk paths. Instead of stat()ing these we parse out the major/minor directly. This
         * means clients can use these paths without the device node actually being around. */
        r = device_path_parse_major_minor(node, &mode, &rdev);
        if (r < 0) {
                if (r != -ENODEV)
                        return log_warning_errno(r, "Couldn't parse major/minor from device path '%s': %m", node);

                struct stat st;

                if (stat(node, &st) < 0)
                        return log_warning_errno(errno, "Couldn't stat device %s: %m", node);

                if (!S_ISCHR(st.st_mode) && !S_ISBLK(st.st_mode)) {
                        log_warning("%s is not a device.", node);
                        return -ENODEV;
                }

                rdev = (dev_t) st.st_rdev;
                mode = st.st_mode;
        }

        if (cg_all_unified() > 0) {
                if (!prog)
                        return 0;

                return cgroup_bpf_whitelist_device(prog, S_ISCHR(mode) ? BPF_DEVCG_DEV_CHAR : BPF_DEVCG_DEV_BLOCK,
                                                   major(rdev), minor(rdev), acc);

        } else {
                char buf[2+DECIMAL_STR_MAX(dev_t)*2+2+4];

                sprintf(buf,
                        "%c %u:%u %s",
                        S_ISCHR(mode) ? 'c' : 'b',
                        major(rdev), minor(rdev),
                        acc);

                /* Changing the devices list of a populated cgroup might result in EINVAL, hence ignore EINVAL here. */

                r = cg_set_attribute("devices", path, "devices.allow", buf);
                if (r < 0)
                        return log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES, -EPERM) ? LOG_DEBUG : LOG_WARNING,
                                              r, "Failed to set devices.allow on %s: %m", path);

                return 0;
        }
}
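
/* Illustration (not from the original source): whitelisting the character device /dev/null
 * (1:3 on Linux) with acc="rwm" writes "c 1:3 rwm" to devices.allow on the legacy hierarchy;
 * on the unified hierarchy the same device is instead appended to the BPF program as a
 * BPF_DEVCG_DEV_CHAR entry for major 1, minor 3. */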

static int whitelist_major(BPFProgram *prog, const char *path, const char *name, char type, const char *acc) {
        _cleanup_fclose_ FILE *f = NULL;
        char buf[2+DECIMAL_STR_MAX(unsigned)+3+4];
        bool good = false;
        unsigned maj;
        int r;

        assert(path);
        assert(acc);
        assert(IN_SET(type, 'b', 'c'));

        if (streq(name, "*")) {
                /* If the name is a wildcard, then apply this list to all devices of this type */

                if (cg_all_unified() > 0) {
                        if (!prog)
                                return 0;

                        (void) cgroup_bpf_whitelist_class(prog, type == 'c' ? BPF_DEVCG_DEV_CHAR : BPF_DEVCG_DEV_BLOCK, acc);
                } else {
                        xsprintf(buf, "%c *:* %s", type, acc);

                        r = cg_set_attribute("devices", path, "devices.allow", buf);
                        if (r < 0)
                                log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                               "Failed to set devices.allow on %s: %m", path);
                        return 0;
                }
        }

        if (safe_atou(name, &maj) >= 0 && DEVICE_MAJOR_VALID(maj)) {
                /* The name is numeric and suitable as a major number. In that case, let's take it as the major, and
                 * create the entry directly */

                if (cg_all_unified() > 0) {
                        if (!prog)
                                return 0;

                        (void) cgroup_bpf_whitelist_major(prog,
                                                          type == 'c' ? BPF_DEVCG_DEV_CHAR : BPF_DEVCG_DEV_BLOCK,
                                                          maj, acc);
                } else {
                        xsprintf(buf, "%c %u:* %s", type, maj, acc);

                        r = cg_set_attribute("devices", path, "devices.allow", buf);
                        if (r < 0)
                                log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                               "Failed to set devices.allow on %s: %m", path);
                }

                return 0;
        }

        f = fopen("/proc/devices", "re");
        if (!f)
                return log_warning_errno(errno, "Cannot open /proc/devices to resolve %s (%c): %m", name, type);

        for (;;) {
                _cleanup_free_ char *line = NULL;
                char *w, *p;

                r = read_line(f, LONG_LINE_MAX, &line);
                if (r < 0)
                        return log_warning_errno(r, "Failed to read /proc/devices: %m");
                if (r == 0)
                        break;

                if (type == 'c' && streq(line, "Character devices:")) {
                        good = true;
                        continue;
                }

                if (type == 'b' && streq(line, "Block devices:")) {
                        good = true;
                        continue;
                }

                if (isempty(line)) {
                        good = false;
                        continue;
                }

                if (!good)
                        continue;

                p = strstrip(line);

                w = strpbrk(p, WHITESPACE);
                if (!w)
                        continue;
                *w = 0;

                r = safe_atou(p, &maj);
                if (r < 0)
                        continue;
                if (maj <= 0)
                        continue;

                w++;
                w += strspn(w, WHITESPACE);

                if (fnmatch(name, w, 0) != 0)
                        continue;

                if (cg_all_unified() > 0) {
                        if (!prog)
                                continue;

                        (void) cgroup_bpf_whitelist_major(prog,
                                                          type == 'c' ? BPF_DEVCG_DEV_CHAR : BPF_DEVCG_DEV_BLOCK,
                                                          maj, acc);
                } else {
                        sprintf(buf,
                                "%c %u:* %s",
                                type,
                                maj,
                                acc);

                        /* Changing the devices list of a populated cgroup might result in EINVAL, hence ignore EINVAL
                         * here. */

                        r = cg_set_attribute("devices", path, "devices.allow", buf);
                        if (r < 0)
                                log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES, -EPERM) ? LOG_DEBUG : LOG_WARNING,
                                               r, "Failed to set devices.allow on %s: %m", path);
                }
        }

        return 0;
}

static bool cgroup_context_has_cpu_weight(CGroupContext *c) {
        return c->cpu_weight != CGROUP_WEIGHT_INVALID ||
                c->startup_cpu_weight != CGROUP_WEIGHT_INVALID;
}

static bool cgroup_context_has_cpu_shares(CGroupContext *c) {
        return c->cpu_shares != CGROUP_CPU_SHARES_INVALID ||
                c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID;
}

static uint64_t cgroup_context_cpu_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->startup_cpu_weight;
        else if (c->cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->cpu_weight;
        else
                return CGROUP_WEIGHT_DEFAULT;
}

static uint64_t cgroup_context_cpu_shares(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->startup_cpu_shares;
        else if (c->cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->cpu_shares;
        else
                return CGROUP_CPU_SHARES_DEFAULT;
}

usec_t cgroup_cpu_adjust_period(usec_t period, usec_t quota, usec_t resolution, usec_t max_period) {
        /* The kernel uses a minimum resolution of 1ms, so both the period and the effective quota
         * (quota * period / USEC_PER_SEC) need to be at least that boundary. The quota is specified in usec per
         * second. Additionally, the period must be at most max_period. */
        assert(quota > 0);

        return MIN(MAX3(period, resolution, resolution * USEC_PER_SEC / quota), max_period);
}
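
/* Worked example (illustrative): with period=100ms, quota=5ms-per-second, resolution=1ms and
 * max_period=1s, the term resolution * USEC_PER_SEC / quota evaluates to 200ms, so the period is
 * raised to 200ms; that keeps the effective quota (quota * period / USEC_PER_SEC = 1ms) at the
 * kernel's minimum resolution. */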

static usec_t cgroup_cpu_adjust_period_and_log(Unit *u, usec_t period, usec_t quota) {
        usec_t new_period;

        if (quota == USEC_INFINITY)
                /* Always use default period for infinity quota. */
                return CGROUP_CPU_QUOTA_DEFAULT_PERIOD_USEC;

        if (period == USEC_INFINITY)
                /* Default period was requested. */
                period = CGROUP_CPU_QUOTA_DEFAULT_PERIOD_USEC;

        /* Clamp to interval [1ms, 1s] */
        new_period = cgroup_cpu_adjust_period(period, quota, USEC_PER_MSEC, USEC_PER_SEC);

        if (new_period != period) {
                char v[FORMAT_TIMESPAN_MAX];
                log_unit_full(u, u->warned_clamping_cpu_quota_period ? LOG_DEBUG : LOG_WARNING, 0,
                              "Clamping CPU interval for cpu.max: period is now %s",
                              format_timespan(v, sizeof(v), new_period, 1));
                u->warned_clamping_cpu_quota_period = true;
        }

        return new_period;
}

static void cgroup_apply_unified_cpu_weight(Unit *u, uint64_t weight) {
        char buf[DECIMAL_STR_MAX(uint64_t) + 2];

        xsprintf(buf, "%" PRIu64 "\n", weight);
        (void) set_attribute_and_warn(u, "cpu", "cpu.weight", buf);
}

static void cgroup_apply_unified_cpu_quota(Unit *u, usec_t quota, usec_t period) {
        char buf[(DECIMAL_STR_MAX(usec_t) + 1) * 2 + 1];

        period = cgroup_cpu_adjust_period_and_log(u, period, quota);
        if (quota != USEC_INFINITY)
                xsprintf(buf, USEC_FMT " " USEC_FMT "\n",
                         MAX(quota * period / USEC_PER_SEC, USEC_PER_MSEC), period);
        else
                xsprintf(buf, "max " USEC_FMT "\n", period);
        (void) set_attribute_and_warn(u, "cpu", "cpu.max", buf);
}
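
/* Example of the resulting attribute (illustrative): CPUQuota=150% with the default 100ms period
 * writes "150000 100000" to cpu.max, i.e. 150ms of CPU time per 100ms period; with no quota set
 * it writes "max 100000". */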

static void cgroup_apply_legacy_cpu_shares(Unit *u, uint64_t shares) {
        char buf[DECIMAL_STR_MAX(uint64_t) + 2];

        xsprintf(buf, "%" PRIu64 "\n", shares);
        (void) set_attribute_and_warn(u, "cpu", "cpu.shares", buf);
}

static void cgroup_apply_legacy_cpu_quota(Unit *u, usec_t quota, usec_t period) {
        char buf[DECIMAL_STR_MAX(usec_t) + 2];

        period = cgroup_cpu_adjust_period_and_log(u, period, quota);

        xsprintf(buf, USEC_FMT "\n", period);
        (void) set_attribute_and_warn(u, "cpu", "cpu.cfs_period_us", buf);

        if (quota != USEC_INFINITY) {
                xsprintf(buf, USEC_FMT "\n", MAX(quota * period / USEC_PER_SEC, USEC_PER_MSEC));
                (void) set_attribute_and_warn(u, "cpu", "cpu.cfs_quota_us", buf);
        } else
                (void) set_attribute_and_warn(u, "cpu", "cpu.cfs_quota_us", "-1\n");
}

static uint64_t cgroup_cpu_shares_to_weight(uint64_t shares) {
        return CLAMP(shares * CGROUP_WEIGHT_DEFAULT / CGROUP_CPU_SHARES_DEFAULT,
                     CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}

static uint64_t cgroup_cpu_weight_to_shares(uint64_t weight) {
        return CLAMP(weight * CGROUP_CPU_SHARES_DEFAULT / CGROUP_WEIGHT_DEFAULT,
                     CGROUP_CPU_SHARES_MIN, CGROUP_CPU_SHARES_MAX);
}
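
/* Conversion example (illustrative, assuming the usual defaults of 1024 shares and weight 100):
 * CPUShares=2048 maps to CPUWeight=200, and CPUWeight=50 maps back to CPUShares=512; both
 * directions are clamped to the valid range of the target scale. */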

static bool cgroup_context_has_io_config(CGroupContext *c) {
        return c->io_accounting ||
                c->io_weight != CGROUP_WEIGHT_INVALID ||
                c->startup_io_weight != CGROUP_WEIGHT_INVALID ||
                c->io_device_weights ||
                c->io_device_latencies ||
                c->io_device_limits;
}

static bool cgroup_context_has_blockio_config(CGroupContext *c) {
        return c->blockio_accounting ||
                c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
                c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
                c->blockio_device_weights ||
                c->blockio_device_bandwidths;
}

static uint64_t cgroup_context_io_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_io_weight != CGROUP_WEIGHT_INVALID)
                return c->startup_io_weight;
        else if (c->io_weight != CGROUP_WEIGHT_INVALID)
                return c->io_weight;
        else
                return CGROUP_WEIGHT_DEFAULT;
}

static uint64_t cgroup_context_blkio_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
                return c->startup_blockio_weight;
        else if (c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
                return c->blockio_weight;
        else
                return CGROUP_BLKIO_WEIGHT_DEFAULT;
}

static uint64_t cgroup_weight_blkio_to_io(uint64_t blkio_weight) {
        return CLAMP(blkio_weight * CGROUP_WEIGHT_DEFAULT / CGROUP_BLKIO_WEIGHT_DEFAULT,
                     CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}

static uint64_t cgroup_weight_io_to_blkio(uint64_t io_weight) {
        return CLAMP(io_weight * CGROUP_BLKIO_WEIGHT_DEFAULT / CGROUP_WEIGHT_DEFAULT,
                     CGROUP_BLKIO_WEIGHT_MIN, CGROUP_BLKIO_WEIGHT_MAX);
}

static void cgroup_apply_io_device_weight(Unit *u, const char *dev_path, uint64_t io_weight) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), io_weight);
        (void) set_attribute_and_warn(u, "io", "io.weight", buf);
}

static void cgroup_apply_blkio_device_weight(Unit *u, const char *dev_path, uint64_t blkio_weight) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), blkio_weight);
        (void) set_attribute_and_warn(u, "blkio", "blkio.weight_device", buf);
}

static void cgroup_apply_io_device_latency(Unit *u, const char *dev_path, usec_t target) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+7+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        if (target != USEC_INFINITY)
                xsprintf(buf, "%u:%u target=%" PRIu64 "\n", major(dev), minor(dev), target);
        else
                xsprintf(buf, "%u:%u target=max\n", major(dev), minor(dev));

        (void) set_attribute_and_warn(u, "io", "io.latency", buf);
}

static void cgroup_apply_io_device_limit(Unit *u, const char *dev_path, uint64_t *limits) {
        char limit_bufs[_CGROUP_IO_LIMIT_TYPE_MAX][DECIMAL_STR_MAX(uint64_t)];
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+(6+DECIMAL_STR_MAX(uint64_t)+1)*4];
        CGroupIOLimitType type;
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                if (limits[type] != cgroup_io_limit_defaults[type])
                        xsprintf(limit_bufs[type], "%" PRIu64, limits[type]);
                else
                        xsprintf(limit_bufs[type], "%s", limits[type] == CGROUP_LIMIT_MAX ? "max" : "0");

        xsprintf(buf, "%u:%u rbps=%s wbps=%s riops=%s wiops=%s\n", major(dev), minor(dev),
                 limit_bufs[CGROUP_IO_RBPS_MAX], limit_bufs[CGROUP_IO_WBPS_MAX],
                 limit_bufs[CGROUP_IO_RIOPS_MAX], limit_bufs[CGROUP_IO_WIOPS_MAX]);
        (void) set_attribute_and_warn(u, "io", "io.max", buf);
}
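
/* Example of the resulting attribute (illustrative): a read-bandwidth limit of 1M on a device with
 * major:minor 8:0 and no other limits writes "8:0 rbps=1048576 wbps=max riops=max wiops=max" to
 * io.max; 8:0 is assumed here purely for the sake of the example. */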

static void cgroup_apply_blkio_device_limit(Unit *u, const char *dev_path, uint64_t rbps, uint64_t wbps) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), rbps);
        (void) set_attribute_and_warn(u, "blkio", "blkio.throttle.read_bps_device", buf);

        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), wbps);
        (void) set_attribute_and_warn(u, "blkio", "blkio.throttle.write_bps_device", buf);
}

static bool unit_has_unified_memory_config(Unit *u) {
        CGroupContext *c;

        assert(u);

        c = unit_get_cgroup_context(u);
        assert(c);

        return c->memory_min > 0 || unit_get_ancestor_memory_low(u) > 0 ||
               c->memory_high != CGROUP_LIMIT_MAX || c->memory_max != CGROUP_LIMIT_MAX ||
               c->memory_swap_max != CGROUP_LIMIT_MAX;
}

static void cgroup_apply_unified_memory_limit(Unit *u, const char *file, uint64_t v) {
        char buf[DECIMAL_STR_MAX(uint64_t) + 1] = "max\n";

        if (v != CGROUP_LIMIT_MAX)
                xsprintf(buf, "%" PRIu64 "\n", v);

        (void) set_attribute_and_warn(u, "memory", file, buf);
}

static void cgroup_apply_firewall(Unit *u) {
        assert(u);

        /* Best-effort: let's apply IP firewalling and/or accounting if that's enabled */

        if (bpf_firewall_compile(u) < 0)
                return;

        (void) bpf_firewall_install(u);
}

static void cgroup_context_apply(
                Unit *u,
                CGroupMask apply_mask,
                ManagerState state) {

        const char *path;
        CGroupContext *c;
        bool is_host_root, is_local_root;
        int r;

        assert(u);

        /* Nothing to do? Exit early! */
        if (apply_mask == 0)
                return;

        /* Some cgroup attributes are not supported on the host root cgroup, hence silently ignore them here. And other
         * attributes should only be managed for cgroups further down the tree. */
        is_local_root = unit_has_name(u, SPECIAL_ROOT_SLICE);
        is_host_root = unit_has_host_root_cgroup(u);

        assert_se(c = unit_get_cgroup_context(u));
        assert_se(path = u->cgroup_path);

        if (is_local_root) /* Make sure we don't try to display messages with an empty path. */
                path = "/";

        /* We generally ignore errors caused by read-only mounted cgroup trees (assuming we are running in a container
         * then), and missing cgroups, i.e. EROFS and ENOENT. */

        /* In fully unified mode these attributes don't exist on the host cgroup root. On legacy the weights exist, but
         * setting the weight makes very little sense on the host root cgroup, as there are no other cgroups at this
         * level. The quota exists there too, but any attempt to write to it is refused with EINVAL. Inside of
         * containers we want to leave control of these to the container manager (and if cgroup v2 delegation is used
         * we couldn't even write to them if we wanted to). */
        if ((apply_mask & CGROUP_MASK_CPU) && !is_local_root) {

                if (cg_all_unified() > 0) {
                        uint64_t weight;

                        if (cgroup_context_has_cpu_weight(c))
                                weight = cgroup_context_cpu_weight(c, state);
                        else if (cgroup_context_has_cpu_shares(c)) {
                                uint64_t shares;

                                shares = cgroup_context_cpu_shares(c, state);
                                weight = cgroup_cpu_shares_to_weight(shares);

                                log_cgroup_compat(u, "Applying [Startup]CPUShares=%" PRIu64 " as [Startup]CPUWeight=%" PRIu64 " on %s",
                                                  shares, weight, path);
                        } else
                                weight = CGROUP_WEIGHT_DEFAULT;

                        cgroup_apply_unified_cpu_weight(u, weight);
                        cgroup_apply_unified_cpu_quota(u, c->cpu_quota_per_sec_usec, c->cpu_quota_period_usec);

                } else {
                        uint64_t shares;

                        if (cgroup_context_has_cpu_weight(c)) {
                                uint64_t weight;

                                weight = cgroup_context_cpu_weight(c, state);
                                shares = cgroup_cpu_weight_to_shares(weight);

                                log_cgroup_compat(u, "Applying [Startup]CPUWeight=%" PRIu64 " as [Startup]CPUShares=%" PRIu64 " on %s",
                                                  weight, shares, path);
                        } else if (cgroup_context_has_cpu_shares(c))
                                shares = cgroup_context_cpu_shares(c, state);
                        else
                                shares = CGROUP_CPU_SHARES_DEFAULT;

                        cgroup_apply_legacy_cpu_shares(u, shares);
                        cgroup_apply_legacy_cpu_quota(u, c->cpu_quota_per_sec_usec, c->cpu_quota_period_usec);
                }
        }

        /* The 'io' controller attributes are not exported on the host's root cgroup (being a pure cgroup v2
         * controller), and in case of containers we want to leave control of these attributes to the container manager
         * (and we couldn't access that stuff anyway, even if we tried if proper delegation is used). */
        if ((apply_mask & CGROUP_MASK_IO) && !is_local_root) {
                char buf[8+DECIMAL_STR_MAX(uint64_t)+1];
                bool has_io, has_blockio;
                uint64_t weight;

                has_io = cgroup_context_has_io_config(c);
                has_blockio = cgroup_context_has_blockio_config(c);

                if (has_io)
                        weight = cgroup_context_io_weight(c, state);
                else if (has_blockio) {
                        uint64_t blkio_weight;

                        blkio_weight = cgroup_context_blkio_weight(c, state);
                        weight = cgroup_weight_blkio_to_io(blkio_weight);

                        log_cgroup_compat(u, "Applying [Startup]BlockIOWeight=%" PRIu64 " as [Startup]IOWeight=%" PRIu64,
                                          blkio_weight, weight);
                } else
                        weight = CGROUP_WEIGHT_DEFAULT;

                xsprintf(buf, "default %" PRIu64 "\n", weight);
                (void) set_attribute_and_warn(u, "io", "io.weight", buf);

                if (has_io) {
                        CGroupIODeviceLatency *latency;
                        CGroupIODeviceLimit *limit;
                        CGroupIODeviceWeight *w;

                        LIST_FOREACH(device_weights, w, c->io_device_weights)
                                cgroup_apply_io_device_weight(u, w->path, w->weight);

                        LIST_FOREACH(device_limits, limit, c->io_device_limits)
                                cgroup_apply_io_device_limit(u, limit->path, limit->limits);

                        LIST_FOREACH(device_latencies, latency, c->io_device_latencies)
                                cgroup_apply_io_device_latency(u, latency->path, latency->target_usec);

                } else if (has_blockio) {
                        CGroupBlockIODeviceWeight *w;
                        CGroupBlockIODeviceBandwidth *b;

                        LIST_FOREACH(device_weights, w, c->blockio_device_weights) {
                                weight = cgroup_weight_blkio_to_io(w->weight);

                                log_cgroup_compat(u, "Applying BlockIODeviceWeight=%" PRIu64 " as IODeviceWeight=%" PRIu64 " for %s",
                                                  w->weight, weight, w->path);

                                cgroup_apply_io_device_weight(u, w->path, weight);
                        }

                        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                                uint64_t limits[_CGROUP_IO_LIMIT_TYPE_MAX];
                                CGroupIOLimitType type;

                                for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                                        limits[type] = cgroup_io_limit_defaults[type];

                                limits[CGROUP_IO_RBPS_MAX] = b->rbps;
                                limits[CGROUP_IO_WBPS_MAX] = b->wbps;

                                log_cgroup_compat(u, "Applying BlockIO{Read|Write}Bandwidth=%" PRIu64 " %" PRIu64 " as IO{Read|Write}BandwidthMax= for %s",
                                                  b->rbps, b->wbps, b->path);

                                cgroup_apply_io_device_limit(u, b->path, limits);
                        }
                }
        }

        if (apply_mask & CGROUP_MASK_BLKIO) {
                bool has_io, has_blockio;

                has_io = cgroup_context_has_io_config(c);
                has_blockio = cgroup_context_has_blockio_config(c);

                /* Applying a 'weight' never makes sense for the host root cgroup, and for containers this should be
                 * left to our container manager, too. */
                if (!is_local_root) {
                        char buf[DECIMAL_STR_MAX(uint64_t)+1];
                        uint64_t weight;

                        if (has_io) {
                                uint64_t io_weight;

                                io_weight = cgroup_context_io_weight(c, state);
                                weight = cgroup_weight_io_to_blkio(io_weight);

                                log_cgroup_compat(u, "Applying [Startup]IOWeight=%" PRIu64 " as [Startup]BlockIOWeight=%" PRIu64,
                                                  io_weight, weight);
                        } else if (has_blockio)
                                weight = cgroup_context_blkio_weight(c, state);
                        else
                                weight = CGROUP_BLKIO_WEIGHT_DEFAULT;

                        xsprintf(buf, "%" PRIu64 "\n", weight);
                        (void) set_attribute_and_warn(u, "blkio", "blkio.weight", buf);

                        if (has_io) {
                                CGroupIODeviceWeight *w;

                                LIST_FOREACH(device_weights, w, c->io_device_weights) {
                                        weight = cgroup_weight_io_to_blkio(w->weight);

                                        log_cgroup_compat(u, "Applying IODeviceWeight=%" PRIu64 " as BlockIODeviceWeight=%" PRIu64 " for %s",
                                                          w->weight, weight, w->path);

                                        cgroup_apply_blkio_device_weight(u, w->path, weight);
                                }
                        } else if (has_blockio) {
                                CGroupBlockIODeviceWeight *w;

                                LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                                        cgroup_apply_blkio_device_weight(u, w->path, w->weight);
                        }
                }
                /* Bandwidth limits make sense to apply to the host's root cgroup, but not to container roots, where we
                 * want the container manager to handle them */
                if (is_host_root || !is_local_root) {
                        if (has_io) {
                                CGroupIODeviceLimit *l;

                                LIST_FOREACH(device_limits, l, c->io_device_limits) {
                                        log_cgroup_compat(u, "Applying IO{Read|Write}Bandwidth=%" PRIu64 " %" PRIu64 " as BlockIO{Read|Write}BandwidthMax= for %s",
                                                          l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX], l->path);

                                        cgroup_apply_blkio_device_limit(u, l->path, l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX]);
                                }
                        } else if (has_blockio) {
                                CGroupBlockIODeviceBandwidth *b;

                                LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths)
                                        cgroup_apply_blkio_device_limit(u, b->path, b->rbps, b->wbps);
                        }
                }
        }

        /* In unified mode 'memory' attributes do not exist on the root cgroup. In legacy mode 'memory.limit_in_bytes'
         * exists on the root cgroup, but any writes to it are refused with EINVAL. And if we run in a container we
         * want to leave control to the container manager (and if proper cgroup v2 delegation is used we couldn't even
         * write to this if we wanted to.) */
        if ((apply_mask & CGROUP_MASK_MEMORY) && !is_local_root) {

                if (cg_all_unified() > 0) {
                        uint64_t max, swap_max = CGROUP_LIMIT_MAX;

                        if (unit_has_unified_memory_config(u)) {
                                max = c->memory_max;
                                swap_max = c->memory_swap_max;
                        } else {
                                max = c->memory_limit;

                                if (max != CGROUP_LIMIT_MAX)
                                        log_cgroup_compat(u, "Applying MemoryLimit=%" PRIu64 " as MemoryMax=", max);
                        }

                        cgroup_apply_unified_memory_limit(u, "memory.min", c->memory_min);
                        cgroup_apply_unified_memory_limit(u, "memory.low", unit_get_ancestor_memory_low(u));
                        cgroup_apply_unified_memory_limit(u, "memory.high", c->memory_high);
                        cgroup_apply_unified_memory_limit(u, "memory.max", max);
                        cgroup_apply_unified_memory_limit(u, "memory.swap.max", swap_max);

                        (void) set_attribute_and_warn(u, "memory", "memory.oom.group", one_zero(c->memory_oom_group));

                } else {
                        char buf[DECIMAL_STR_MAX(uint64_t) + 1];
                        uint64_t val;

                        if (unit_has_unified_memory_config(u)) {
                                val = c->memory_max;
                                log_cgroup_compat(u, "Applying MemoryMax=%" PRIu64 " as MemoryLimit=", val);
                        } else
                                val = c->memory_limit;

                        if (val == CGROUP_LIMIT_MAX)
                                strncpy(buf, "-1\n", sizeof(buf));
                        else
                                xsprintf(buf, "%" PRIu64 "\n", val);

                        (void) set_attribute_and_warn(u, "memory", "memory.limit_in_bytes", buf);
                }
        }

        /* On cgroup v2 we can apply BPF everywhere. On cgroup v1 we apply it everywhere except for the root of
         * containers, where we leave this to the manager */
        if ((apply_mask & (CGROUP_MASK_DEVICES | CGROUP_MASK_BPF_DEVICES)) &&
            (is_host_root || cg_all_unified() > 0 || !is_local_root)) {
                _cleanup_(bpf_program_unrefp) BPFProgram *prog = NULL;
                CGroupDeviceAllow *a;

                if (cg_all_unified() > 0) {
                        r = cgroup_init_device_bpf(&prog, c->device_policy, c->device_allow);
                        if (r < 0)
                                log_unit_warning_errno(u, r, "Failed to initialize device control bpf program: %m");
                } else {
                        /* Changing the devices list of a populated cgroup might result in EINVAL, hence ignore EINVAL
                         * here. */

                        if (c->device_allow || c->device_policy != CGROUP_AUTO)
                                r = cg_set_attribute("devices", path, "devices.deny", "a");
                        else
                                r = cg_set_attribute("devices", path, "devices.allow", "a");
                        if (r < 0)
                                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES, -EPERM) ? LOG_DEBUG : LOG_WARNING, r,
                                              "Failed to reset devices.allow/devices.deny: %m");
                }

                if (c->device_policy == CGROUP_CLOSED ||
                    (c->device_policy == CGROUP_AUTO && c->device_allow)) {
                        static const char auto_devices[] =
                                "/dev/null\0" "rwm\0"
                                "/dev/zero\0" "rwm\0"
                                "/dev/full\0" "rwm\0"
                                "/dev/random\0" "rwm\0"
                                "/dev/urandom\0" "rwm\0"
                                "/dev/tty\0" "rwm\0"
                                "/dev/ptmx\0" "rwm\0"
                                /* Allow /run/systemd/inaccessible/{chr,blk} devices for mapping InaccessiblePaths */
                                "/run/systemd/inaccessible/chr\0" "rwm\0"
                                "/run/systemd/inaccessible/blk\0" "rwm\0";

                        const char *x, *y;

                        NULSTR_FOREACH_PAIR(x, y, auto_devices)
                                (void) whitelist_device(prog, path, x, y);

                        /* PTS (/dev/pts) devices may not be duplicated, but accessed */
                        (void) whitelist_major(prog, path, "pts", 'c', "rw");
                }

                LIST_FOREACH(device_allow, a, c->device_allow) {
                        char acc[4], *val;
                        unsigned k = 0;

                        if (a->r)
                                acc[k++] = 'r';
                        if (a->w)
                                acc[k++] = 'w';
                        if (a->m)
                                acc[k++] = 'm';

                        if (k == 0)
                                continue;

                        acc[k++] = 0;

                        if (path_startswith(a->path, "/dev/"))
                                (void) whitelist_device(prog, path, a->path, acc);
                        else if ((val = startswith(a->path, "block-")))
                                (void) whitelist_major(prog, path, val, 'b', acc);
                        else if ((val = startswith(a->path, "char-")))
                                (void) whitelist_major(prog, path, val, 'c', acc);
                        else
                                log_unit_debug(u, "Ignoring device '%s' while writing cgroup attribute.", a->path);
                }

                r = cgroup_apply_device_bpf(u, prog, c->device_policy, c->device_allow);
                if (r < 0) {
                        static bool warned = false;

                        log_full_errno(warned ? LOG_DEBUG : LOG_WARNING, r,
                                       "Unit %s configures device ACL, but the local system doesn't seem to support the BPF-based device controller.\n"
                                       "Proceeding WITHOUT applying ACL (all devices will be accessible)!\n"
                                       "(This warning is only shown for the first loaded unit using device ACL.)", u->id);

                        warned = true;
                }
        }

        if (apply_mask & CGROUP_MASK_PIDS) {

                if (is_host_root) {
                        /* So, the "pids" controller does not expose anything on the root cgroup, in order not to
                         * replicate knobs exposed elsewhere needlessly. We abstract this away here however, and when
                         * the knobs of the root cgroup are modified propagate this to the relevant sysctls. There's a
                         * non-obvious asymmetry however: unlike the cgroup properties we don't really want to take
                         * exclusive ownership of the sysctls, but we still want to honour things if the user sets
                         * limits. Hence we employ sort of a one-way strategy: when the user sets a bounded limit
                         * through us it counts. When the user afterwards unsets it again (i.e. sets it to unbounded)
                         * it also counts. But if the user never set a limit through us (i.e. we are the default of
                         * "unbounded") we leave things unmodified. For this we manage a global boolean that we turn on
                         * the first time we set a limit. Note that this boolean is flushed out on manager reload,
                         * which is desirable so that there's an official way to release control of the sysctl from
                         * systemd: set the limit to unbounded and reload. */

                        if (c->tasks_max != CGROUP_LIMIT_MAX) {
                                u->manager->sysctl_pid_max_changed = true;
                                r = procfs_tasks_set_limit(c->tasks_max);
                        } else if (u->manager->sysctl_pid_max_changed)
                                r = procfs_tasks_set_limit(TASKS_MAX);
                        else
                                r = 0;
                        if (r < 0)
                                log_unit_full(u, LOG_LEVEL_CGROUP_WRITE(r), r,
                                              "Failed to write to tasks limit sysctls: %m");
                }

                /* The attribute itself is not available on the host root cgroup, and in the container case we want to
                 * leave it for the container manager. */
                if (!is_local_root) {
                        if (c->tasks_max != CGROUP_LIMIT_MAX) {
                                char buf[DECIMAL_STR_MAX(uint64_t) + 2];

                                sprintf(buf, "%" PRIu64 "\n", c->tasks_max);
                                (void) set_attribute_and_warn(u, "pids", "pids.max", buf);
                        } else
                                (void) set_attribute_and_warn(u, "pids", "pids.max", "max\n");
                }
        }

        if (apply_mask & CGROUP_MASK_BPF_FIREWALL)
                cgroup_apply_firewall(u);
}

static bool unit_get_needs_bpf_firewall(Unit *u) {
        CGroupContext *c;
        Unit *p;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        if (c->ip_accounting ||
            c->ip_address_allow ||
            c->ip_address_deny)
                return true;

        /* If any parent slice has an IP access list defined, it applies too */
        for (p = UNIT_DEREF(u->slice); p; p = UNIT_DEREF(p->slice)) {
                c = unit_get_cgroup_context(p);
                if (!c)
                        return false;

                if (c->ip_address_allow ||
                    c->ip_address_deny)
                        return true;
        }

        return false;
}

static CGroupMask unit_get_cgroup_mask(Unit *u) {
        CGroupMask mask = 0;
        CGroupContext *c;

        assert(u);

        c = unit_get_cgroup_context(u);

        /* Figure out which controllers we need, based on the cgroup context object */

        if (c->cpu_accounting)
                mask |= get_cpu_accounting_mask();

        if (cgroup_context_has_cpu_weight(c) ||
            cgroup_context_has_cpu_shares(c) ||
            c->cpu_quota_per_sec_usec != USEC_INFINITY)
                mask |= CGROUP_MASK_CPU;

        if (cgroup_context_has_io_config(c) || cgroup_context_has_blockio_config(c))
                mask |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;

        if (c->memory_accounting ||
            c->memory_limit != CGROUP_LIMIT_MAX ||
            unit_has_unified_memory_config(u))
                mask |= CGROUP_MASK_MEMORY;

        if (c->device_allow ||
            c->device_policy != CGROUP_AUTO)
                mask |= CGROUP_MASK_DEVICES | CGROUP_MASK_BPF_DEVICES;

        if (c->tasks_accounting ||
            c->tasks_max != CGROUP_LIMIT_MAX)
                mask |= CGROUP_MASK_PIDS;

        return CGROUP_MASK_EXTEND_JOINED(mask);
}

static CGroupMask unit_get_bpf_mask(Unit *u) {
        CGroupMask mask = 0;

        /* Figure out which controllers we need, based on the cgroup context, possibly taking into account children
         * too. */

        if (unit_get_needs_bpf_firewall(u))
                mask |= CGROUP_MASK_BPF_FIREWALL;

        return mask;
}

CGroupMask unit_get_own_mask(Unit *u) {
        CGroupContext *c;

        /* Returns the mask of controllers the unit needs for itself. If a unit is not properly loaded, return an empty
         * mask, as we shouldn't reflect it in the cgroup hierarchy then. */

        if (u->load_state != UNIT_LOADED)
                return 0;

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        return (unit_get_cgroup_mask(u) | unit_get_bpf_mask(u) | unit_get_delegate_mask(u)) & ~unit_get_ancestor_disable_mask(u);
}

CGroupMask unit_get_delegate_mask(Unit *u) {
        CGroupContext *c;

        /* If delegation is turned on, then turn on selected controllers, unless we are on the legacy hierarchy and the
         * process we fork into is known to drop privileges, and hence shouldn't get access to the controllers.
         *
         * Note that on the unified hierarchy it is safe to delegate controllers to unprivileged services. */

        if (!unit_cgroup_delegate(u))
                return 0;

        if (cg_all_unified() <= 0) {
                ExecContext *e;

                e = unit_get_exec_context(u);
                if (e && !exec_context_maintains_privileges(e))
                        return 0;
        }

        assert_se(c = unit_get_cgroup_context(u));
        return CGROUP_MASK_EXTEND_JOINED(c->delegate_controllers);
}

CGroupMask unit_get_members_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's children require, merged */

        if (u->cgroup_members_mask_valid)
                return u->cgroup_members_mask; /* Use cached value if possible */

        u->cgroup_members_mask = 0;

        if (u->type == UNIT_SLICE) {
                void *v;
                Unit *member;
                Iterator i;

                HASHMAP_FOREACH_KEY(v, member, u->dependencies[UNIT_BEFORE], i) {
                        if (UNIT_DEREF(member->slice) == u)
                                u->cgroup_members_mask |= unit_get_subtree_mask(member); /* note that this calls ourselves again, for the children */
                }
        }

        u->cgroup_members_mask_valid = true;
        return u->cgroup_members_mask;
}

CGroupMask unit_get_siblings_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's siblings
         * require, i.e. the members mask of the unit's parent slice
         * if there is one. */

        if (UNIT_ISSET(u->slice))
                return unit_get_members_mask(UNIT_DEREF(u->slice));

        return unit_get_subtree_mask(u); /* we are the top-level slice */
}

CGroupMask unit_get_disable_mask(Unit *u) {
        CGroupContext *c;

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        return c->disable_controllers;
}

CGroupMask unit_get_ancestor_disable_mask(Unit *u) {
        CGroupMask mask;

        assert(u);
        mask = unit_get_disable_mask(u);

        /* Returns the mask of controllers which are marked as forcibly
         * disabled in any ancestor unit or the unit in question. */

        if (UNIT_ISSET(u->slice))
                mask |= unit_get_ancestor_disable_mask(UNIT_DEREF(u->slice));

        return mask;
}

CGroupMask unit_get_subtree_mask(Unit *u) {

        /* Returns the mask of this subtree, meaning of the group
         * itself and its children. */

        return unit_get_own_mask(u) | unit_get_members_mask(u);
}

CGroupMask unit_get_target_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable
         * for a specific cgroup, i.e. everything it needs itself,
         * plus all that its children need, plus all that its siblings
         * need. This is primarily useful on the legacy cgroup
         * hierarchy, where we need to duplicate each cgroup in each
         * hierarchy that shall be enabled for it. */

        mask = unit_get_own_mask(u) | unit_get_members_mask(u) | unit_get_siblings_mask(u);
        mask &= u->manager->cgroup_supported;
        mask &= ~unit_get_ancestor_disable_mask(u);

        return mask;
}

CGroupMask unit_get_enable_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable
         * for the children of a specific cgroup. This is primarily
         * useful for the unified cgroup hierarchy, where each cgroup
         * controls which controllers are enabled for its children. */

        mask = unit_get_members_mask(u);
        mask &= u->manager->cgroup_supported;
        mask &= ~unit_get_ancestor_disable_mask(u);

        return mask;
}
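
/* Illustration (not from the original source): if a.service (needing "cpu") and b.service (needing
 * "io") both live in parent.slice, the members mask of parent.slice is cpu|io; that becomes its
 * enable mask, and each service's target mask also picks up its siblings' controllers via
 * unit_get_siblings_mask() so the legacy hierarchies stay aligned. */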

void unit_invalidate_cgroup_members_masks(Unit *u) {
        assert(u);

        /* Recursively invalidate the members mask cache all the way up the tree */
        u->cgroup_members_mask_valid = false;

        if (UNIT_ISSET(u->slice))
                unit_invalidate_cgroup_members_masks(UNIT_DEREF(u->slice));
}

const char *unit_get_realized_cgroup_path(Unit *u, CGroupMask mask) {

        /* Returns the realized cgroup path of the specified unit where all specified controllers are available. */

        while (u) {

                if (u->cgroup_path &&
                    u->cgroup_realized &&
                    FLAGS_SET(u->cgroup_realized_mask, mask))
                        return u->cgroup_path;

                u = UNIT_DEREF(u->slice);
        }

        return NULL;
}

static const char *migrate_callback(CGroupMask mask, void *userdata) {
        return unit_get_realized_cgroup_path(userdata, mask);
}

char *unit_default_cgroup_path(const Unit *u) {
        _cleanup_free_ char *escaped = NULL, *slice = NULL;
        int r;

        assert(u);

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return strdup(u->manager->cgroup_root);

        if (UNIT_ISSET(u->slice) && !unit_has_name(UNIT_DEREF(u->slice), SPECIAL_ROOT_SLICE)) {
                r = cg_slice_to_path(UNIT_DEREF(u->slice)->id, &slice);
                if (r < 0)
                        return NULL;
        }

        escaped = cg_escape(u->id);
        if (!escaped)
                return NULL;

        if (slice)
                return strjoin(u->manager->cgroup_root, "/", slice, "/",
                               escaped);
        else
                return strjoin(u->manager->cgroup_root, "/", escaped);
}
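
/* Example (illustrative): a unit "foo.service" placed in "bar.slice" yields
 * "<cgroup-root>/bar.slice/foo.service"; nested slice names are expanded by cg_slice_to_path(),
 * e.g. "system-getty.slice" becomes the path component "system.slice/system-getty.slice", and
 * cg_escape() takes care of unit names that would otherwise clash with kernel-reserved names. */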

int unit_set_cgroup_path(Unit *u, const char *path) {
        _cleanup_free_ char *p = NULL;
        int r;

        assert(u);

        if (streq_ptr(u->cgroup_path, path))
                return 0;

        if (path) {
                p = strdup(path);
                if (!p)
                        return -ENOMEM;
        }

        if (p) {
                r = hashmap_put(u->manager->cgroup_unit, p, u);
                if (r < 0)
                        return r;
        }

        unit_release_cgroup(u);
        u->cgroup_path = TAKE_PTR(p);

        return 1;
}

int unit_watch_cgroup(Unit *u) {
        _cleanup_free_ char *events = NULL;
        int r;

        assert(u);

        /* Watches the "cgroup.events" attribute of this unit's cgroup for "empty" events, but only if
         * cgroupv2 is available. */

        if (!u->cgroup_path)
                return 0;

        if (u->cgroup_control_inotify_wd >= 0)
                return 0;

        /* Only applies to the unified hierarchy */
        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return log_error_errno(r, "Failed to determine whether the name=systemd hierarchy is unified: %m");
        if (r == 0)
                return 0;

        /* No point in watching the top-level slice, it's never going to run empty. */
        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        r = hashmap_ensure_allocated(&u->manager->cgroup_control_inotify_wd_unit, &trivial_hash_ops);
        if (r < 0)
                return log_oom();

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events", &events);
        if (r < 0)
                return log_oom();

        u->cgroup_control_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, events, IN_MODIFY);
        if (u->cgroup_control_inotify_wd < 0) {

                if (errno == ENOENT) /* If the directory is already gone we don't need to track it, so this
                                      * is not an error */
                        return 0;

                return log_unit_error_errno(u, errno, "Failed to add control inotify watch descriptor for control group %s: %m", u->cgroup_path);
        }

        r = hashmap_put(u->manager->cgroup_control_inotify_wd_unit, INT_TO_PTR(u->cgroup_control_inotify_wd), u);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to add control inotify watch descriptor to hash map: %m");

        return 0;
}

int unit_watch_cgroup_memory(Unit *u) {
        _cleanup_free_ char *events = NULL;
        CGroupContext *c;
        int r;

        assert(u);

        /* Watches the "memory.events" attribute of this unit's cgroup for "oom_kill" events, but only if
         * cgroupv2 is available. */

        if (!u->cgroup_path)
                return 0;

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        /* The "memory.events" attribute is only available if the memory controller is on. Let's hence tie
         * this to memory accounting, in a way watching for OOM kills is a form of memory accounting after
         * all. */
        if (!c->memory_accounting)
                return 0;

        /* Don't watch inner nodes, as the kernel doesn't report oom_kill events recursively currently, and
         * we also don't want to generate a log message for each parent cgroup of a process. */
        if (u->type == UNIT_SLICE)
                return 0;

        if (u->cgroup_memory_inotify_wd >= 0)
                return 0;

        /* Only applies to the unified hierarchy */
        r = cg_all_unified();
        if (r < 0)
                return log_error_errno(r, "Failed to determine whether the memory controller is unified: %m");
        if (r == 0)
                return 0;

        r = hashmap_ensure_allocated(&u->manager->cgroup_memory_inotify_wd_unit, &trivial_hash_ops);
        if (r < 0)
                return log_oom();

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "memory.events", &events);
        if (r < 0)
                return log_oom();

        u->cgroup_memory_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, events, IN_MODIFY);
        if (u->cgroup_memory_inotify_wd < 0) {

                if (errno == ENOENT) /* If the directory is already gone we don't need to track it, so this
                                      * is not an error */
                        return 0;

                return log_unit_error_errno(u, errno, "Failed to add memory inotify watch descriptor for control group %s: %m", u->cgroup_path);
        }

        r = hashmap_put(u->manager->cgroup_memory_inotify_wd_unit, INT_TO_PTR(u->cgroup_memory_inotify_wd), u);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to add memory inotify watch descriptor to hash map: %m");

        return 0;
}

int unit_pick_cgroup_path(Unit *u) {
        _cleanup_free_ char *path = NULL;
        int r;

        assert(u);

        if (u->cgroup_path)
                return 0;

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EINVAL;

        path = unit_default_cgroup_path(u);
        if (!path)
                return log_oom();

        r = unit_set_cgroup_path(u, path);
        if (r == -EEXIST)
                return log_unit_error_errno(u, r, "Control group %s exists already.", path);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to set unit's control group path to %s: %m", path);

        return 0;
}

static int unit_create_cgroup(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask,
                ManagerState state) {

        bool created;
        int r;

        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* Figure out our cgroup path */
        r = unit_pick_cgroup_path(u);
        if (r < 0)
                return r;

        /* First, create our own group */
        r = cg_create_everywhere(u->manager->cgroup_supported, target_mask, u->cgroup_path);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to create cgroup %s: %m", u->cgroup_path);
        created = r;

        /* Start watching it */
        (void) unit_watch_cgroup(u);
        (void) unit_watch_cgroup_memory(u);

        /* Preserve enabled controllers in delegated units, adjust others. */
        if (created || !u->cgroup_realized || !unit_cgroup_delegate(u)) {
                CGroupMask result_mask = 0;

                /* Enable all controllers we need */
                r = cg_enable_everywhere(u->manager->cgroup_supported, enable_mask, u->cgroup_path, &result_mask);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to enable/disable controllers on cgroup %s, ignoring: %m", u->cgroup_path);

                /* If we just turned off a controller, this might release the controller for our parent too, let's
                 * enqueue the parent for re-realization in that case again. */
                if (UNIT_ISSET(u->slice)) {
                        CGroupMask turned_off;

                        turned_off = (u->cgroup_realized ? u->cgroup_enabled_mask & ~result_mask : 0);
                        if (turned_off != 0) {
                                Unit *parent;

                                /* Force the parent to propagate the enable mask to the kernel again, by invalidating
                                 * the controller we just turned off. */

                                for (parent = UNIT_DEREF(u->slice); parent; parent = UNIT_DEREF(parent->slice))
                                        unit_invalidate_cgroup(parent, turned_off);
                        }
                }

                /* Remember what's actually enabled now */
                u->cgroup_enabled_mask = result_mask;
1844 }
1845
1846 /* Keep track that this is now realized */
1847 u->cgroup_realized = true;
1848 u->cgroup_realized_mask = target_mask;
1849
1850 if (u->type != UNIT_SLICE && !unit_cgroup_delegate(u)) {
1851
1852 /* Then, possibly move things over, but not if
1853 * subgroups may contain processes, which is the case
1854 * for slice and delegation units. */
1855 r = cg_migrate_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->cgroup_path, migrate_callback, u);
1856 if (r < 0)
1857 log_unit_warning_errno(u, r, "Failed to migrate cgroup from to %s, ignoring: %m", u->cgroup_path);
1858 }
1859
1860 /* Set attributes */
1861 cgroup_context_apply(u, target_mask, state);
1862 cgroup_xattr_apply(u);
1863
1864 return 0;
1865 }
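/* Rough shell equivalent of what cg_enable_everywhere() does on the unified hierarchy, for
 * illustration (paths hypothetical): controllers are switched on or off for the children of a
 * cgroup by writing "+name"/"-name" tokens into its cgroup.subtree_control file:
 *
 *     # echo "+memory +pids" > /sys/fs/cgroup/system.slice/cgroup.subtree_control
 *     # echo "-cpu" > /sys/fs/cgroup/system.slice/cgroup.subtree_control
 */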
1866
1867 static int unit_attach_pid_to_cgroup_via_bus(Unit *u, pid_t pid, const char *suffix_path) {
1868 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1869 char *pp;
1870 int r;
1871
1872 assert(u);
1873
1874 if (MANAGER_IS_SYSTEM(u->manager))
1875 return -EINVAL;
1876
1877 if (!u->manager->system_bus)
1878 return -EIO;
1879
1880 if (!u->cgroup_path)
1881 return -EINVAL;
1882
1883 /* Determine this unit's cgroup path relative to our cgroup root */
1884 pp = path_startswith(u->cgroup_path, u->manager->cgroup_root);
1885 if (!pp)
1886 return -EINVAL;
1887
1888 pp = strjoina("/", pp, suffix_path);
1889 path_simplify(pp, false);
1890
1891 r = sd_bus_call_method(u->manager->system_bus,
1892 "org.freedesktop.systemd1",
1893 "/org/freedesktop/systemd1",
1894 "org.freedesktop.systemd1.Manager",
1895 "AttachProcessesToUnit",
1896 &error, NULL,
1897 "ssau",
1898 NULL /* empty unit name means client's unit, i.e. us */, pp, 1, (uint32_t) pid);
1899 if (r < 0)
1900 return log_unit_debug_errno(u, r, "Failed to attach unit process " PID_FMT " via the bus: %s", pid, bus_error_message(&error, r));
1901
1902 return 0;
1903 }
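/* The same method call expressed with busctl, for illustration (the subcgroup path and PID are
 * hypothetical); the empty unit name selects the caller's own unit, as above:
 *
 *     $ busctl call org.freedesktop.systemd1 /org/freedesktop/systemd1 \
 *           org.freedesktop.systemd1.Manager AttachProcessesToUnit \
 *           ssau "" "/sub" 1 4711
 */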
1904
1905 int unit_attach_pids_to_cgroup(Unit *u, Set *pids, const char *suffix_path) {
1906 CGroupMask delegated_mask;
1907 const char *p;
1908 Iterator i;
1909 void *pidp;
1910 int r, q;
1911
1912 assert(u);
1913
1914 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1915 return -EINVAL;
1916
1917 if (set_isempty(pids))
1918 return 0;
1919
1920 r = unit_realize_cgroup(u);
1921 if (r < 0)
1922 return r;
1923
1924 if (isempty(suffix_path))
1925 p = u->cgroup_path;
1926 else
1927 p = strjoina(u->cgroup_path, "/", suffix_path);
1928
1929 delegated_mask = unit_get_delegate_mask(u);
1930
1931 r = 0;
1932 SET_FOREACH(pidp, pids, i) {
1933 pid_t pid = PTR_TO_PID(pidp);
1934 CGroupController c;
1935
1936 /* First, attach the PID to the main cgroup hierarchy */
1937 q = cg_attach(SYSTEMD_CGROUP_CONTROLLER, p, pid);
1938 if (q < 0) {
1939 log_unit_debug_errno(u, q, "Couldn't move process " PID_FMT " to requested cgroup '%s': %m", pid, p);
1940
1941 if (MANAGER_IS_USER(u->manager) && IN_SET(q, -EPERM, -EACCES)) {
1942 int z;
1943
1944 /* If we are in a user instance, and we can't move the process ourselves due to
1945 * permission problems, let's ask the system instance about it instead. Since it's more
1946 * privileged it might be able to move the process across the leaves of a subtree whose
1947 * top node is not owned by us. */
1948
1949 z = unit_attach_pid_to_cgroup_via_bus(u, pid, suffix_path);
1950 if (z < 0)
1951 log_unit_debug_errno(u, z, "Couldn't move process " PID_FMT " to requested cgroup '%s' via the system bus either: %m", pid, p);
1952 else
1953 continue; /* If the bus call worked, we are fully done for this PID. */
1954 }
1955
1956 if (r >= 0)
1957 r = q; /* Remember first error */
1958
1959 continue;
1960 }
1961
1962 q = cg_all_unified();
1963 if (q < 0)
1964 return q;
1965 if (q > 0)
1966 continue;
1967
1968 /* In the legacy hierarchy, attach the process to the requested cgroup if possible, and if not, to the
1969 * innermost realized one */
1970
1971 for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++) {
1972 CGroupMask bit = CGROUP_CONTROLLER_TO_MASK(c);
1973 const char *realized;
1974
1975 if (!(u->manager->cgroup_supported & bit))
1976 continue;
1977
1978 /* If this controller is delegated and realized, honour the caller's request for the cgroup suffix. */
1979 if (delegated_mask & u->cgroup_realized_mask & bit) {
1980 q = cg_attach(cgroup_controller_to_string(c), p, pid);
1981 if (q >= 0)
1982 continue; /* Success! */
1983
1984 log_unit_debug_errno(u, q, "Failed to attach PID " PID_FMT " to requested cgroup %s in controller %s, falling back to unit's cgroup: %m",
1985 pid, p, cgroup_controller_to_string(c));
1986 }
1987
1988 /* So this controller is either not delegated or not realized, or something else weird happened. In
1989 * that case let's attach the PID at least to the closest cgroup up the tree that is
1990 * realized. */
1991 realized = unit_get_realized_cgroup_path(u, bit);
1992 if (!realized)
1993 continue; /* Not even realized in the root slice? Then let's not bother */
1994
1995 q = cg_attach(cgroup_controller_to_string(c), realized, pid);
1996 if (q < 0)
1997 log_unit_debug_errno(u, q, "Failed to attach PID " PID_FMT " to realized cgroup %s in controller %s, ignoring: %m",
1998 pid, realized, cgroup_controller_to_string(c));
1999 }
2000 }
2001
2002 return r;
2003 }
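/* At the file system level, attaching a PID (which is what cg_attach() boils down to) means
 * writing it into the destination cgroup's cgroup.procs file; in shell, with a hypothetical path
 * and PID:
 *
 *     # echo 4711 > /sys/fs/cgroup/system.slice/example.service/cgroup.procs
 */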
2004
2005 static bool unit_has_mask_realized(
2006 Unit *u,
2007 CGroupMask target_mask,
2008 CGroupMask enable_mask) {
2009
2010 assert(u);
2011
2012 /* Returns true if this unit is fully realized. We check four things:
2013 *
2014 * 1. Whether the cgroup was created at all
2015 * 2. Whether the cgroup was created in all the hierarchies we need it to be created in (in case of cgroup v1)
2016 * 3. Whether the cgroup has all the right controllers enabled (in case of cgroup v2)
2017 * 4. Whether the invalidation mask is currently zero
2018 *
2019 * If you wonder why we mask the target realization and enable mask with CGROUP_MASK_V1/CGROUP_MASK_V2: note
2020 * that there are three sets of bitmasks: CGROUP_MASK_V1 (for real cgroup v1 controllers), CGROUP_MASK_V2 (for
2021 * real cgroup v2 controllers) and CGROUP_MASK_BPF (for BPF-based pseudo-controllers). Now, cgroup_realized_mask
2022 * only matters for cgroup v1 controllers, and cgroup_enabled_mask is only used for cgroup v2, and if they
2023 * differ in the others, we don't really care. (After all, the cgroup_enabled_mask tracks which controllers are
2024 * enabled through cgroup.subtree_control, and since the BPF pseudo-controllers don't show up there, they
2025 * simply don't matter.) */
2026
2027 return u->cgroup_realized &&
2028 ((u->cgroup_realized_mask ^ target_mask) & CGROUP_MASK_V1) == 0 &&
2029 ((u->cgroup_enabled_mask ^ enable_mask) & CGROUP_MASK_V2) == 0 &&
2030 u->cgroup_invalidated_mask == 0;
2031 }
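/* A worked example of the masking above, with hypothetical values: suppose
 * cgroup_realized_mask == CGROUP_MASK_MEMORY and target_mask == CGROUP_MASK_MEMORY |
 * CGROUP_MASK_BPF_FIREWALL. Then realized ^ target == CGROUP_MASK_BPF_FIREWALL, which the
 * & CGROUP_MASK_V1 filter clears, so a BPF-only difference does not by itself force
 * re-realization; such changes are caught via cgroup_invalidated_mask instead. */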
2032
2033 static bool unit_has_mask_disables_realized(
2034 Unit *u,
2035 CGroupMask target_mask,
2036 CGroupMask enable_mask) {
2037
2038 assert(u);
2039
2040 /* Returns true if all controllers which should be disabled are indeed disabled.
2041 *
2042 * Unlike unit_has_mask_realized, we don't care what was enabled, only that anything we want to remove is
2043 * already removed. */
2044
2045 return !u->cgroup_realized ||
2046 (FLAGS_SET(u->cgroup_realized_mask, target_mask & CGROUP_MASK_V1) &&
2047 FLAGS_SET(u->cgroup_enabled_mask, enable_mask & CGROUP_MASK_V2));
2048 }
2049
2050 static bool unit_has_mask_enables_realized(
2051 Unit *u,
2052 CGroupMask target_mask,
2053 CGroupMask enable_mask) {
2054
2055 assert(u);
2056
2057 /* Returns true if all controllers which should be enabled are indeed enabled.
2058 *
2059 * Unlike unit_has_mask_realized, we don't care about the controllers that are not present, only that anything
2060 * we want to add is already added. */
2061
2062 return u->cgroup_realized &&
2063 ((u->cgroup_realized_mask | target_mask) & CGROUP_MASK_V1) == (u->cgroup_realized_mask & CGROUP_MASK_V1) &&
2064 ((u->cgroup_enabled_mask | enable_mask) & CGROUP_MASK_V2) == (u->cgroup_enabled_mask & CGROUP_MASK_V2);
2065 }
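/* Note on the bit trick used by both predicates above: for any masks a and b,
 * (a | b) == a holds exactly when b is a subset of a. Each line therefore tests, per
 * hierarchy version, that everything in the wanted mask is already present in the
 * realized/enabled mask. */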
2066
2067 void unit_add_to_cgroup_realize_queue(Unit *u) {
2068 assert(u);
2069
2070 if (u->in_cgroup_realize_queue)
2071 return;
2072
2073 LIST_PREPEND(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
2074 u->in_cgroup_realize_queue = true;
2075 }
2076
2077 static void unit_remove_from_cgroup_realize_queue(Unit *u) {
2078 assert(u);
2079
2080 if (!u->in_cgroup_realize_queue)
2081 return;
2082
2083 LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
2084 u->in_cgroup_realize_queue = false;
2085 }
2086
2087 /* Controllers can only be enabled breadth-first, from the root of the
2088 * hierarchy downwards to the unit in question. */
2089 static int unit_realize_cgroup_now_enable(Unit *u, ManagerState state) {
2090 CGroupMask target_mask, enable_mask, new_target_mask, new_enable_mask;
2091 int r;
2092
2093 assert(u);
2094
2095 /* First go deal with this unit's parent, or we won't be able to enable
2096 * any new controllers at this layer. */
2097 if (UNIT_ISSET(u->slice)) {
2098 r = unit_realize_cgroup_now_enable(UNIT_DEREF(u->slice), state);
2099 if (r < 0)
2100 return r;
2101 }
2102
2103 target_mask = unit_get_target_mask(u);
2104 enable_mask = unit_get_enable_mask(u);
2105
2106 /* We can only enable in this direction, don't try to disable anything. */
2108 if (unit_has_mask_enables_realized(u, target_mask, enable_mask))
2109 return 0;
2110
2111 new_target_mask = u->cgroup_realized_mask | target_mask;
2112 new_enable_mask = u->cgroup_enabled_mask | enable_mask;
2113
2114 return unit_create_cgroup(u, new_target_mask, new_enable_mask, state);
2115 }
2116
2117 /* Controllers can only be disabled depth-first, from the leaves of the
2118 * hierarchy upwards to the unit in question. */
2119 static int unit_realize_cgroup_now_disable(Unit *u, ManagerState state) {
2120 Iterator i;
2121 Unit *m;
2122 void *v;
2123
2124 assert(u);
2125
2126 if (u->type != UNIT_SLICE)
2127 return 0;
2128
2129 HASHMAP_FOREACH_KEY(v, m, u->dependencies[UNIT_BEFORE], i) {
2130 CGroupMask target_mask, enable_mask, new_target_mask, new_enable_mask;
2131 int r;
2132
2133 if (UNIT_DEREF(m->slice) != u)
2134 continue;
2135
2136 /* The cgroup for this unit might not actually be fully
2137 * realised yet, in which case it isn't holding any controllers
2138 * open anyway. */
2139 if (!m->cgroup_path)
2140 continue;
2141
2142 /* We must disable those below us first in order to release the
2143 * controller. */
2144 if (m->type == UNIT_SLICE)
2145 (void) unit_realize_cgroup_now_disable(m, state);
2146
2147 target_mask = unit_get_target_mask(m);
2148 enable_mask = unit_get_enable_mask(m);
2149
2150 /* We can only disable in this direction, don't try to enable
2151 * anything. */
2152 if (unit_has_mask_disables_realized(m, target_mask, enable_mask))
2153 continue;
2154
2155 new_target_mask = m->cgroup_realized_mask & target_mask;
2156 new_enable_mask = m->cgroup_enabled_mask & enable_mask;
2157
2158 r = unit_create_cgroup(m, new_target_mask, new_enable_mask, state);
2159 if (r < 0)
2160 return r;
2161 }
2162
2163 return 0;
2164 }
2165
2166 /* Check if necessary controllers and attributes for a unit are in place.
2167 *
2168 * - If so, do nothing.
2169 * - If not, create paths, move processes over, and set attributes.
2170 *
2171 * Controllers can only be *enabled* in a breadth-first way, and *disabled* in
2172 * a depth-first way. As such the process looks like this:
2173 *
2174 * Suppose we have a cgroup hierarchy which looks like this:
2175 *
2176 * root
2177 * / \
2178 * / \
2179 * / \
2180 * a b
2181 * / \ / \
2182 * / \ / \
2183 * c d e f
2184 * / \ / \ / \ / \
2185 * h i j k l m n o
2186 *
2187 * 1. We want to realise cgroup "d" now.
2188 * 2. cgroup "a" has DisableControllers=cpu in the associated unit.
2189 * 3. cgroup "k" just started requesting the memory controller.
2190 *
2191 * To make this work we must do the following in order:
2192 *
2193 * 1. Disable CPU controller in k, j
2194 * 2. Disable CPU controller in d
2195 * 3. Enable memory controller in root
2196 * 4. Enable memory controller in a
2197 * 5. Enable memory controller in d
2198 * 6. Enable memory controller in k
2199 *
2200 * Notice that we need to touch j in one direction, but not the other. We also
2201 * don't go beyond d when disabling -- it's up to "a" to get realized if it
2202 * wants to disable further. The basic rules are therefore:
2203 *
2204 * - If you're disabling something, you need to realise all of the cgroups from
2205 * your recursive descendants to the root. This starts from the leaves.
2206 * - If you're enabling something, you need to realise from the root cgroup
2207 * downwards, but you don't need to iterate your recursive descendants.
2208 *
2209 * Returns 0 on success and < 0 on failure. */
2210 static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
2211 CGroupMask target_mask, enable_mask;
2212 int r;
2213
2214 assert(u);
2215
2216 unit_remove_from_cgroup_realize_queue(u);
2217
2218 target_mask = unit_get_target_mask(u);
2219 enable_mask = unit_get_enable_mask(u);
2220
2221 if (unit_has_mask_realized(u, target_mask, enable_mask))
2222 return 0;
2223
2224 /* Disable controllers below us, if there are any */
2225 r = unit_realize_cgroup_now_disable(u, state);
2226 if (r < 0)
2227 return r;
2228
2229 /* Enable controllers above us, if there are any */
2230 if (UNIT_ISSET(u->slice)) {
2231 r = unit_realize_cgroup_now_enable(UNIT_DEREF(u->slice), state);
2232 if (r < 0)
2233 return r;
2234 }
2235
2236 /* Now actually deal with the cgroup we were trying to realise and set attributes */
2237 r = unit_create_cgroup(u, target_mask, enable_mask, state);
2238 if (r < 0)
2239 return r;
2240
2241 /* Now, reset the invalidation mask */
2242 u->cgroup_invalidated_mask = 0;
2243 return 0;
2244 }
2245
2246 unsigned manager_dispatch_cgroup_realize_queue(Manager *m) {
2247 ManagerState state;
2248 unsigned n = 0;
2249 Unit *i;
2250 int r;
2251
2252 assert(m);
2253
2254 state = manager_state(m);
2255
2256 while ((i = m->cgroup_realize_queue)) {
2257 assert(i->in_cgroup_realize_queue);
2258
2259 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(i))) {
2260 /* Maybe things changed, and the unit is not actually active anymore? */
2261 unit_remove_from_cgroup_realize_queue(i);
2262 continue;
2263 }
2264
2265 r = unit_realize_cgroup_now(i, state);
2266 if (r < 0)
2267 log_warning_errno(r, "Failed to realize cgroups for queued unit %s, ignoring: %m", i->id);
2268
2269 n++;
2270 }
2271
2272 return n;
2273 }
2274
2275 static void unit_add_siblings_to_cgroup_realize_queue(Unit *u) {
2276 Unit *slice;
2277
2278 /* This adds the siblings of the specified unit and the
2279 * siblings of all parent units to the cgroup queue. (But
2280 * neither the specified unit itself nor the parents.) */
2281
2282 while ((slice = UNIT_DEREF(u->slice))) {
2283 Iterator i;
2284 Unit *m;
2285 void *v;
2286
2287 HASHMAP_FOREACH_KEY(v, m, u->dependencies[UNIT_BEFORE], i) {
2288 /* Skip units that have a dependency on the slice
2289 * but aren't actually in it. */
2290 if (UNIT_DEREF(m->slice) != slice)
2291 continue;
2292
2293 /* No point in doing cgroup application for units
2294 * without active processes. */
2295 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(m)))
2296 continue;
2297
2298 /* If the unit doesn't need any new controllers
2299 * and has current ones realized, it doesn't need
2300 * any changes. */
2301 if (unit_has_mask_realized(m,
2302 unit_get_target_mask(m),
2303 unit_get_enable_mask(m)))
2304 continue;
2305
2306 unit_add_to_cgroup_realize_queue(m);
2307 }
2308
2309 u = slice;
2310 }
2311 }
2312
2313 int unit_realize_cgroup(Unit *u) {
2314 assert(u);
2315
2316 if (!UNIT_HAS_CGROUP_CONTEXT(u))
2317 return 0;
2318
2319 /* So, here's the deal: when realizing the cgroups for this
2320 * unit, we need to first create all parents, but there's actually
2321 * more: for the weight-based controllers we also need to
2322 * make sure that all our siblings (i.e. units that are in the
2323 * same slice as we are) have cgroups, too. Otherwise, things
2324 * would become very uneven as each of their processes would
2325 * get as many resources as our whole group together. This call
2326 * will synchronously create the parent cgroups, but will
2327 * defer work on the siblings to the next event loop
2328 * iteration. */
2329
2330 /* Add all sibling slices to the cgroup queue. */
2331 unit_add_siblings_to_cgroup_realize_queue(u);
2332
2333 /* And realize this one now (and apply the values) */
2334 return unit_realize_cgroup_now(u, manager_state(u->manager));
2335 }
2336
2337 void unit_release_cgroup(Unit *u) {
2338 assert(u);
2339
2340 /* Forgets all cgroup details for this cgroup — but does *not* destroy the cgroup. This is hence OK to call
2341 * when we close down everything for reexecution, where we really want to leave the cgroup in place. */
2342
2343 if (u->cgroup_path) {
2344 (void) hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
2345 u->cgroup_path = mfree(u->cgroup_path);
2346 }
2347
2348 if (u->cgroup_control_inotify_wd >= 0) {
2349 if (inotify_rm_watch(u->manager->cgroup_inotify_fd, u->cgroup_control_inotify_wd) < 0)
2350 log_unit_debug_errno(u, errno, "Failed to remove cgroup control inotify watch %i for %s, ignoring: %m", u->cgroup_control_inotify_wd, u->id);
2351
2352 (void) hashmap_remove(u->manager->cgroup_control_inotify_wd_unit, INT_TO_PTR(u->cgroup_control_inotify_wd));
2353 u->cgroup_control_inotify_wd = -1;
2354 }
2355
2356 if (u->cgroup_memory_inotify_wd >= 0) {
2357 if (inotify_rm_watch(u->manager->cgroup_inotify_fd, u->cgroup_memory_inotify_wd) < 0)
2358 log_unit_debug_errno(u, errno, "Failed to remove cgroup memory inotify watch %i for %s, ignoring: %m", u->cgroup_memory_inotify_wd, u->id);
2359
2360 (void) hashmap_remove(u->manager->cgroup_memory_inotify_wd_unit, INT_TO_PTR(u->cgroup_memory_inotify_wd));
2361 u->cgroup_memory_inotify_wd = -1;
2362 }
2363 }
2364
2365 void unit_prune_cgroup(Unit *u) {
2366 int r;
2367 bool is_root_slice;
2368
2369 assert(u);
2370
2371 /* Removes the cgroup, if empty and possible, and stops watching it. */
2372
2373 if (!u->cgroup_path)
2374 return;
2375
2376 (void) unit_get_cpu_usage(u, NULL); /* Cache the last CPU usage value before we destroy the cgroup */
2377
2378 is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);
2379
2380 r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !is_root_slice);
2381 if (r < 0) {
2382 log_unit_debug_errno(u, r, "Failed to destroy cgroup %s, ignoring: %m", u->cgroup_path);
2383 return;
2384 }
2385
2386 if (is_root_slice)
2387 return;
2388
2389 unit_release_cgroup(u);
2390
2391 u->cgroup_realized = false;
2392 u->cgroup_realized_mask = 0;
2393 u->cgroup_enabled_mask = 0;
2394
2395 u->bpf_device_control_installed = bpf_program_unref(u->bpf_device_control_installed);
2396 }
2397
2398 int unit_search_main_pid(Unit *u, pid_t *ret) {
2399 _cleanup_fclose_ FILE *f = NULL;
2400 pid_t pid = 0, npid;
2401 int r;
2402
2403 assert(u);
2404 assert(ret);
2405
2406 if (!u->cgroup_path)
2407 return -ENXIO;
2408
2409 r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, &f);
2410 if (r < 0)
2411 return r;
2412
2413 while (cg_read_pid(f, &npid) > 0) {
2414
2415 if (npid == pid)
2416 continue;
2417
2418 if (pid_is_my_child(npid) == 0)
2419 continue;
2420
2421 if (pid != 0)
2422 /* Dang, there's more than one daemonized PID
2423 * in this group, so we don't know what process
2424 * is the main process. */
2425
2426 return -ENODATA;
2427
2428 pid = npid;
2429 }
2430
2431 *ret = pid;
2432 return 0;
2433 }
2434
2435 static int unit_watch_pids_in_path(Unit *u, const char *path) {
2436 _cleanup_closedir_ DIR *d = NULL;
2437 _cleanup_fclose_ FILE *f = NULL;
2438 int ret = 0, r;
2439
2440 assert(u);
2441 assert(path);
2442
2443 r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f);
2444 if (r < 0)
2445 ret = r;
2446 else {
2447 pid_t pid;
2448
2449 while ((r = cg_read_pid(f, &pid)) > 0) {
2450 r = unit_watch_pid(u, pid, false);
2451 if (r < 0 && ret >= 0)
2452 ret = r;
2453 }
2454
2455 if (r < 0 && ret >= 0)
2456 ret = r;
2457 }
2458
2459 r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d);
2460 if (r < 0) {
2461 if (ret >= 0)
2462 ret = r;
2463 } else {
2464 char *fn;
2465
2466 while ((r = cg_read_subgroup(d, &fn)) > 0) {
2467 _cleanup_free_ char *p = NULL;
2468
2469 p = strjoin(path, "/", fn);
2470 free(fn);
2471
2472 if (!p)
2473 return -ENOMEM;
2474
2475 r = unit_watch_pids_in_path(u, p);
2476 if (r < 0 && ret >= 0)
2477 ret = r;
2478 }
2479
2480 if (r < 0 && ret >= 0)
2481 ret = r;
2482 }
2483
2484 return ret;
2485 }
2486
2487 int unit_synthesize_cgroup_empty_event(Unit *u) {
2488 int r;
2489
2490 assert(u);
2491
2492 /* Enqueue a synthetic cgroup empty event if this unit doesn't watch any PIDs anymore. This is compatibility
2493 * support for non-unified systems where notifications aren't reliable, and where we hence need to take whatever
2494 * notification source we can get as soon as we stop having any useful PIDs to watch for. */
2495
2496 if (!u->cgroup_path)
2497 return -ENOENT;
2498
2499 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
2500 if (r < 0)
2501 return r;
2502 if (r > 0) /* On unified we have reliable notifications, and don't need this */
2503 return 0;
2504
2505 if (!set_isempty(u->pids))
2506 return 0;
2507
2508 unit_add_to_cgroup_empty_queue(u);
2509 return 0;
2510 }
2511
2512 int unit_watch_all_pids(Unit *u) {
2513 int r;
2514
2515 assert(u);
2516
2517 /* Adds all PIDs from our cgroup to the set of PIDs we
2518 * watch. This is a fallback logic for cases where we do not
2519 * get reliable cgroup empty notifications: we try to use
2520 * SIGCHLD as replacement. */
2521
2522 if (!u->cgroup_path)
2523 return -ENOENT;
2524
2525 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
2526 if (r < 0)
2527 return r;
2528 if (r > 0) /* On unified we can use proper notifications */
2529 return 0;
2530
2531 return unit_watch_pids_in_path(u, u->cgroup_path);
2532 }
2533
2534 static int on_cgroup_empty_event(sd_event_source *s, void *userdata) {
2535 Manager *m = userdata;
2536 Unit *u;
2537 int r;
2538
2539 assert(s);
2540 assert(m);
2541
2542 u = m->cgroup_empty_queue;
2543 if (!u)
2544 return 0;
2545
2546 assert(u->in_cgroup_empty_queue);
2547 u->in_cgroup_empty_queue = false;
2548 LIST_REMOVE(cgroup_empty_queue, m->cgroup_empty_queue, u);
2549
2550 if (m->cgroup_empty_queue) {
2551 /* More stuff queued, let's make sure we remain enabled */
2552 r = sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);
2553 if (r < 0)
2554 log_debug_errno(r, "Failed to reenable cgroup empty event source, ignoring: %m");
2555 }
2556
2557 unit_add_to_gc_queue(u);
2558
2559 if (UNIT_VTABLE(u)->notify_cgroup_empty)
2560 UNIT_VTABLE(u)->notify_cgroup_empty(u);
2561
2562 return 0;
2563 }
2564
2565 void unit_add_to_cgroup_empty_queue(Unit *u) {
2566 int r;
2567
2568 assert(u);
2569
2570 /* Note that there are four different ways cgroup empty events reach us:
2571 *
2572 * 1. On the unified hierarchy we get an inotify event on the cgroup
2573 *
2574 * 2. On the legacy hierarchy, when running in system mode, we get a datagram on the cgroup agent socket
2575 *
2576 * 3. On the legacy hierarchy, when running in user mode, we get a D-Bus signal on the system bus
2577 *
2578 * 4. On the legacy hierarchy, in service units we start watching all processes of the cgroup for SIGCHLD as
2579 * soon as we get one SIGCHLD, to deal with unreliable cgroup notifications.
2580 *
2581 * Regardless of which way we got the notification, we'll verify it here, and then add it to a separate
2582 * queue. This queue will be dispatched at a lower priority than the SIGCHLD handler, so that we always use
2583 * SIGCHLD if we can get it first, and only use the cgroup empty notifications if there's no SIGCHLD pending
2584 * (which might happen if the cgroup doesn't contain processes that are our own children, which is typically the
2585 * case for scope units). */
2586
2587 if (u->in_cgroup_empty_queue)
2588 return;
2589
2590 /* Let's verify that the cgroup is really empty */
2591 if (!u->cgroup_path)
2592 return;
2593 r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
2594 if (r < 0) {
2595 log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
2596 return;
2597 }
2598 if (r == 0)
2599 return;
2600
2601 LIST_PREPEND(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
2602 u->in_cgroup_empty_queue = true;
2603
2604 /* Trigger the defer event */
2605 r = sd_event_source_set_enabled(u->manager->cgroup_empty_event_source, SD_EVENT_ONESHOT);
2606 if (r < 0)
2607 log_debug_errno(r, "Failed to enable cgroup empty event source: %m");
2608 }
2609
2610 static int unit_check_oom(Unit *u) {
2611 _cleanup_free_ char *oom_kill = NULL;
2612 bool increased;
2613 uint64_t c;
2614 int r;
2615
2616 if (!u->cgroup_path)
2617 return 0;
2618
2619 r = cg_get_keyed_attribute("memory", u->cgroup_path, "memory.events", STRV_MAKE("oom_kill"), &oom_kill);
2620 if (r < 0)
2621 return log_unit_debug_errno(u, r, "Failed to read oom_kill field of memory.events cgroup attribute: %m");
2622
2623 r = safe_atou64(oom_kill, &c);
2624 if (r < 0)
2625 return log_unit_debug_errno(u, r, "Failed to parse oom_kill field: %m");
2626
2627 increased = c > u->oom_kill_last;
2628 u->oom_kill_last = c;
2629
2630 if (!increased)
2631 return 0;
2632
2633 log_struct(LOG_NOTICE,
2634 "MESSAGE_ID=" SD_MESSAGE_UNIT_OUT_OF_MEMORY_STR,
2635 LOG_UNIT_ID(u),
2636 LOG_UNIT_INVOCATION_ID(u),
2637 LOG_UNIT_MESSAGE(u, "A process of this unit has been killed by the OOM killer."));
2638
2639 if (UNIT_VTABLE(u)->notify_cgroup_oom)
2640 UNIT_VTABLE(u)->notify_cgroup_oom(u);
2641
2642 return 1;
2643 }
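/* For reference, the flat keyed format parsed above; cg_get_keyed_attribute() extracts just the
 * "oom_kill" counter (path and values illustrative):
 *
 *     $ cat /sys/fs/cgroup/system.slice/example.service/memory.events
 *     low 0
 *     high 0
 *     max 0
 *     oom 1
 *     oom_kill 1
 */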
2644
2645 static int on_cgroup_oom_event(sd_event_source *s, void *userdata) {
2646 Manager *m = userdata;
2647 Unit *u;
2648 int r;
2649
2650 assert(s);
2651 assert(m);
2652
2653 u = m->cgroup_oom_queue;
2654 if (!u)
2655 return 0;
2656
2657 assert(u->in_cgroup_oom_queue);
2658 u->in_cgroup_oom_queue = false;
2659 LIST_REMOVE(cgroup_oom_queue, m->cgroup_oom_queue, u);
2660
2661 if (m->cgroup_oom_queue) {
2662 /* More stuff queued, let's make sure we remain enabled */
2663 r = sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);
2664 if (r < 0)
2665 log_debug_errno(r, "Failed to reenable cgroup oom event source, ignoring: %m");
2666 }
2667
2668 (void) unit_check_oom(u);
2669 return 0;
2670 }
2671
2672 static void unit_add_to_cgroup_oom_queue(Unit *u) {
2673 int r;
2674
2675 assert(u);
2676
2677 if (u->in_cgroup_oom_queue)
2678 return;
2679 if (!u->cgroup_path)
2680 return;
2681
2682 LIST_PREPEND(cgroup_oom_queue, u->manager->cgroup_oom_queue, u);
2683 u->in_cgroup_oom_queue = true;
2684
2685 /* Trigger the defer event */
2686 if (!u->manager->cgroup_oom_event_source) {
2687 _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;
2688
2689 r = sd_event_add_defer(u->manager->event, &s, on_cgroup_oom_event, u->manager);
2690 if (r < 0) {
2691 log_error_errno(r, "Failed to create cgroup oom event source: %m");
2692 return;
2693 }
2694
2695 r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_NORMAL-8);
2696 if (r < 0) {
2697 log_error_errno(r, "Failed to set priority of cgroup oom event source: %m");
2698 return;
2699 }
2700
2701 (void) sd_event_source_set_description(s, "cgroup-oom");
2702 u->manager->cgroup_oom_event_source = TAKE_PTR(s);
2703 }
2704
2705 r = sd_event_source_set_enabled(u->manager->cgroup_oom_event_source, SD_EVENT_ONESHOT);
2706 if (r < 0)
2707 log_error_errno(r, "Failed to enable cgroup oom event source: %m");
2708 }
2709
2710 static int on_cgroup_inotify_event(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
2711 Manager *m = userdata;
2712
2713 assert(s);
2714 assert(fd >= 0);
2715 assert(m);
2716
2717 for (;;) {
2718 union inotify_event_buffer buffer;
2719 struct inotify_event *e;
2720 ssize_t l;
2721
2722 l = read(fd, &buffer, sizeof(buffer));
2723 if (l < 0) {
2724 if (IN_SET(errno, EINTR, EAGAIN))
2725 return 0;
2726
2727 return log_error_errno(errno, "Failed to read control group inotify events: %m");
2728 }
2729
2730 FOREACH_INOTIFY_EVENT(e, buffer, l) {
2731 Unit *u;
2732
2733 if (e->wd < 0)
2734 /* Queue overflow has no watch descriptor */
2735 continue;
2736
2737 if (e->mask & IN_IGNORED)
2738 /* The watch was just removed */
2739 continue;
2740
2741 /* Note that inotify might deliver events for a watch even after it was removed,
2742 * because it was queued before the removal. Let's ignore this here safely. */
2743
2744 u = hashmap_get(m->cgroup_control_inotify_wd_unit, INT_TO_PTR(e->wd));
2745 if (u)
2746 unit_add_to_cgroup_empty_queue(u);
2747
2748 u = hashmap_get(m->cgroup_memory_inotify_wd_unit, INT_TO_PTR(e->wd));
2749 if (u)
2750 unit_add_to_cgroup_oom_queue(u);
2751 }
2752 }
2753 }
2754
2755 static int cg_bpf_mask_supported(CGroupMask *ret) {
2756 CGroupMask mask = 0;
2757 int r;
2758
2759 /* BPF-based firewall */
2760 r = bpf_firewall_supported();
2761 if (r > 0)
2762 mask |= CGROUP_MASK_BPF_FIREWALL;
2763
2764 /* BPF-based device access control */
2765 r = bpf_devices_supported();
2766 if (r > 0)
2767 mask |= CGROUP_MASK_BPF_DEVICES;
2768
2769 *ret = mask;
2770 return 0;
2771 }
2772
2773 int manager_setup_cgroup(Manager *m) {
2774 _cleanup_free_ char *path = NULL;
2775 const char *scope_path;
2776 CGroupController c;
2777 int r, all_unified;
2778 CGroupMask mask;
2779 char *e;
2780
2781 assert(m);
2782
2783 /* 1. Determine hierarchy */
2784 m->cgroup_root = mfree(m->cgroup_root);
2785 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root);
2786 if (r < 0)
2787 return log_error_errno(r, "Cannot determine cgroup we are running in: %m");
2788
2789 /* Chop off the init scope, if we are already located in it */
2790 e = endswith(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
2791
2792 /* LEGACY: Also chop off the system slice if we are in
2793 * it. This is to support live upgrades from older systemd
2794 * versions where PID 1 was moved there. Also see
2795 * cg_get_root_path(). */
2796 if (!e && MANAGER_IS_SYSTEM(m)) {
2797 e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE);
2798 if (!e)
2799 e = endswith(m->cgroup_root, "/system"); /* even more legacy */
2800 }
2801 if (e)
2802 *e = 0;
2803
2804 /* And make sure to store away the root value without trailing slash, even for the root dir, so that we can
2805 * easily prepend it everywhere. */
2806 delete_trailing_chars(m->cgroup_root, "/");
2807
2808 /* 2. Show data */
2809 r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
2810 if (r < 0)
2811 return log_error_errno(r, "Cannot find cgroup mount point: %m");
2812
2813 r = cg_unified_flush();
2814 if (r < 0)
2815 return log_error_errno(r, "Couldn't determine if we are running in the unified hierarchy: %m");
2816
2817 all_unified = cg_all_unified();
2818 if (all_unified < 0)
2819 return log_error_errno(all_unified, "Couldn't determine whether we are in all unified mode: %m");
2820 if (all_unified > 0)
2821 log_debug("Unified cgroup hierarchy is located at %s.", path);
2822 else {
2823 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
2824 if (r < 0)
2825 return log_error_errno(r, "Failed to determine whether systemd's own controller is in unified mode: %m");
2826 if (r > 0)
2827 log_debug("Unified cgroup hierarchy is located at %s. Controllers are on legacy hierarchies.", path);
2828 else
2829 log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER_LEGACY ". File system hierarchy is at %s.", path);
2830 }
2831
2832 /* 3. Allocate cgroup empty defer event source */
2833 m->cgroup_empty_event_source = sd_event_source_unref(m->cgroup_empty_event_source);
2834 r = sd_event_add_defer(m->event, &m->cgroup_empty_event_source, on_cgroup_empty_event, m);
2835 if (r < 0)
2836 return log_error_errno(r, "Failed to create cgroup empty event source: %m");
2837
2838 /* Schedule cgroup empty checks early, but after having processed service notification messages or
2839 * SIGCHLD signals, so that a cgroup running empty is always just the last safety net of
2840 * notification, and we have already collected the metadata the notification and SIGCHLD stuff offers. */
2841 r = sd_event_source_set_priority(m->cgroup_empty_event_source, SD_EVENT_PRIORITY_NORMAL-5);
2842 if (r < 0)
2843 return log_error_errno(r, "Failed to set priority of cgroup empty event source: %m");
2844
2845 r = sd_event_source_set_enabled(m->cgroup_empty_event_source, SD_EVENT_OFF);
2846 if (r < 0)
2847 return log_error_errno(r, "Failed to disable cgroup empty event source: %m");
2848
2849 (void) sd_event_source_set_description(m->cgroup_empty_event_source, "cgroup-empty");
2850
2851 /* 4. Install notifier inotify object, or agent */
2852 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0) {
2853
2854 /* In the unified hierarchy we can get cgroup empty notifications via inotify. */
2855
2856 m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
2857 safe_close(m->cgroup_inotify_fd);
2858
2859 m->cgroup_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
2860 if (m->cgroup_inotify_fd < 0)
2861 return log_error_errno(errno, "Failed to create control group inotify object: %m");
2862
2863 r = sd_event_add_io(m->event, &m->cgroup_inotify_event_source, m->cgroup_inotify_fd, EPOLLIN, on_cgroup_inotify_event, m);
2864 if (r < 0)
2865 return log_error_errno(r, "Failed to watch control group inotify object: %m");
2866
2867 /* Process cgroup empty notifications early. Note that when this event is dispatched it'll
2868 * just add the unit to a cgroup empty queue, hence let's run earlier than that. Also see
2869 * handling of cgroup agent notifications, for the classic cgroup hierarchy support. */
2870 r = sd_event_source_set_priority(m->cgroup_inotify_event_source, SD_EVENT_PRIORITY_NORMAL-9);
2871 if (r < 0)
2872 return log_error_errno(r, "Failed to set priority of inotify event source: %m");
2873
2874 (void) sd_event_source_set_description(m->cgroup_inotify_event_source, "cgroup-inotify");
2875
2876 } else if (MANAGER_IS_SYSTEM(m) && manager_owns_host_root_cgroup(m) && !MANAGER_IS_TEST_RUN(m)) {
2877
2878 /* On the legacy hierarchy we only get notifications via cgroup agents. (Which isn't really reliable,
2879 * since it does not generate events when control groups with children run empty.) */
2880
2881 r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, SYSTEMD_CGROUP_AGENT_PATH);
2882 if (r < 0)
2883 log_warning_errno(r, "Failed to install release agent, ignoring: %m");
2884 else if (r > 0)
2885 log_debug("Installed release agent.");
2886 else if (r == 0)
2887 log_debug("Release agent already installed.");
2888 }
2889
2890 /* 5. Make sure we are in the special "init.scope" unit in the root slice. */
2891 scope_path = strjoina(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
2892 r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
2893 if (r >= 0) {
2894 /* Also, move all other userspace processes remaining in the root cgroup into that scope. */
2895 r = cg_migrate(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
2896 if (r < 0)
2897 log_warning_errno(r, "Couldn't move remaining userspace processes, ignoring: %m");
2898
2899 /* 6. And pin it, so that it cannot be unmounted */
2900 safe_close(m->pin_cgroupfs_fd);
2901 m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
2902 if (m->pin_cgroupfs_fd < 0)
2903 return log_error_errno(errno, "Failed to open pin file: %m");
2904
2905 } else if (!MANAGER_IS_TEST_RUN(m))
2906 return log_error_errno(r, "Failed to create %s control group: %m", scope_path);
2907
2908 /* 7. Always enable hierarchical support if it exists... */
2909 if (!all_unified && !MANAGER_IS_TEST_RUN(m))
2910 (void) cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");
2911
2912 /* 8. Figure out which controllers are supported */
2913 r = cg_mask_supported(&m->cgroup_supported);
2914 if (r < 0)
2915 return log_error_errno(r, "Failed to determine supported controllers: %m");
2916
2917 /* 9. Figure out which bpf-based pseudo-controllers are supported */
2918 r = cg_bpf_mask_supported(&mask);
2919 if (r < 0)
2920 return log_error_errno(r, "Failed to determine supported bpf-based pseudo-controllers: %m");
2921 m->cgroup_supported |= mask;
2922
2923 /* 10. Log which controllers are supported */
2924 for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++)
2925 log_debug("Controller '%s' supported: %s", cgroup_controller_to_string(c), yes_no(m->cgroup_supported & CGROUP_CONTROLLER_TO_MASK(c)));
2926
2927 return 0;
2928 }
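/* The three setups distinguished above, sketched with their usual mount layouts (illustrative):
 *
 *     unified:  /sys/fs/cgroup                 cgroup2, all controllers
 *     hybrid:   /sys/fs/cgroup/unified         cgroup2, used by systemd itself
 *               /sys/fs/cgroup/memory, ...     cgroup1 per-controller hierarchies
 *     legacy:   /sys/fs/cgroup/systemd         cgroup1 name=systemd hierarchy
 *               /sys/fs/cgroup/memory, ...     cgroup1 per-controller hierarchies
 */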
2929
2930 void manager_shutdown_cgroup(Manager *m, bool delete) {
2931 assert(m);
2932
2933 /* We can't really delete the group, since we are in it. But
2934 * let's trim it. */
2935 if (delete && m->cgroup_root && m->test_run_flags != MANAGER_TEST_RUN_MINIMAL)
2936 (void) cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);
2937
2938 m->cgroup_empty_event_source = sd_event_source_unref(m->cgroup_empty_event_source);
2939
2940 m->cgroup_control_inotify_wd_unit = hashmap_free(m->cgroup_control_inotify_wd_unit);
2941 m->cgroup_memory_inotify_wd_unit = hashmap_free(m->cgroup_memory_inotify_wd_unit);
2942
2943 m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
2944 m->cgroup_inotify_fd = safe_close(m->cgroup_inotify_fd);
2945
2946 m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd);
2947
2948 m->cgroup_root = mfree(m->cgroup_root);
2949 }
2950
2951 Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) {
2952 char *p;
2953 Unit *u;
2954
2955 assert(m);
2956 assert(cgroup);
2957
2958 u = hashmap_get(m->cgroup_unit, cgroup);
2959 if (u)
2960 return u;
2961
2962 p = strdupa(cgroup);
2963 for (;;) {
2964 char *e;
2965
2966 e = strrchr(p, '/');
2967 if (!e || e == p)
2968 return hashmap_get(m->cgroup_unit, SPECIAL_ROOT_SLICE);
2969
2970 *e = 0;
2971
2972 u = hashmap_get(m->cgroup_unit, p);
2973 if (u)
2974 return u;
2975 }
2976 }
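/* Worked example for the walk above, with a hypothetical cgroup path: looking up
 * "/system.slice/example.service/sub" tries these keys in order and returns the first
 * registered unit, falling back to the root slice if none matches:
 *
 *     /system.slice/example.service/sub
 *     /system.slice/example.service
 *     /system.slice
 */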
2977
2978 Unit *manager_get_unit_by_pid_cgroup(Manager *m, pid_t pid) {
2979 _cleanup_free_ char *cgroup = NULL;
2980
2981 assert(m);
2982
2983 if (!pid_is_valid(pid))
2984 return NULL;
2985
2986 if (cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup) < 0)
2987 return NULL;
2988
2989 return manager_get_unit_by_cgroup(m, cgroup);
2990 }
2991
2992 Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {
2993 Unit *u, **array;
2994
2995 assert(m);
2996
2997 /* Note that a process might be owned by multiple units, we return only one here, which is good enough for most
2998 * cases, though not strictly correct. We prefer the one reported by cgroup membership, as that's the most
2999 * relevant one: children of the process will be assigned to that one, too, before all else. */
3000
3001 if (!pid_is_valid(pid))
3002 return NULL;
3003
3004 if (pid == getpid_cached())
3005 return hashmap_get(m->units, SPECIAL_INIT_SCOPE);
3006
3007 u = manager_get_unit_by_pid_cgroup(m, pid);
3008 if (u)
3009 return u;
3010
3011 u = hashmap_get(m->watch_pids, PID_TO_PTR(pid));
3012 if (u)
3013 return u;
3014
3015 array = hashmap_get(m->watch_pids, PID_TO_PTR(-pid));
3016 if (array)
3017 return array[0];
3018
3019 return NULL;
3020 }
3021
3022 int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
3023 Unit *u;
3024
3025 assert(m);
3026 assert(cgroup);
3027
3028 /* Called on the legacy hierarchy whenever we get an explicit cgroup notification from the cgroup agent process
3029 * or from the --system instance */
3030
3031 log_debug("Got cgroup empty notification for: %s", cgroup);
3032
3033 u = manager_get_unit_by_cgroup(m, cgroup);
3034 if (!u)
3035 return 0;
3036
3037 unit_add_to_cgroup_empty_queue(u);
3038 return 1;
3039 }
3040
3041 int unit_get_memory_current(Unit *u, uint64_t *ret) {
3042 _cleanup_free_ char *v = NULL;
3043 int r;
3044
3045 assert(u);
3046 assert(ret);
3047
3048 if (!UNIT_CGROUP_BOOL(u, memory_accounting))
3049 return -ENODATA;
3050
3051 if (!u->cgroup_path)
3052 return -ENODATA;
3053
3054 /* The root cgroup doesn't expose this information, let's get it from /proc instead */
3055 if (unit_has_host_root_cgroup(u))
3056 return procfs_memory_get_used(ret);
3057
3058 if ((u->cgroup_realized_mask & CGROUP_MASK_MEMORY) == 0)
3059 return -ENODATA;
3060
3061 r = cg_all_unified();
3062 if (r < 0)
3063 return r;
3064 if (r > 0)
3065 r = cg_get_attribute("memory", u->cgroup_path, "memory.current", &v);
3066 else
3067 r = cg_get_attribute("memory", u->cgroup_path, "memory.usage_in_bytes", &v);
3068 if (r == -ENOENT)
3069 return -ENODATA;
3070 if (r < 0)
3071 return r;
3072
3073 return safe_atou64(v, ret);
3074 }
3075
3076 int unit_get_tasks_current(Unit *u, uint64_t *ret) {
3077 _cleanup_free_ char *v = NULL;
3078 int r;
3079
3080 assert(u);
3081 assert(ret);
3082
3083 if (!UNIT_CGROUP_BOOL(u, tasks_accounting))
3084 return -ENODATA;
3085
3086 if (!u->cgroup_path)
3087 return -ENODATA;
3088
3089 /* The root cgroup doesn't expose this information, let's get it from /proc instead */
3090 if (unit_has_host_root_cgroup(u))
3091 return procfs_tasks_get_current(ret);
3092
3093 if ((u->cgroup_realized_mask & CGROUP_MASK_PIDS) == 0)
3094 return -ENODATA;
3095
3096 r = cg_get_attribute("pids", u->cgroup_path, "pids.current", &v);
3097 if (r == -ENOENT)
3098 return -ENODATA;
3099 if (r < 0)
3100 return r;
3101
3102 return safe_atou64(v, ret);
3103 }
3104
3105 static int unit_get_cpu_usage_raw(Unit *u, nsec_t *ret) {
3106 _cleanup_free_ char *v = NULL;
3107 uint64_t ns;
3108 int r;
3109
3110 assert(u);
3111 assert(ret);
3112
3113 if (!u->cgroup_path)
3114 return -ENODATA;
3115
3116 /* The root cgroup doesn't expose this information, let's get it from /proc instead */
3117 if (unit_has_host_root_cgroup(u))
3118 return procfs_cpu_get_usage(ret);
3119
3120 /* Requisite controllers for CPU accounting are not enabled */
3121 if ((get_cpu_accounting_mask() & ~u->cgroup_realized_mask) != 0)
3122 return -ENODATA;
3123
3124 r = cg_all_unified();
3125 if (r < 0)
3126 return r;
3127 if (r > 0) {
3128 _cleanup_free_ char *val = NULL;
3129 uint64_t us;
3130
3131 r = cg_get_keyed_attribute("cpu", u->cgroup_path, "cpu.stat", STRV_MAKE("usage_usec"), &val);
3132 if (IN_SET(r, -ENOENT, -ENXIO))
3133 return -ENODATA;
3134 if (r < 0)
3135 return r;
3136
3137 r = safe_atou64(val, &us);
3138 if (r < 0)
3139 return r;
3140
3141 ns = us * NSEC_PER_USEC;
3142 } else {
3143 r = cg_get_attribute("cpuacct", u->cgroup_path, "cpuacct.usage", &v);
3144 if (r == -ENOENT)
3145 return -ENODATA;
3146 if (r < 0)
3147 return r;
3148
3149 r = safe_atou64(v, &ns);
3150 if (r < 0)
3151 return r;
3152 }
3153
3154 *ret = ns;
3155 return 0;
3156 }
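/* For reference, the keyed cpu.stat format read above on the unified hierarchy; only
 * "usage_usec" is extracted and then converted to nanoseconds (path and values illustrative):
 *
 *     $ cat /sys/fs/cgroup/system.slice/example.service/cpu.stat
 *     usage_usec 874231
 *     user_usec 602114
 *     system_usec 272117
 */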
3157
3158 int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
3159 nsec_t ns;
3160 int r;
3161
3162 assert(u);
3163
3164 /* Retrieve the current CPU usage counter. This will subtract the CPU counter taken when the unit was
3165 * started. If the cgroup has been removed already, returns the last cached value. To cache the value, simply
3166 * call this function with a NULL return value. */
3167
3168 if (!UNIT_CGROUP_BOOL(u, cpu_accounting))
3169 return -ENODATA;
3170
3171 r = unit_get_cpu_usage_raw(u, &ns);
3172 if (r == -ENODATA && u->cpu_usage_last != NSEC_INFINITY) {
3173 /* If we can't get the CPU usage anymore (because the cgroup was already removed, for example), use our
3174 * cached value. */
3175
3176 if (ret)
3177 *ret = u->cpu_usage_last;
3178 return 0;
3179 }
3180 if (r < 0)
3181 return r;
3182
3183 if (ns > u->cpu_usage_base)
3184 ns -= u->cpu_usage_base;
3185 else
3186 ns = 0;
3187
3188 u->cpu_usage_last = ns;
3189 if (ret)
3190 *ret = ns;
3191
3192 return 0;
3193 }
3194
3195 int unit_get_ip_accounting(
3196 Unit *u,
3197 CGroupIPAccountingMetric metric,
3198 uint64_t *ret) {
3199
3200 uint64_t value;
3201 int fd, r;
3202
3203 assert(u);
3204 assert(metric >= 0);
3205 assert(metric < _CGROUP_IP_ACCOUNTING_METRIC_MAX);
3206 assert(ret);
3207
3208 if (!UNIT_CGROUP_BOOL(u, ip_accounting))
3209 return -ENODATA;
3210
3211 fd = IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_INGRESS_PACKETS) ?
3212 u->ip_accounting_ingress_map_fd :
3213 u->ip_accounting_egress_map_fd;
3214 if (fd < 0)
3215 return -ENODATA;
3216
3217 if (IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
3218 r = bpf_firewall_read_accounting(fd, &value, NULL);
3219 else
3220 r = bpf_firewall_read_accounting(fd, NULL, &value);
3221 if (r < 0)
3222 return r;
3223
3224 /* Add in additional metrics from a previous runtime. Note that when reexecing/reloading the daemon we compile
3225 * all BPF programs and maps anew, but serialize the old counters. When deserializing we store them in the
3226 * ip_accounting_extra[] field, and add them in here transparently. */
3227
3228 *ret = value + u->ip_accounting_extra[metric];
3229
3230 return r;
3231 }
3232
3233 static int unit_get_io_accounting_raw(Unit *u, uint64_t ret[static _CGROUP_IO_ACCOUNTING_METRIC_MAX]) {
3234 static const char *const field_names[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
3235 [CGROUP_IO_READ_BYTES] = "rbytes=",
3236 [CGROUP_IO_WRITE_BYTES] = "wbytes=",
3237 [CGROUP_IO_READ_OPERATIONS] = "rios=",
3238 [CGROUP_IO_WRITE_OPERATIONS] = "wios=",
3239 };
3240 uint64_t acc[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {};
3241 _cleanup_free_ char *path = NULL;
3242 _cleanup_fclose_ FILE *f = NULL;
3243 int r;
3244
3245 assert(u);
3246
3247 if (!u->cgroup_path)
3248 return -ENODATA;
3249
3250 if (unit_has_host_root_cgroup(u))
3251 return -ENODATA; /* TODO: return useful data for the top-level cgroup */
3252
3253 r = cg_all_unified();
3254 if (r < 0)
3255 return r;
3256 if (r == 0) /* TODO: support cgroupv1 */
3257 return -ENODATA;
3258
3259 if (!FLAGS_SET(u->cgroup_realized_mask, CGROUP_MASK_IO))
3260 return -ENODATA;
3261
3262 r = cg_get_path("io", u->cgroup_path, "io.stat", &path);
3263 if (r < 0)
3264 return r;
3265
3266 f = fopen(path, "re");
3267 if (!f)
3268 return -errno;
3269
3270 for (;;) {
3271 _cleanup_free_ char *line = NULL;
3272 const char *p;
3273
3274 r = read_line(f, LONG_LINE_MAX, &line);
3275 if (r < 0)
3276 return r;
3277 if (r == 0)
3278 break;
3279
3280 p = line;
3281 p += strcspn(p, WHITESPACE); /* Skip over device major/minor */
3282 p += strspn(p, WHITESPACE); /* Skip over following whitespace */
3283
3284 for (;;) {
3285 _cleanup_free_ char *word = NULL;
3286
3287 r = extract_first_word(&p, &word, NULL, EXTRACT_RETAIN_ESCAPE);
3288 if (r < 0)
3289 return r;
3290 if (r == 0)
3291 break;
3292
3293 for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++) {
3294 const char *x;
3295
3296 x = startswith(word, field_names[i]);
3297 if (x) {
3298 uint64_t w;
3299
3300 r = safe_atou64(x, &w);
3301 if (r < 0)
3302 return r;
3303
3304 /* Sum up the stats of all devices */
3305 acc[i] += w;
3306 break;
3307 }
3308 }
3309 }
3310 }
3311
3312 memcpy(ret, acc, sizeof(acc));
3313 return 0;
3314 }
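/* For reference, the io.stat format parsed above: one line per device, a major:minor field
 * followed by key=value pairs; the loop sums each metric across all devices (line illustrative):
 *
 *     $ cat /sys/fs/cgroup/system.slice/example.service/io.stat
 *     8:0 rbytes=1459200 wbytes=314773504 rios=192 wios=353 dbytes=0 dios=0
 */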
3315
3316 int unit_get_io_accounting(
3317 Unit *u,
3318 CGroupIOAccountingMetric metric,
3319 bool allow_cache,
3320 uint64_t *ret) {
3321
3322 uint64_t raw[_CGROUP_IO_ACCOUNTING_METRIC_MAX];
3323 int r;
3324
3325 /* Retrieve an IO accounting parameter. This will subtract the counter value sampled when the unit was started. */
3326
3327 if (!UNIT_CGROUP_BOOL(u, io_accounting))
3328 return -ENODATA;
3329
3330 if (allow_cache && u->io_accounting_last[metric] != UINT64_MAX)
3331 goto done;
3332
3333 r = unit_get_io_accounting_raw(u, raw);
3334 if (r == -ENODATA && u->io_accounting_last[metric] != UINT64_MAX)
3335 goto done;
3336 if (r < 0)
3337 return r;
3338
3339 for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++) {
3340 /* Saturated subtraction */
3341 if (raw[i] > u->io_accounting_base[i])
3342 u->io_accounting_last[i] = raw[i] - u->io_accounting_base[i];
3343 else
3344 u->io_accounting_last[i] = 0;
3345 }
3346
3347 done:
3348 if (ret)
3349 *ret = u->io_accounting_last[metric];
3350
3351 return 0;
3352 }
3353
3354 int unit_reset_cpu_accounting(Unit *u) {
3355 int r;
3356
3357 assert(u);
3358
3359 u->cpu_usage_last = NSEC_INFINITY;
3360
3361 r = unit_get_cpu_usage_raw(u, &u->cpu_usage_base);
3362 if (r < 0) {
3363 u->cpu_usage_base = 0;
3364 return r;
3365 }
3366
3367 return 0;
3368 }
3369
3370 int unit_reset_ip_accounting(Unit *u) {
3371 int r = 0, q = 0;
3372
3373 assert(u);
3374
3375 if (u->ip_accounting_ingress_map_fd >= 0)
3376 r = bpf_firewall_reset_accounting(u->ip_accounting_ingress_map_fd);
3377
3378 if (u->ip_accounting_egress_map_fd >= 0)
3379 q = bpf_firewall_reset_accounting(u->ip_accounting_egress_map_fd);
3380
3381 zero(u->ip_accounting_extra);
3382
3383 return r < 0 ? r : q;
3384 }
3385
3386 int unit_reset_io_accounting(Unit *u) {
3387 int r;
3388
3389 assert(u);
3390
3391 for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++)
3392 u->io_accounting_last[i] = UINT64_MAX;
3393
3394 r = unit_get_io_accounting_raw(u, u->io_accounting_base);
3395 if (r < 0) {
3396 zero(u->io_accounting_base);
3397 return r;
3398 }
3399
3400 return 0;
3401 }
3402
3403 int unit_reset_accounting(Unit *u) {
3404 int r, q, v;
3405
3406 assert(u);
3407
3408 r = unit_reset_cpu_accounting(u);
3409 q = unit_reset_io_accounting(u);
3410 v = unit_reset_ip_accounting(u);
3411
3412 return r < 0 ? r : q < 0 ? q : v;
3413 }
3414
3415 void unit_invalidate_cgroup(Unit *u, CGroupMask m) {
3416 assert(u);
3417
3418 if (!UNIT_HAS_CGROUP_CONTEXT(u))
3419 return;
3420
3421 if (m == 0)
3422 return;
3423
3424 /* always invalidate compat pairs together */
3425 if (m & (CGROUP_MASK_IO | CGROUP_MASK_BLKIO))
3426 m |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;
3427
3428 if (m & (CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT))
3429 m |= CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT;
3430
3431 if (FLAGS_SET(u->cgroup_invalidated_mask, m)) /* NOP? */
3432 return;
3433
3434 u->cgroup_invalidated_mask |= m;
3435 unit_add_to_cgroup_realize_queue(u);
3436 }
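/* Example of the compat pairing above: with m == CGROUP_MASK_IO, the test
 * m & (CGROUP_MASK_IO | CGROUP_MASK_BLKIO) is non-zero, so m is widened to
 * CGROUP_MASK_IO | CGROUP_MASK_BLKIO and both the unified "io" and the legacy "blkio"
 * settings get re-applied on the next realization. */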
3437
3438 void unit_invalidate_cgroup_bpf(Unit *u) {
3439 assert(u);
3440
3441 if (!UNIT_HAS_CGROUP_CONTEXT(u))
3442 return;
3443
3444 if (u->cgroup_invalidated_mask & CGROUP_MASK_BPF_FIREWALL) /* NOP? */
3445 return;
3446
3447 u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
3448 unit_add_to_cgroup_realize_queue(u);
3449
3450 /* If we are a slice unit, we also need to compile a new BPF program for all our children, as the IP access
3451 * list of our children includes our own. */
3452 if (u->type == UNIT_SLICE) {
3453 Unit *member;
3454 Iterator i;
3455 void *v;
3456
3457 HASHMAP_FOREACH_KEY(v, member, u->dependencies[UNIT_BEFORE], i) {
3458 if (UNIT_DEREF(member->slice) == u)
3459 unit_invalidate_cgroup_bpf(member);
3460 }
3461 }
3462 }
3463
3464 bool unit_cgroup_delegate(Unit *u) {
3465 CGroupContext *c;
3466
3467 assert(u);
3468
3469 if (!UNIT_VTABLE(u)->can_delegate)
3470 return false;
3471
3472 c = unit_get_cgroup_context(u);
3473 if (!c)
3474 return false;
3475
3476 return c->delegate;
3477 }
3478
3479 void manager_invalidate_startup_units(Manager *m) {
3480 Iterator i;
3481 Unit *u;
3482
3483 assert(m);
3484
3485 SET_FOREACH(u, m->startup_units, i)
3486 unit_invalidate_cgroup(u, CGROUP_MASK_CPU|CGROUP_MASK_IO|CGROUP_MASK_BLKIO);
3487 }
3488
3489 static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
3490 [CGROUP_AUTO] = "auto",
3491 [CGROUP_CLOSED] = "closed",
3492 [CGROUP_STRICT] = "strict",
3493 };
3494
3495 DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);