/* SPDX-License-Identifier: LGPL-2.1+ */

#include <fcntl.h>
#include <fnmatch.h>

#include "alloc-util.h"
#include "blockdev-util.h"
#include "bpf-firewall.h"
#include "btrfs-util.h"
#include "bus-error.h"
#include "cgroup-util.h"
#include "cgroup.h"
#include "fd-util.h"
#include "fileio.h"
#include "fs-util.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
#include "procfs-util.h"
#include "special.h"
#include "stdio-util.h"
#include "string-table.h"
#include "string-util.h"
#include "virt.h"

#define CGROUP_CPU_QUOTA_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)

bool manager_owns_root_cgroup(Manager *m) {
        assert(m);

        /* Returns true if we are managing the root cgroup. Note that it isn't sufficient to just check whether the
         * group root path equals "/" since that will also be the case if CLONE_NEWCGROUP is in the mix. Since there
         * appears to be no nice way to detect whether we are in a CLONE_NEWCGROUP namespace, we instead just check if
         * we run in any kind of container virtualization. */

        if (detect_container() > 0)
                return false;

        return empty_or_root(m->cgroup_root);
}

bool unit_has_root_cgroup(Unit *u) {
        assert(u);

        /* Returns whether this unit manages the root cgroup. This will return true if this unit is the root slice and
         * the manager manages the root cgroup. */

        if (!manager_owns_root_cgroup(u->manager))
                return false;

        return unit_has_name(u, SPECIAL_ROOT_SLICE);
}

static void cgroup_compat_warn(void) {
        static bool cgroup_compat_warned = false;

        if (cgroup_compat_warned)
                return;

        log_warning("cgroup compatibility translation between legacy and unified hierarchy settings activated. "
                    "See cgroup-compat debug messages for details.");

        cgroup_compat_warned = true;
}

#define log_cgroup_compat(unit, fmt, ...) do {                                  \
                cgroup_compat_warn();                                           \
                log_unit_debug(unit, "cgroup-compat: " fmt, ##__VA_ARGS__);     \
        } while (false)

void cgroup_context_init(CGroupContext *c) {
        assert(c);

        /* Initialize everything to the kernel defaults, assuming the
         * structure is preinitialized to 0 */

        c->cpu_weight = CGROUP_WEIGHT_INVALID;
        c->startup_cpu_weight = CGROUP_WEIGHT_INVALID;
        c->cpu_quota_per_sec_usec = USEC_INFINITY;

        c->cpu_shares = CGROUP_CPU_SHARES_INVALID;
        c->startup_cpu_shares = CGROUP_CPU_SHARES_INVALID;

        c->memory_high = CGROUP_LIMIT_MAX;
        c->memory_max = CGROUP_LIMIT_MAX;
        c->memory_swap_max = CGROUP_LIMIT_MAX;

        c->memory_limit = CGROUP_LIMIT_MAX;

        c->io_weight = CGROUP_WEIGHT_INVALID;
        c->startup_io_weight = CGROUP_WEIGHT_INVALID;

        c->blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;
        c->startup_blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;

        c->tasks_max = (uint64_t) -1;
}

void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
        assert(c);
        assert(a);

        LIST_REMOVE(device_allow, c->device_allow, a);
        free(a->path);
        free(a);
}

void cgroup_context_free_io_device_weight(CGroupContext *c, CGroupIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->io_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_io_device_latency(CGroupContext *c, CGroupIODeviceLatency *l) {
        assert(c);
        assert(l);

        LIST_REMOVE(device_latencies, c->io_device_latencies, l);
        free(l->path);
        free(l);
}

void cgroup_context_free_io_device_limit(CGroupContext *c, CGroupIODeviceLimit *l) {
        assert(c);
        assert(l);

        LIST_REMOVE(device_limits, c->io_device_limits, l);
        free(l->path);
        free(l);
}

void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->blockio_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b) {
        assert(c);
        assert(b);

        LIST_REMOVE(device_bandwidths, c->blockio_device_bandwidths, b);
        free(b->path);
        free(b);
}

void cgroup_context_done(CGroupContext *c) {
        assert(c);

        while (c->io_device_weights)
                cgroup_context_free_io_device_weight(c, c->io_device_weights);

        while (c->io_device_latencies)
                cgroup_context_free_io_device_latency(c, c->io_device_latencies);

        while (c->io_device_limits)
                cgroup_context_free_io_device_limit(c, c->io_device_limits);

        while (c->blockio_device_weights)
                cgroup_context_free_blockio_device_weight(c, c->blockio_device_weights);

        while (c->blockio_device_bandwidths)
                cgroup_context_free_blockio_device_bandwidth(c, c->blockio_device_bandwidths);

        while (c->device_allow)
                cgroup_context_free_device_allow(c, c->device_allow);

        c->ip_address_allow = ip_address_access_free_all(c->ip_address_allow);
        c->ip_address_deny = ip_address_access_free_all(c->ip_address_deny);
}

void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) {
        CGroupIODeviceLimit *il;
        CGroupIODeviceWeight *iw;
        CGroupIODeviceLatency *l;
        CGroupBlockIODeviceBandwidth *b;
        CGroupBlockIODeviceWeight *w;
        CGroupDeviceAllow *a;
        IPAddressAccessItem *iaai;
        char u[FORMAT_TIMESPAN_MAX];

        assert(c);
        assert(f);

        prefix = strempty(prefix);

        fprintf(f,
                "%sCPUAccounting=%s\n"
                "%sIOAccounting=%s\n"
                "%sBlockIOAccounting=%s\n"
                "%sMemoryAccounting=%s\n"
                "%sTasksAccounting=%s\n"
                "%sIPAccounting=%s\n"
                "%sCPUWeight=%" PRIu64 "\n"
                "%sStartupCPUWeight=%" PRIu64 "\n"
                "%sCPUShares=%" PRIu64 "\n"
                "%sStartupCPUShares=%" PRIu64 "\n"
                "%sCPUQuotaPerSecSec=%s\n"
                "%sIOWeight=%" PRIu64 "\n"
                "%sStartupIOWeight=%" PRIu64 "\n"
                "%sBlockIOWeight=%" PRIu64 "\n"
                "%sStartupBlockIOWeight=%" PRIu64 "\n"
                "%sMemoryMin=%" PRIu64 "\n"
                "%sMemoryLow=%" PRIu64 "\n"
                "%sMemoryHigh=%" PRIu64 "\n"
                "%sMemoryMax=%" PRIu64 "\n"
                "%sMemorySwapMax=%" PRIu64 "\n"
                "%sMemoryLimit=%" PRIu64 "\n"
                "%sTasksMax=%" PRIu64 "\n"
                "%sDevicePolicy=%s\n"
                "%sDelegate=%s\n",
                prefix, yes_no(c->cpu_accounting),
                prefix, yes_no(c->io_accounting),
                prefix, yes_no(c->blockio_accounting),
                prefix, yes_no(c->memory_accounting),
                prefix, yes_no(c->tasks_accounting),
                prefix, yes_no(c->ip_accounting),
                prefix, c->cpu_weight,
                prefix, c->startup_cpu_weight,
                prefix, c->cpu_shares,
                prefix, c->startup_cpu_shares,
                prefix, format_timespan(u, sizeof(u), c->cpu_quota_per_sec_usec, 1),
                prefix, c->io_weight,
                prefix, c->startup_io_weight,
                prefix, c->blockio_weight,
                prefix, c->startup_blockio_weight,
                prefix, c->memory_min,
                prefix, c->memory_low,
                prefix, c->memory_high,
                prefix, c->memory_max,
                prefix, c->memory_swap_max,
                prefix, c->memory_limit,
                prefix, c->tasks_max,
                prefix, cgroup_device_policy_to_string(c->device_policy),
                prefix, yes_no(c->delegate));

        if (c->delegate) {
                _cleanup_free_ char *t = NULL;

                (void) cg_mask_to_string(c->delegate_controllers, &t);

                fprintf(f, "%sDelegateControllers=%s\n",
                        prefix,
                        strempty(t));
        }

        LIST_FOREACH(device_allow, a, c->device_allow)
                fprintf(f,
                        "%sDeviceAllow=%s %s%s%s\n",
                        prefix,
                        a->path,
                        a->r ? "r" : "", a->w ? "w" : "", a->m ? "m" : "");

        LIST_FOREACH(device_weights, iw, c->io_device_weights)
                fprintf(f,
                        "%sIODeviceWeight=%s %" PRIu64 "\n",
                        prefix,
                        iw->path,
                        iw->weight);

        LIST_FOREACH(device_latencies, l, c->io_device_latencies)
                fprintf(f,
                        "%sIODeviceLatencyTargetSec=%s %s\n",
                        prefix,
                        l->path,
                        format_timespan(u, sizeof(u), l->target_usec, 1));

        LIST_FOREACH(device_limits, il, c->io_device_limits) {
                char buf[FORMAT_BYTES_MAX];
                CGroupIOLimitType type;

                for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                        if (il->limits[type] != cgroup_io_limit_defaults[type])
                                fprintf(f,
                                        "%s%s=%s %s\n",
                                        prefix,
                                        cgroup_io_limit_type_to_string(type),
                                        il->path,
                                        format_bytes(buf, sizeof(buf), il->limits[type]));
        }

        LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                fprintf(f,
                        "%sBlockIODeviceWeight=%s %" PRIu64 "\n",
                        prefix,
                        w->path,
                        w->weight);

        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                char buf[FORMAT_BYTES_MAX];

                if (b->rbps != CGROUP_LIMIT_MAX)
                        fprintf(f,
                                "%sBlockIOReadBandwidth=%s %s\n",
                                prefix,
                                b->path,
                                format_bytes(buf, sizeof(buf), b->rbps));
                if (b->wbps != CGROUP_LIMIT_MAX)
                        fprintf(f,
                                "%sBlockIOWriteBandwidth=%s %s\n",
                                prefix,
                                b->path,
                                format_bytes(buf, sizeof(buf), b->wbps));
        }

        LIST_FOREACH(items, iaai, c->ip_address_allow) {
                _cleanup_free_ char *k = NULL;

                (void) in_addr_to_string(iaai->family, &iaai->address, &k);
                fprintf(f, "%sIPAddressAllow=%s/%u\n", prefix, strnull(k), iaai->prefixlen);
        }

        LIST_FOREACH(items, iaai, c->ip_address_deny) {
                _cleanup_free_ char *k = NULL;

                (void) in_addr_to_string(iaai->family, &iaai->address, &k);
                fprintf(f, "%sIPAddressDeny=%s/%u\n", prefix, strnull(k), iaai->prefixlen);
        }
}

int cgroup_add_device_allow(CGroupContext *c, const char *dev, const char *mode) {
        _cleanup_free_ CGroupDeviceAllow *a = NULL;
        _cleanup_free_ char *d = NULL;

        assert(c);
        assert(dev);
        assert(isempty(mode) || in_charset(mode, "rwm"));

        a = new(CGroupDeviceAllow, 1);
        if (!a)
                return -ENOMEM;

        d = strdup(dev);
        if (!d)
                return -ENOMEM;

        *a = (CGroupDeviceAllow) {
                .path = TAKE_PTR(d),
                .r = isempty(mode) || !!strchr(mode, 'r'),
                .w = isempty(mode) || !!strchr(mode, 'w'),
                .m = isempty(mode) || !!strchr(mode, 'm'),
        };

        LIST_PREPEND(device_allow, c->device_allow, a);
        TAKE_PTR(a);

        return 0;
}
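
/* Illustrative usage sketch (not upstream documentation): a caller that wants to grant a unit read/write
 * access to /dev/null could do
 *
 *         (void) cgroup_add_device_allow(c, "/dev/null", "rw");
 *
 * An empty mode string grants all of "rwm". */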

static int lookup_block_device(const char *p, dev_t *ret) {
        struct stat st;
        int r;

        assert(p);
        assert(ret);

        if (stat(p, &st) < 0)
                return log_warning_errno(errno, "Couldn't stat device '%s': %m", p);

        if (S_ISBLK(st.st_mode))
                *ret = st.st_rdev;
        else if (major(st.st_dev) != 0)
                *ret = st.st_dev; /* If this is not a device node then use the block device this file is stored on */
        else {
                /* If this is btrfs, getting the backing block device is a bit harder */
                r = btrfs_get_block_device(p, ret);
                if (r < 0 && r != -ENOTTY)
                        return log_warning_errno(r, "Failed to determine block device backing btrfs file system '%s': %m", p);
                if (r == -ENOTTY) {
                        log_warning("'%s' is not a block device node, and file system block device cannot be determined or is not local.", p);
                        return -ENODEV;
                }
        }

        /* If this is a LUKS device, try to get the originating block device */
        (void) block_get_originating(*ret, ret);

        /* If this is a partition, try to get the originating block device */
        (void) block_get_whole_disk(*ret, ret);
        return 0;
}
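
/* Illustrative note (restating the code above, not additional upstream documentation): for a path such as
 * "/var/log" that lives on a partition, the stat() yields the partition's device number and
 * block_get_whole_disk() then maps it to the parent disk; that whole-disk major:minor is what the io/blkio
 * attributes applied further below are keyed on. */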

static int whitelist_device(const char *path, const char *node, const char *acc) {
        char buf[2+DECIMAL_STR_MAX(dev_t)*2+2+4];
        struct stat st;
        bool ignore_notfound;
        int r;

        assert(path);
        assert(acc);

        if (node[0] == '-') {
                /* Non-existent paths starting with "-" must be silently ignored */
                node++;
                ignore_notfound = true;
        } else
                ignore_notfound = false;

        if (stat(node, &st) < 0) {
                if (errno == ENOENT && ignore_notfound)
                        return 0;

                return log_warning_errno(errno, "Couldn't stat device %s: %m", node);
        }

        if (!S_ISCHR(st.st_mode) && !S_ISBLK(st.st_mode)) {
                log_warning("%s is not a device.", node);
                return -ENODEV;
        }

        sprintf(buf,
                "%c %u:%u %s",
                S_ISCHR(st.st_mode) ? 'c' : 'b',
                major(st.st_rdev), minor(st.st_rdev),
                acc);

        r = cg_set_attribute("devices", path, "devices.allow", buf);
        if (r < 0)
                log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                               "Failed to set devices.allow on %s: %m", path);

        return r;
}
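
/* Illustrative example: whitelist_device(path, "/dev/null", "rwm") resolves /dev/null (the character
 * device with major 1, minor 3) and writes "c 1:3 rwm" to the cgroup's devices.allow attribute. */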

static int whitelist_major(const char *path, const char *name, char type, const char *acc) {
        _cleanup_fclose_ FILE *f = NULL;
        char line[LINE_MAX];
        bool good = false;
        int r;

        assert(path);
        assert(acc);
        assert(IN_SET(type, 'b', 'c'));

        f = fopen("/proc/devices", "re");
        if (!f)
                return log_warning_errno(errno, "Cannot open /proc/devices to resolve %s (%c): %m", name, type);

        FOREACH_LINE(line, f, goto fail) {
                char buf[2+DECIMAL_STR_MAX(unsigned)+3+4], *p, *w;
                unsigned maj;

                truncate_nl(line);

                if (type == 'c' && streq(line, "Character devices:")) {
                        good = true;
                        continue;
                }

                if (type == 'b' && streq(line, "Block devices:")) {
                        good = true;
                        continue;
                }

                if (isempty(line)) {
                        good = false;
                        continue;
                }

                if (!good)
                        continue;

                p = strstrip(line);

                w = strpbrk(p, WHITESPACE);
                if (!w)
                        continue;
                *w = 0;

                r = safe_atou(p, &maj);
                if (r < 0)
                        continue;
                if (maj <= 0)
                        continue;

                w++;
                w += strspn(w, WHITESPACE);

                if (fnmatch(name, w, 0) != 0)
                        continue;

                sprintf(buf,
                        "%c %u:* %s",
                        type,
                        maj,
                        acc);

                r = cg_set_attribute("devices", path, "devices.allow", buf);
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set devices.allow on %s: %m", path);
        }

        return 0;

fail:
        return log_warning_errno(errno, "Failed to read /proc/devices: %m");
}
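
/* Illustrative example: whitelist_major(path, "pts", 'c', "rw") scans /proc/devices for character-device
 * entries whose name matches "pts" and writes "c <maj>:* rw" (typically "c 136:* rw") to devices.allow,
 * i.e. it whitelists every minor of that major in one go. */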

static bool cgroup_context_has_cpu_weight(CGroupContext *c) {
        return c->cpu_weight != CGROUP_WEIGHT_INVALID ||
                c->startup_cpu_weight != CGROUP_WEIGHT_INVALID;
}

static bool cgroup_context_has_cpu_shares(CGroupContext *c) {
        return c->cpu_shares != CGROUP_CPU_SHARES_INVALID ||
                c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID;
}

static uint64_t cgroup_context_cpu_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->startup_cpu_weight;
        else if (c->cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->cpu_weight;
        else
                return CGROUP_WEIGHT_DEFAULT;
}

static uint64_t cgroup_context_cpu_shares(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->startup_cpu_shares;
        else if (c->cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->cpu_shares;
        else
                return CGROUP_CPU_SHARES_DEFAULT;
}

static void cgroup_apply_unified_cpu_config(Unit *u, uint64_t weight, uint64_t quota) {
        char buf[MAX(DECIMAL_STR_MAX(uint64_t) + 1, (DECIMAL_STR_MAX(usec_t) + 1) * 2)];
        int r;

        xsprintf(buf, "%" PRIu64 "\n", weight);
        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.weight", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.weight: %m");

        if (quota != USEC_INFINITY)
                xsprintf(buf, USEC_FMT " " USEC_FMT "\n",
                         quota * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC, CGROUP_CPU_QUOTA_PERIOD_USEC);
        else
                xsprintf(buf, "max " USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);

        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.max", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.max: %m");
}
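
/* Illustrative example: with the fixed 100ms period above, a per-second quota of 2 * USEC_PER_SEC
 * (i.e. CPUQuota=200%) is written to cpu.max as "200000 100000"; an infinite quota yields "max 100000". */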

static void cgroup_apply_legacy_cpu_config(Unit *u, uint64_t shares, uint64_t quota) {
        char buf[MAX(DECIMAL_STR_MAX(uint64_t), DECIMAL_STR_MAX(usec_t)) + 1];
        int r;

        xsprintf(buf, "%" PRIu64 "\n", shares);
        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.shares", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.shares: %m");

        xsprintf(buf, USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);
        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_period_us", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.cfs_period_us: %m");

        if (quota != USEC_INFINITY) {
                xsprintf(buf, USEC_FMT "\n", quota * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC);
                r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_quota_us", buf);
        } else
                r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_quota_us", "-1");
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.cfs_quota_us: %m");
}

static uint64_t cgroup_cpu_shares_to_weight(uint64_t shares) {
        return CLAMP(shares * CGROUP_WEIGHT_DEFAULT / CGROUP_CPU_SHARES_DEFAULT,
                     CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}

static uint64_t cgroup_cpu_weight_to_shares(uint64_t weight) {
        return CLAMP(weight * CGROUP_CPU_SHARES_DEFAULT / CGROUP_WEIGHT_DEFAULT,
                     CGROUP_CPU_SHARES_MIN, CGROUP_CPU_SHARES_MAX);
}
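
/* Illustrative example: with CGROUP_CPU_SHARES_DEFAULT (1024) and CGROUP_WEIGHT_DEFAULT (100), legacy
 * shares of 1024 translate to a unified weight of 100 and 2048 to 200; very small values are clamped,
 * e.g. shares of 2 compute to 0 and are raised to CGROUP_WEIGHT_MIN. */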
594
508c45da 595static bool cgroup_context_has_io_config(CGroupContext *c) {
538b4852
TH
596 return c->io_accounting ||
597 c->io_weight != CGROUP_WEIGHT_INVALID ||
598 c->startup_io_weight != CGROUP_WEIGHT_INVALID ||
599 c->io_device_weights ||
6ae4283c 600 c->io_device_latencies ||
538b4852
TH
601 c->io_device_limits;
602}
603
508c45da 604static bool cgroup_context_has_blockio_config(CGroupContext *c) {
538b4852
TH
605 return c->blockio_accounting ||
606 c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
607 c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
608 c->blockio_device_weights ||
609 c->blockio_device_bandwidths;
610}
611
508c45da 612static uint64_t cgroup_context_io_weight(CGroupContext *c, ManagerState state) {
64faf04c
TH
613 if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
614 c->startup_io_weight != CGROUP_WEIGHT_INVALID)
615 return c->startup_io_weight;
616 else if (c->io_weight != CGROUP_WEIGHT_INVALID)
617 return c->io_weight;
618 else
619 return CGROUP_WEIGHT_DEFAULT;
620}
621
508c45da 622static uint64_t cgroup_context_blkio_weight(CGroupContext *c, ManagerState state) {
64faf04c
TH
623 if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
624 c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
625 return c->startup_blockio_weight;
626 else if (c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
627 return c->blockio_weight;
628 else
629 return CGROUP_BLKIO_WEIGHT_DEFAULT;
630}
631
508c45da 632static uint64_t cgroup_weight_blkio_to_io(uint64_t blkio_weight) {
538b4852
TH
633 return CLAMP(blkio_weight * CGROUP_WEIGHT_DEFAULT / CGROUP_BLKIO_WEIGHT_DEFAULT,
634 CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
635}
636
508c45da 637static uint64_t cgroup_weight_io_to_blkio(uint64_t io_weight) {
538b4852
TH
638 return CLAMP(io_weight * CGROUP_BLKIO_WEIGHT_DEFAULT / CGROUP_WEIGHT_DEFAULT,
639 CGROUP_BLKIO_WEIGHT_MIN, CGROUP_BLKIO_WEIGHT_MAX);
640}
641
f29ff115 642static void cgroup_apply_io_device_weight(Unit *u, const char *dev_path, uint64_t io_weight) {
64faf04c
TH
643 char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
644 dev_t dev;
645 int r;
646
647 r = lookup_block_device(dev_path, &dev);
648 if (r < 0)
649 return;
650
651 xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), io_weight);
f29ff115 652 r = cg_set_attribute("io", u->cgroup_path, "io.weight", buf);
64faf04c 653 if (r < 0)
f29ff115
TH
654 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
655 "Failed to set io.weight: %m");
64faf04c
TH
656}
657
f29ff115 658static void cgroup_apply_blkio_device_weight(Unit *u, const char *dev_path, uint64_t blkio_weight) {
64faf04c
TH
659 char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
660 dev_t dev;
661 int r;
662
663 r = lookup_block_device(dev_path, &dev);
664 if (r < 0)
665 return;
666
667 xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), blkio_weight);
f29ff115 668 r = cg_set_attribute("blkio", u->cgroup_path, "blkio.weight_device", buf);
64faf04c 669 if (r < 0)
f29ff115
TH
670 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
671 "Failed to set blkio.weight_device: %m");
64faf04c
TH
672}
673
6ae4283c
TH
674static void cgroup_apply_io_device_latency(Unit *u, const char *dev_path, usec_t target) {
675 char buf[DECIMAL_STR_MAX(dev_t)*2+2+7+DECIMAL_STR_MAX(uint64_t)+1];
676 dev_t dev;
677 int r;
678
679 r = lookup_block_device(dev_path, &dev);
680 if (r < 0)
681 return;
682
683 if (target != USEC_INFINITY)
684 xsprintf(buf, "%u:%u target=%" PRIu64 "\n", major(dev), minor(dev), target);
685 else
686 xsprintf(buf, "%u:%u target=max\n", major(dev), minor(dev));
687
688 r = cg_set_attribute("io", u->cgroup_path, "io.latency", buf);
689 if (r < 0)
690 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
691 "Failed to set io.latency on cgroup %s: %m", u->cgroup_path);
692}
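
/* Illustrative example: a 10ms latency target for the device 8:0 is written to io.latency as
 * "8:0 target=10000" (the value is in microseconds); USEC_INFINITY is rendered as "target=max". */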
693
17ae2780 694static void cgroup_apply_io_device_limit(Unit *u, const char *dev_path, uint64_t *limits) {
64faf04c
TH
695 char limit_bufs[_CGROUP_IO_LIMIT_TYPE_MAX][DECIMAL_STR_MAX(uint64_t)];
696 char buf[DECIMAL_STR_MAX(dev_t)*2+2+(6+DECIMAL_STR_MAX(uint64_t)+1)*4];
697 CGroupIOLimitType type;
698 dev_t dev;
64faf04c
TH
699 int r;
700
701 r = lookup_block_device(dev_path, &dev);
702 if (r < 0)
17ae2780 703 return;
64faf04c 704
17ae2780
LP
705 for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
706 if (limits[type] != cgroup_io_limit_defaults[type])
64faf04c 707 xsprintf(limit_bufs[type], "%" PRIu64, limits[type]);
17ae2780 708 else
64faf04c 709 xsprintf(limit_bufs[type], "%s", limits[type] == CGROUP_LIMIT_MAX ? "max" : "0");
64faf04c
TH
710
711 xsprintf(buf, "%u:%u rbps=%s wbps=%s riops=%s wiops=%s\n", major(dev), minor(dev),
712 limit_bufs[CGROUP_IO_RBPS_MAX], limit_bufs[CGROUP_IO_WBPS_MAX],
713 limit_bufs[CGROUP_IO_RIOPS_MAX], limit_bufs[CGROUP_IO_WIOPS_MAX]);
f29ff115 714 r = cg_set_attribute("io", u->cgroup_path, "io.max", buf);
64faf04c 715 if (r < 0)
f29ff115
TH
716 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
717 "Failed to set io.max: %m");
64faf04c
TH
718}
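
/* Illustrative example: a read-bandwidth limit of 1000000 bytes/s on device 8:0, with the other three
 * limits left at their defaults, is written to io.max as "8:0 rbps=1000000 wbps=max riops=max wiops=max". */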
719
17ae2780 720static void cgroup_apply_blkio_device_limit(Unit *u, const char *dev_path, uint64_t rbps, uint64_t wbps) {
64faf04c
TH
721 char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
722 dev_t dev;
64faf04c
TH
723 int r;
724
725 r = lookup_block_device(dev_path, &dev);
726 if (r < 0)
17ae2780 727 return;
64faf04c 728
64faf04c 729 sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), rbps);
f29ff115 730 r = cg_set_attribute("blkio", u->cgroup_path, "blkio.throttle.read_bps_device", buf);
64faf04c 731 if (r < 0)
f29ff115
TH
732 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
733 "Failed to set blkio.throttle.read_bps_device: %m");
64faf04c 734
64faf04c 735 sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), wbps);
f29ff115 736 r = cg_set_attribute("blkio", u->cgroup_path, "blkio.throttle.write_bps_device", buf);
64faf04c 737 if (r < 0)
f29ff115
TH
738 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
739 "Failed to set blkio.throttle.write_bps_device: %m");
64faf04c
TH
740}
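
/* Illustrative example: on the legacy hierarchy the same limits are split across two attributes, e.g.
 * "8:0 1000000" is written to blkio.throttle.read_bps_device and the write limit is written in the same
 * format to blkio.throttle.write_bps_device. */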
741
da4d897e 742static bool cgroup_context_has_unified_memory_config(CGroupContext *c) {
48422635 743 return c->memory_min > 0 || c->memory_low > 0 || c->memory_high != CGROUP_LIMIT_MAX || c->memory_max != CGROUP_LIMIT_MAX || c->memory_swap_max != CGROUP_LIMIT_MAX;
da4d897e
TH
744}
745
f29ff115 746static void cgroup_apply_unified_memory_limit(Unit *u, const char *file, uint64_t v) {
da4d897e
TH
747 char buf[DECIMAL_STR_MAX(uint64_t) + 1] = "max";
748 int r;
749
750 if (v != CGROUP_LIMIT_MAX)
751 xsprintf(buf, "%" PRIu64 "\n", v);
752
f29ff115 753 r = cg_set_attribute("memory", u->cgroup_path, file, buf);
da4d897e 754 if (r < 0)
f29ff115
TH
755 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
756 "Failed to set %s: %m", file);
da4d897e
TH
757}
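
/* Illustrative example: cgroup_apply_unified_memory_limit(u, "memory.max", UINT64_C(1073741824)) writes
 * "1073741824" to memory.max, while a value of CGROUP_LIMIT_MAX leaves the preinitialized "max" in place. */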
758
0f2d84d2 759static void cgroup_apply_firewall(Unit *u) {
0f2d84d2
LP
760 assert(u);
761
acf7f253 762 /* Best-effort: let's apply IP firewalling and/or accounting if that's enabled */
906c06f6 763
acf7f253 764 if (bpf_firewall_compile(u) < 0)
906c06f6
DM
765 return;
766
767 (void) bpf_firewall_install(u);
906c06f6
DM
768}
769
770static void cgroup_context_apply(
771 Unit *u,
772 CGroupMask apply_mask,
773 bool apply_bpf,
774 ManagerState state) {
775
f29ff115
TH
776 const char *path;
777 CGroupContext *c;
01efdf13 778 bool is_root;
4ad49000
LP
779 int r;
780
f29ff115
TH
781 assert(u);
782
906c06f6
DM
783 /* Nothing to do? Exit early! */
784 if (apply_mask == 0 && !apply_bpf)
4ad49000 785 return;
8e274523 786
f3725e64
LP
787 /* Some cgroup attributes are not supported on the root cgroup, hence silently ignore */
788 is_root = unit_has_root_cgroup(u);
789
790 assert_se(c = unit_get_cgroup_context(u));
791 assert_se(path = u->cgroup_path);
792
793 if (is_root) /* Make sure we don't try to display messages with an empty path. */
6da13913 794 path = "/";
01efdf13 795
714e2e1d
LP
796 /* We generally ignore errors caused by read-only mounted
797 * cgroup trees (assuming we are running in a container then),
798 * and missing cgroups, i.e. EROFS and ENOENT. */
799
906c06f6
DM
800 if ((apply_mask & CGROUP_MASK_CPU) && !is_root) {
801 bool has_weight, has_shares;
802
803 has_weight = cgroup_context_has_cpu_weight(c);
804 has_shares = cgroup_context_has_cpu_shares(c);
8e274523 805
b4cccbc1 806 if (cg_all_unified() > 0) {
66ebf6c0 807 uint64_t weight;
b2f8b02e 808
66ebf6c0
TH
809 if (has_weight)
810 weight = cgroup_context_cpu_weight(c, state);
811 else if (has_shares) {
812 uint64_t shares = cgroup_context_cpu_shares(c, state);
b2f8b02e 813
66ebf6c0
TH
814 weight = cgroup_cpu_shares_to_weight(shares);
815
816 log_cgroup_compat(u, "Applying [Startup]CpuShares %" PRIu64 " as [Startup]CpuWeight %" PRIu64 " on %s",
817 shares, weight, path);
818 } else
819 weight = CGROUP_WEIGHT_DEFAULT;
820
821 cgroup_apply_unified_cpu_config(u, weight, c->cpu_quota_per_sec_usec);
822 } else {
823 uint64_t shares;
824
7d862ab8 825 if (has_weight) {
66ebf6c0
TH
826 uint64_t weight = cgroup_context_cpu_weight(c, state);
827
828 shares = cgroup_cpu_weight_to_shares(weight);
829
830 log_cgroup_compat(u, "Applying [Startup]CpuWeight %" PRIu64 " as [Startup]CpuShares %" PRIu64 " on %s",
831 weight, shares, path);
7d862ab8
TH
832 } else if (has_shares)
833 shares = cgroup_context_cpu_shares(c, state);
834 else
66ebf6c0
TH
835 shares = CGROUP_CPU_SHARES_DEFAULT;
836
837 cgroup_apply_legacy_cpu_config(u, shares, c->cpu_quota_per_sec_usec);
838 }
4ad49000
LP
839 }
840
906c06f6 841 if (apply_mask & CGROUP_MASK_IO) {
538b4852
TH
842 bool has_io = cgroup_context_has_io_config(c);
843 bool has_blockio = cgroup_context_has_blockio_config(c);
13c31542
TH
844
845 if (!is_root) {
64faf04c
TH
846 char buf[8+DECIMAL_STR_MAX(uint64_t)+1];
847 uint64_t weight;
13c31542 848
538b4852
TH
849 if (has_io)
850 weight = cgroup_context_io_weight(c, state);
128fadc9
TH
851 else if (has_blockio) {
852 uint64_t blkio_weight = cgroup_context_blkio_weight(c, state);
853
854 weight = cgroup_weight_blkio_to_io(blkio_weight);
855
856 log_cgroup_compat(u, "Applying [Startup]BlockIOWeight %" PRIu64 " as [Startup]IOWeight %" PRIu64,
857 blkio_weight, weight);
858 } else
538b4852 859 weight = CGROUP_WEIGHT_DEFAULT;
13c31542
TH
860
861 xsprintf(buf, "default %" PRIu64 "\n", weight);
862 r = cg_set_attribute("io", path, "io.weight", buf);
863 if (r < 0)
f29ff115
TH
864 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
865 "Failed to set io.weight: %m");
13c31542 866
538b4852
TH
867 if (has_io) {
868 CGroupIODeviceWeight *w;
869
538b4852 870 LIST_FOREACH(device_weights, w, c->io_device_weights)
f29ff115 871 cgroup_apply_io_device_weight(u, w->path, w->weight);
538b4852
TH
872 } else if (has_blockio) {
873 CGroupBlockIODeviceWeight *w;
874
128fadc9
TH
875 LIST_FOREACH(device_weights, w, c->blockio_device_weights) {
876 weight = cgroup_weight_blkio_to_io(w->weight);
877
878 log_cgroup_compat(u, "Applying BlockIODeviceWeight %" PRIu64 " as IODeviceWeight %" PRIu64 " for %s",
879 w->weight, weight, w->path);
880
881 cgroup_apply_io_device_weight(u, w->path, weight);
882 }
538b4852 883 }
6ae4283c
TH
884
885 if (has_io) {
886 CGroupIODeviceLatency *l;
887
888 LIST_FOREACH(device_latencies, l, c->io_device_latencies)
889 cgroup_apply_io_device_latency(u, l->path, l->target_usec);
890 }
13c31542
TH
891 }
892
538b4852 893 if (has_io) {
17ae2780
LP
894 CGroupIODeviceLimit *l;
895
896 LIST_FOREACH(device_limits, l, c->io_device_limits)
897 cgroup_apply_io_device_limit(u, l->path, l->limits);
538b4852 898
538b4852 899 } else if (has_blockio) {
17ae2780 900 CGroupBlockIODeviceBandwidth *b;
538b4852 901
17ae2780 902 LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
538b4852
TH
903 uint64_t limits[_CGROUP_IO_LIMIT_TYPE_MAX];
904 CGroupIOLimitType type;
905
906 for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
907 limits[type] = cgroup_io_limit_defaults[type];
908
909 limits[CGROUP_IO_RBPS_MAX] = b->rbps;
910 limits[CGROUP_IO_WBPS_MAX] = b->wbps;
911
128fadc9
TH
912 log_cgroup_compat(u, "Applying BlockIO{Read|Write}Bandwidth %" PRIu64 " %" PRIu64 " as IO{Read|Write}BandwidthMax for %s",
913 b->rbps, b->wbps, b->path);
914
17ae2780 915 cgroup_apply_io_device_limit(u, b->path, limits);
538b4852 916 }
13c31542
TH
917 }
918 }
919
906c06f6 920 if (apply_mask & CGROUP_MASK_BLKIO) {
538b4852
TH
921 bool has_io = cgroup_context_has_io_config(c);
922 bool has_blockio = cgroup_context_has_blockio_config(c);
4ad49000 923
01efdf13 924 if (!is_root) {
64faf04c
TH
925 char buf[DECIMAL_STR_MAX(uint64_t)+1];
926 uint64_t weight;
64faf04c 927
7d862ab8 928 if (has_io) {
128fadc9
TH
929 uint64_t io_weight = cgroup_context_io_weight(c, state);
930
538b4852 931 weight = cgroup_weight_io_to_blkio(cgroup_context_io_weight(c, state));
128fadc9
TH
932
933 log_cgroup_compat(u, "Applying [Startup]IOWeight %" PRIu64 " as [Startup]BlockIOWeight %" PRIu64,
934 io_weight, weight);
7d862ab8
TH
935 } else if (has_blockio)
936 weight = cgroup_context_blkio_weight(c, state);
937 else
538b4852 938 weight = CGROUP_BLKIO_WEIGHT_DEFAULT;
64faf04c
TH
939
940 xsprintf(buf, "%" PRIu64 "\n", weight);
01efdf13 941 r = cg_set_attribute("blkio", path, "blkio.weight", buf);
1aeab12b 942 if (r < 0)
f29ff115
TH
943 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
944 "Failed to set blkio.weight: %m");
4ad49000 945
7d862ab8 946 if (has_io) {
538b4852
TH
947 CGroupIODeviceWeight *w;
948
128fadc9
TH
949 LIST_FOREACH(device_weights, w, c->io_device_weights) {
950 weight = cgroup_weight_io_to_blkio(w->weight);
951
952 log_cgroup_compat(u, "Applying IODeviceWeight %" PRIu64 " as BlockIODeviceWeight %" PRIu64 " for %s",
953 w->weight, weight, w->path);
954
955 cgroup_apply_blkio_device_weight(u, w->path, weight);
956 }
7d862ab8
TH
957 } else if (has_blockio) {
958 CGroupBlockIODeviceWeight *w;
959
7d862ab8
TH
960 LIST_FOREACH(device_weights, w, c->blockio_device_weights)
961 cgroup_apply_blkio_device_weight(u, w->path, w->weight);
538b4852 962 }
4ad49000
LP
963 }
964
7d862ab8 965 if (has_io) {
17ae2780 966 CGroupIODeviceLimit *l;
538b4852 967
17ae2780 968 LIST_FOREACH(device_limits, l, c->io_device_limits) {
128fadc9
TH
969 log_cgroup_compat(u, "Applying IO{Read|Write}Bandwidth %" PRIu64 " %" PRIu64 " as BlockIO{Read|Write}BandwidthMax for %s",
970 l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX], l->path);
971
17ae2780 972 cgroup_apply_blkio_device_limit(u, l->path, l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX]);
538b4852 973 }
7d862ab8 974 } else if (has_blockio) {
17ae2780 975 CGroupBlockIODeviceBandwidth *b;
7d862ab8 976
17ae2780
LP
977 LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths)
978 cgroup_apply_blkio_device_limit(u, b->path, b->rbps, b->wbps);
d686d8a9 979 }
8e274523
LP
980 }
981
906c06f6 982 if ((apply_mask & CGROUP_MASK_MEMORY) && !is_root) {
b4cccbc1
LP
983 if (cg_all_unified() > 0) {
984 uint64_t max, swap_max = CGROUP_LIMIT_MAX;
efdb0237 985
96e131ea 986 if (cgroup_context_has_unified_memory_config(c)) {
da4d897e 987 max = c->memory_max;
96e131ea
WC
988 swap_max = c->memory_swap_max;
989 } else {
da4d897e 990 max = c->memory_limit;
efdb0237 991
128fadc9
TH
992 if (max != CGROUP_LIMIT_MAX)
993 log_cgroup_compat(u, "Applying MemoryLimit %" PRIu64 " as MemoryMax", max);
994 }
995
48422635 996 cgroup_apply_unified_memory_limit(u, "memory.min", c->memory_min);
f29ff115
TH
997 cgroup_apply_unified_memory_limit(u, "memory.low", c->memory_low);
998 cgroup_apply_unified_memory_limit(u, "memory.high", c->memory_high);
999 cgroup_apply_unified_memory_limit(u, "memory.max", max);
96e131ea 1000 cgroup_apply_unified_memory_limit(u, "memory.swap.max", swap_max);
efdb0237 1001 } else {
da4d897e 1002 char buf[DECIMAL_STR_MAX(uint64_t) + 1];
7d862ab8 1003 uint64_t val;
da4d897e 1004
7d862ab8 1005 if (cgroup_context_has_unified_memory_config(c)) {
78a4ee59 1006 val = c->memory_max;
7d862ab8
TH
1007 log_cgroup_compat(u, "Applying MemoryMax %" PRIi64 " as MemoryLimit", val);
1008 } else
1009 val = c->memory_limit;
128fadc9 1010
78a4ee59
DM
1011 if (val == CGROUP_LIMIT_MAX)
1012 strncpy(buf, "-1\n", sizeof(buf));
1013 else
1014 xsprintf(buf, "%" PRIu64 "\n", val);
1015
da4d897e
TH
1016 r = cg_set_attribute("memory", path, "memory.limit_in_bytes", buf);
1017 if (r < 0)
f29ff115
TH
1018 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
1019 "Failed to set memory.limit_in_bytes: %m");
da4d897e 1020 }
4ad49000 1021 }
8e274523 1022
906c06f6 1023 if ((apply_mask & CGROUP_MASK_DEVICES) && !is_root) {
4ad49000 1024 CGroupDeviceAllow *a;
8e274523 1025
714e2e1d
LP
1026 /* Changing the devices list of a populated cgroup
1027 * might result in EINVAL, hence ignore EINVAL
1028 * here. */
1029
4ad49000
LP
1030 if (c->device_allow || c->device_policy != CGROUP_AUTO)
1031 r = cg_set_attribute("devices", path, "devices.deny", "a");
1032 else
1033 r = cg_set_attribute("devices", path, "devices.allow", "a");
1aeab12b 1034 if (r < 0)
f29ff115
TH
1035 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
1036 "Failed to reset devices.list: %m");
fb385181 1037
4ad49000
LP
1038 if (c->device_policy == CGROUP_CLOSED ||
1039 (c->device_policy == CGROUP_AUTO && c->device_allow)) {
1040 static const char auto_devices[] =
7d711efb
LP
1041 "/dev/null\0" "rwm\0"
1042 "/dev/zero\0" "rwm\0"
1043 "/dev/full\0" "rwm\0"
1044 "/dev/random\0" "rwm\0"
1045 "/dev/urandom\0" "rwm\0"
1046 "/dev/tty\0" "rwm\0"
5a7f87a9 1047 "/dev/ptmx\0" "rwm\0"
0d9e7991 1048 /* Allow /run/systemd/inaccessible/{chr,blk} devices for mapping InaccessiblePaths */
e7330dfe
DP
1049 "-/run/systemd/inaccessible/chr\0" "rwm\0"
1050 "-/run/systemd/inaccessible/blk\0" "rwm\0";
4ad49000
LP
1051
1052 const char *x, *y;
1053
1054 NULSTR_FOREACH_PAIR(x, y, auto_devices)
1055 whitelist_device(path, x, y);
7d711efb 1056
5a7f87a9 1057 /* PTS (/dev/pts) devices may not be duplicated, but accessed */
7d711efb 1058 whitelist_major(path, "pts", 'c', "rw");
4ad49000
LP
1059 }
1060
1061 LIST_FOREACH(device_allow, a, c->device_allow) {
fb4650aa 1062 char acc[4], *val;
4ad49000
LP
1063 unsigned k = 0;
1064
1065 if (a->r)
1066 acc[k++] = 'r';
1067 if (a->w)
1068 acc[k++] = 'w';
1069 if (a->m)
1070 acc[k++] = 'm';
fb385181 1071
4ad49000
LP
1072 if (k == 0)
1073 continue;
fb385181 1074
4ad49000 1075 acc[k++] = 0;
90060676 1076
27458ed6 1077 if (path_startswith(a->path, "/dev/"))
90060676 1078 whitelist_device(path, a->path, acc);
fb4650aa
ZJS
1079 else if ((val = startswith(a->path, "block-")))
1080 whitelist_major(path, val, 'b', acc);
1081 else if ((val = startswith(a->path, "char-")))
1082 whitelist_major(path, val, 'c', acc);
90060676 1083 else
f29ff115 1084 log_unit_debug(u, "Ignoring device %s while writing cgroup attribute.", a->path);
4ad49000
LP
1085 }
1086 }
03a7b521 1087
00b5974f
LP
1088 if (apply_mask & CGROUP_MASK_PIDS) {
1089
1090 if (is_root) {
1091 /* So, the "pids" controller does not expose anything on the root cgroup, in order not to
1092 * replicate knobs exposed elsewhere needlessly. We abstract this away here however, and when
1093 * the knobs of the root cgroup are modified propagate this to the relevant sysctls. There's a
1094 * non-obvious asymmetry however: unlike the cgroup properties we don't really want to take
1095 * exclusive ownership of the sysctls, but we still want to honour things if the user sets
1096 * limits. Hence we employ sort of a one-way strategy: when the user sets a bounded limit
1097 * through us it counts. When the user afterwards unsets it again (i.e. sets it to unbounded)
1098 * it also counts. But if the user never set a limit through us (i.e. we are the default of
1099 * "unbounded") we leave things unmodified. For this we manage a global boolean that we turn on
1100 * the first time we set a limit. Note that this boolean is flushed out on manager reload,
 1101 * which is desirable so that there's an official way to release control of the sysctl from
1102 * systemd: set the limit to unbounded and reload. */
1103
1104 if (c->tasks_max != CGROUP_LIMIT_MAX) {
1105 u->manager->sysctl_pid_max_changed = true;
1106 r = procfs_tasks_set_limit(c->tasks_max);
1107 } else if (u->manager->sysctl_pid_max_changed)
1108 r = procfs_tasks_set_limit(TASKS_MAX);
1109 else
1110 r = 0;
03a7b521 1111
00b5974f
LP
1112 if (r < 0)
1113 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
1114 "Failed to write to tasks limit sysctls: %m");
03a7b521 1115
00b5974f
LP
1116 } else {
1117 if (c->tasks_max != CGROUP_LIMIT_MAX) {
1118 char buf[DECIMAL_STR_MAX(uint64_t) + 2];
03a7b521 1119
00b5974f
LP
1120 sprintf(buf, "%" PRIu64 "\n", c->tasks_max);
1121 r = cg_set_attribute("pids", path, "pids.max", buf);
1122 } else
1123 r = cg_set_attribute("pids", path, "pids.max", "max");
1124 if (r < 0)
1125 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
1126 "Failed to set pids.max: %m");
1127 }
03a7b521 1128 }
906c06f6
DM
1129
1130 if (apply_bpf)
0f2d84d2 1131 cgroup_apply_firewall(u);
fb385181
LP
1132}
1133
efdb0237
LP
1134CGroupMask cgroup_context_get_mask(CGroupContext *c) {
1135 CGroupMask mask = 0;
8e274523 1136
4ad49000 1137 /* Figure out which controllers we need */
8e274523 1138
b2f8b02e 1139 if (c->cpu_accounting ||
66ebf6c0
TH
1140 cgroup_context_has_cpu_weight(c) ||
1141 cgroup_context_has_cpu_shares(c) ||
3a43da28 1142 c->cpu_quota_per_sec_usec != USEC_INFINITY)
efdb0237 1143 mask |= CGROUP_MASK_CPUACCT | CGROUP_MASK_CPU;
ecedd90f 1144
538b4852
TH
1145 if (cgroup_context_has_io_config(c) || cgroup_context_has_blockio_config(c))
1146 mask |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;
ecedd90f 1147
4ad49000 1148 if (c->memory_accounting ||
da4d897e
TH
1149 c->memory_limit != CGROUP_LIMIT_MAX ||
1150 cgroup_context_has_unified_memory_config(c))
efdb0237 1151 mask |= CGROUP_MASK_MEMORY;
8e274523 1152
a931ad47
LP
1153 if (c->device_allow ||
1154 c->device_policy != CGROUP_AUTO)
3905f127 1155 mask |= CGROUP_MASK_DEVICES;
4ad49000 1156
03a7b521 1157 if (c->tasks_accounting ||
8793fa25 1158 c->tasks_max != CGROUP_LIMIT_MAX)
03a7b521
LP
1159 mask |= CGROUP_MASK_PIDS;
1160
4ad49000 1161 return mask;
8e274523
LP
1162}
1163
efdb0237 1164CGroupMask unit_get_own_mask(Unit *u) {
4ad49000 1165 CGroupContext *c;
8e274523 1166
efdb0237
LP
1167 /* Returns the mask of controllers the unit needs for itself */
1168
4ad49000
LP
1169 c = unit_get_cgroup_context(u);
1170 if (!c)
1171 return 0;
8e274523 1172
64e844e5 1173 return cgroup_context_get_mask(c) | unit_get_delegate_mask(u);
02638280
LP
1174}
1175
1176CGroupMask unit_get_delegate_mask(Unit *u) {
1177 CGroupContext *c;
1178
1179 /* If delegation is turned on, then turn on selected controllers, unless we are on the legacy hierarchy and the
1180 * process we fork into is known to drop privileges, and hence shouldn't get access to the controllers.
19af675e 1181 *
02638280 1182 * Note that on the unified hierarchy it is safe to delegate controllers to unprivileged services. */
a931ad47 1183
1d9cc876 1184 if (!unit_cgroup_delegate(u))
02638280
LP
1185 return 0;
1186
1187 if (cg_all_unified() <= 0) {
a931ad47
LP
1188 ExecContext *e;
1189
1190 e = unit_get_exec_context(u);
02638280
LP
1191 if (e && !exec_context_maintains_privileges(e))
1192 return 0;
a931ad47
LP
1193 }
1194
1d9cc876 1195 assert_se(c = unit_get_cgroup_context(u));
02638280 1196 return c->delegate_controllers;
8e274523
LP
1197}
1198
efdb0237 1199CGroupMask unit_get_members_mask(Unit *u) {
4ad49000 1200 assert(u);
bc432dc7 1201
02638280 1202 /* Returns the mask of controllers all of the unit's children require, merged */
efdb0237 1203
bc432dc7
LP
1204 if (u->cgroup_members_mask_valid)
1205 return u->cgroup_members_mask;
1206
64e844e5 1207 u->cgroup_members_mask = 0;
bc432dc7
LP
1208
1209 if (u->type == UNIT_SLICE) {
eef85c4a 1210 void *v;
bc432dc7
LP
1211 Unit *member;
1212 Iterator i;
1213
eef85c4a 1214 HASHMAP_FOREACH_KEY(v, member, u->dependencies[UNIT_BEFORE], i) {
bc432dc7
LP
1215
1216 if (member == u)
1217 continue;
1218
d4fdc205 1219 if (UNIT_DEREF(member->slice) != u)
bc432dc7
LP
1220 continue;
1221
31604970 1222 u->cgroup_members_mask |= unit_get_subtree_mask(member); /* note that this calls ourselves again, for the children */
bc432dc7
LP
1223 }
1224 }
1225
1226 u->cgroup_members_mask_valid = true;
6414b7c9 1227 return u->cgroup_members_mask;
246aa6dd
LP
1228}
1229
efdb0237 1230CGroupMask unit_get_siblings_mask(Unit *u) {
4ad49000 1231 assert(u);
246aa6dd 1232
efdb0237
LP
1233 /* Returns the mask of controllers all of the unit's siblings
1234 * require, i.e. the members mask of the unit's parent slice
1235 * if there is one. */
1236
bc432dc7 1237 if (UNIT_ISSET(u->slice))
637f421e 1238 return unit_get_members_mask(UNIT_DEREF(u->slice));
4ad49000 1239
64e844e5 1240 return unit_get_subtree_mask(u); /* we are the top-level slice */
246aa6dd
LP
1241}
1242
efdb0237
LP
1243CGroupMask unit_get_subtree_mask(Unit *u) {
1244
1245 /* Returns the mask of this subtree, meaning of the group
1246 * itself and its children. */
1247
1248 return unit_get_own_mask(u) | unit_get_members_mask(u);
1249}
1250
1251CGroupMask unit_get_target_mask(Unit *u) {
1252 CGroupMask mask;
1253
1254 /* This returns the cgroup mask of all controllers to enable
1255 * for a specific cgroup, i.e. everything it needs itself,
1256 * plus all that its children need, plus all that its siblings
1257 * need. This is primarily useful on the legacy cgroup
1258 * hierarchy, where we need to duplicate each cgroup in each
1259 * hierarchy that shall be enabled for it. */
6414b7c9 1260
efdb0237
LP
1261 mask = unit_get_own_mask(u) | unit_get_members_mask(u) | unit_get_siblings_mask(u);
1262 mask &= u->manager->cgroup_supported;
1263
1264 return mask;
1265}
1266
1267CGroupMask unit_get_enable_mask(Unit *u) {
1268 CGroupMask mask;
1269
1270 /* This returns the cgroup mask of all controllers to enable
1271 * for the children of a specific cgroup. This is primarily
1272 * useful for the unified cgroup hierarchy, where each cgroup
1273 * controls which controllers are enabled for its children. */
1274
1275 mask = unit_get_members_mask(u);
6414b7c9
DS
1276 mask &= u->manager->cgroup_supported;
1277
1278 return mask;
1279}
1280
906c06f6
DM
1281bool unit_get_needs_bpf(Unit *u) {
1282 CGroupContext *c;
1283 Unit *p;
1284 assert(u);
1285
906c06f6
DM
1286 c = unit_get_cgroup_context(u);
1287 if (!c)
1288 return false;
1289
1290 if (c->ip_accounting ||
1291 c->ip_address_allow ||
1292 c->ip_address_deny)
1293 return true;
1294
1295 /* If any parent slice has an IP access list defined, it applies too */
1296 for (p = UNIT_DEREF(u->slice); p; p = UNIT_DEREF(p->slice)) {
1297 c = unit_get_cgroup_context(p);
1298 if (!c)
1299 return false;
1300
1301 if (c->ip_address_allow ||
1302 c->ip_address_deny)
1303 return true;
1304 }
1305
1306 return false;
1307}
1308
6414b7c9
DS
1309/* Recurse from a unit up through its containing slices, propagating
1310 * mask bits upward. A unit is also member of itself. */
bc432dc7 1311void unit_update_cgroup_members_masks(Unit *u) {
efdb0237 1312 CGroupMask m;
bc432dc7
LP
1313 bool more;
1314
1315 assert(u);
1316
1317 /* Calculate subtree mask */
efdb0237 1318 m = unit_get_subtree_mask(u);
bc432dc7
LP
1319
1320 /* See if anything changed from the previous invocation. If
1321 * not, we're done. */
1322 if (u->cgroup_subtree_mask_valid && m == u->cgroup_subtree_mask)
1323 return;
1324
1325 more =
1326 u->cgroup_subtree_mask_valid &&
1327 ((m & ~u->cgroup_subtree_mask) != 0) &&
1328 ((~m & u->cgroup_subtree_mask) == 0);
1329
1330 u->cgroup_subtree_mask = m;
1331 u->cgroup_subtree_mask_valid = true;
1332
6414b7c9
DS
1333 if (UNIT_ISSET(u->slice)) {
1334 Unit *s = UNIT_DEREF(u->slice);
bc432dc7
LP
1335
1336 if (more)
1337 /* There's more set now than before. We
1338 * propagate the new mask to the parent's mask
1339 * (not caring if it actually was valid or
1340 * not). */
1341
1342 s->cgroup_members_mask |= m;
1343
1344 else
1345 /* There's less set now than before (or we
1346 * don't know), we need to recalculate
1347 * everything, so let's invalidate the
1348 * parent's members mask */
1349
1350 s->cgroup_members_mask_valid = false;
1351
1352 /* And now make sure that this change also hits our
1353 * grandparents */
1354 unit_update_cgroup_members_masks(s);
6414b7c9
DS
1355 }
1356}
1357
6592b975 1358const char *unit_get_realized_cgroup_path(Unit *u, CGroupMask mask) {
03b90d4b 1359
6592b975 1360 /* Returns the realized cgroup path of the specified unit where all specified controllers are available. */
03b90d4b
LP
1361
1362 while (u) {
6592b975 1363
03b90d4b
LP
1364 if (u->cgroup_path &&
1365 u->cgroup_realized &&
d94a24ca 1366 FLAGS_SET(u->cgroup_realized_mask, mask))
03b90d4b
LP
1367 return u->cgroup_path;
1368
1369 u = UNIT_DEREF(u->slice);
1370 }
1371
1372 return NULL;
1373}
1374
6592b975
LP
1375static const char *migrate_callback(CGroupMask mask, void *userdata) {
1376 return unit_get_realized_cgroup_path(userdata, mask);
1377}
1378
efdb0237
LP
1379char *unit_default_cgroup_path(Unit *u) {
1380 _cleanup_free_ char *escaped = NULL, *slice = NULL;
1381 int r;
1382
1383 assert(u);
1384
1385 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1386 return strdup(u->manager->cgroup_root);
1387
1388 if (UNIT_ISSET(u->slice) && !unit_has_name(UNIT_DEREF(u->slice), SPECIAL_ROOT_SLICE)) {
1389 r = cg_slice_to_path(UNIT_DEREF(u->slice)->id, &slice);
1390 if (r < 0)
1391 return NULL;
1392 }
1393
1394 escaped = cg_escape(u->id);
1395 if (!escaped)
1396 return NULL;
1397
1398 if (slice)
605405c6
ZJS
1399 return strjoin(u->manager->cgroup_root, "/", slice, "/",
1400 escaped);
efdb0237 1401 else
605405c6 1402 return strjoin(u->manager->cgroup_root, "/", escaped);
efdb0237
LP
1403}
1404
1405int unit_set_cgroup_path(Unit *u, const char *path) {
1406 _cleanup_free_ char *p = NULL;
1407 int r;
1408
1409 assert(u);
1410
1411 if (path) {
1412 p = strdup(path);
1413 if (!p)
1414 return -ENOMEM;
1415 } else
1416 p = NULL;
1417
1418 if (streq_ptr(u->cgroup_path, p))
1419 return 0;
1420
1421 if (p) {
1422 r = hashmap_put(u->manager->cgroup_unit, p, u);
1423 if (r < 0)
1424 return r;
1425 }
1426
1427 unit_release_cgroup(u);
1428
ae2a15bc 1429 u->cgroup_path = TAKE_PTR(p);
efdb0237
LP
1430
1431 return 1;
1432}
1433
1434int unit_watch_cgroup(Unit *u) {
ab2c3861 1435 _cleanup_free_ char *events = NULL;
efdb0237
LP
1436 int r;
1437
1438 assert(u);
1439
1440 if (!u->cgroup_path)
1441 return 0;
1442
1443 if (u->cgroup_inotify_wd >= 0)
1444 return 0;
1445
1446 /* Only applies to the unified hierarchy */
c22800e4 1447 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
b4cccbc1
LP
1448 if (r < 0)
1449 return log_error_errno(r, "Failed to determine whether the name=systemd hierarchy is unified: %m");
1450 if (r == 0)
efdb0237
LP
1451 return 0;
1452
1453 /* Don't watch the root slice, it's pointless. */
1454 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1455 return 0;
1456
1457 r = hashmap_ensure_allocated(&u->manager->cgroup_inotify_wd_unit, &trivial_hash_ops);
1458 if (r < 0)
1459 return log_oom();
1460
ab2c3861 1461 r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events", &events);
efdb0237
LP
1462 if (r < 0)
1463 return log_oom();
1464
ab2c3861 1465 u->cgroup_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, events, IN_MODIFY);
efdb0237
LP
1466 if (u->cgroup_inotify_wd < 0) {
1467
1468 if (errno == ENOENT) /* If the directory is already
1469 * gone we don't need to track
1470 * it, so this is not an error */
1471 return 0;
1472
1473 return log_unit_error_errno(u, errno, "Failed to add inotify watch descriptor for control group %s: %m", u->cgroup_path);
1474 }
1475
1476 r = hashmap_put(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd), u);
1477 if (r < 0)
1478 return log_unit_error_errno(u, r, "Failed to add inotify watch descriptor to hash map: %m");
1479
1480 return 0;
1481}
1482
a4634b21
LP
1483int unit_pick_cgroup_path(Unit *u) {
1484 _cleanup_free_ char *path = NULL;
1485 int r;
1486
1487 assert(u);
1488
1489 if (u->cgroup_path)
1490 return 0;
1491
1492 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1493 return -EINVAL;
1494
1495 path = unit_default_cgroup_path(u);
1496 if (!path)
1497 return log_oom();
1498
1499 r = unit_set_cgroup_path(u, path);
1500 if (r == -EEXIST)
1501 return log_unit_error_errno(u, r, "Control group %s exists already.", path);
1502 if (r < 0)
1503 return log_unit_error_errno(u, r, "Failed to set unit's control group path to %s: %m", path);
1504
1505 return 0;
1506}
1507
efdb0237
LP
1508static int unit_create_cgroup(
1509 Unit *u,
1510 CGroupMask target_mask,
906c06f6
DM
1511 CGroupMask enable_mask,
1512 bool needs_bpf) {
efdb0237 1513
0cd385d3 1514 CGroupContext *c;
bc432dc7 1515 int r;
65be7e06 1516 bool created;
64747e2d 1517
4ad49000 1518 assert(u);
64747e2d 1519
0cd385d3
LP
1520 c = unit_get_cgroup_context(u);
1521 if (!c)
1522 return 0;
1523
a4634b21
LP
1524 /* Figure out our cgroup path */
1525 r = unit_pick_cgroup_path(u);
1526 if (r < 0)
1527 return r;
b58b8e11 1528
03b90d4b 1529 /* First, create our own group */
efdb0237 1530 r = cg_create_everywhere(u->manager->cgroup_supported, target_mask, u->cgroup_path);
23bbb0de 1531 if (r < 0)
efdb0237 1532 return log_unit_error_errno(u, r, "Failed to create cgroup %s: %m", u->cgroup_path);
65be7e06 1533 created = !!r;
efdb0237
LP
1534
1535 /* Start watching it */
1536 (void) unit_watch_cgroup(u);
1537
65be7e06
ZJS
1538 /* Preserve enabled controllers in delegated units, adjust others. */
1539 if (created || !unit_cgroup_delegate(u)) {
1540
1541 /* Enable all controllers we need */
1542 r = cg_enable_everywhere(u->manager->cgroup_supported, enable_mask, u->cgroup_path);
1543 if (r < 0)
1544 log_unit_warning_errno(u, r, "Failed to enable controllers on cgroup %s, ignoring: %m",
1545 u->cgroup_path);
1546 }
03b90d4b
LP
1547
1548 /* Keep track that this is now realized */
4ad49000 1549 u->cgroup_realized = true;
efdb0237 1550 u->cgroup_realized_mask = target_mask;
ccf78df1 1551 u->cgroup_enabled_mask = enable_mask;
906c06f6 1552 u->cgroup_bpf_state = needs_bpf ? UNIT_CGROUP_BPF_ON : UNIT_CGROUP_BPF_OFF;
4ad49000 1553
1d9cc876 1554 if (u->type != UNIT_SLICE && !unit_cgroup_delegate(u)) {
0cd385d3
LP
1555
1556 /* Then, possibly move things over, but not if
1557 * subgroups may contain processes, which is the case
1558 * for slice and delegation units. */
1559 r = cg_migrate_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->cgroup_path, migrate_callback, u);
1560 if (r < 0)
efdb0237 1561 log_unit_warning_errno(u, r, "Failed to migrate cgroup to %s, ignoring: %m", u->cgroup_path);
0cd385d3 1562 }
03b90d4b 1563
64747e2d
LP
1564 return 0;
1565}
1566
6592b975
LP
1567static int unit_attach_pid_to_cgroup_via_bus(Unit *u, pid_t pid, const char *suffix_path) {
1568 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1569 char *pp;
7b3fd631 1570 int r;
6592b975 1571
7b3fd631
LP
1572 assert(u);
1573
6592b975
LP
1574 if (MANAGER_IS_SYSTEM(u->manager))
1575 return -EINVAL;
1576
1577 if (!u->manager->system_bus)
1578 return -EIO;
1579
1580 if (!u->cgroup_path)
1581 return -EINVAL;
1582
1583 /* Determine this unit's cgroup path relative to our cgroup root */
1584 pp = path_startswith(u->cgroup_path, u->manager->cgroup_root);
1585 if (!pp)
1586 return -EINVAL;
1587
1588 pp = strjoina("/", pp, suffix_path);
858d36c1 1589 path_simplify(pp, false);
6592b975
LP
1590
1591 r = sd_bus_call_method(u->manager->system_bus,
1592 "org.freedesktop.systemd1",
1593 "/org/freedesktop/systemd1",
1594 "org.freedesktop.systemd1.Manager",
1595 "AttachProcessesToUnit",
1596 &error, NULL,
1597 "ssau",
1598 NULL /* empty unit name means client's unit, i.e. us */, pp, 1, (uint32_t) pid);
7b3fd631 1599 if (r < 0)
6592b975
LP
1600 return log_unit_debug_errno(u, r, "Failed to attach unit process " PID_FMT " via the bus: %s", pid, bus_error_message(&error, r));
1601
1602 return 0;
1603}
1604
1605int unit_attach_pids_to_cgroup(Unit *u, Set *pids, const char *suffix_path) {
1606 CGroupMask delegated_mask;
1607 const char *p;
1608 Iterator i;
1609 void *pidp;
1610 int r, q;
1611
1612 assert(u);
1613
1614 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1615 return -EINVAL;
1616
1617 if (set_isempty(pids))
1618 return 0;
7b3fd631 1619
6592b975 1620 r = unit_realize_cgroup(u);
7b3fd631
LP
1621 if (r < 0)
1622 return r;
1623
6592b975
LP
1624 if (isempty(suffix_path))
1625 p = u->cgroup_path;
1626 else
1627 p = strjoina(u->cgroup_path, "/", suffix_path);
1628
1629 delegated_mask = unit_get_delegate_mask(u);
1630
1631 r = 0;
1632 SET_FOREACH(pidp, pids, i) {
1633 pid_t pid = PTR_TO_PID(pidp);
1634 CGroupController c;
1635
1636 /* First, attach the PID to the main cgroup hierarchy */
1637 q = cg_attach(SYSTEMD_CGROUP_CONTROLLER, p, pid);
1638 if (q < 0) {
1639 log_unit_debug_errno(u, q, "Couldn't move process " PID_FMT " to requested cgroup '%s': %m", pid, p);
1640
1641 if (MANAGER_IS_USER(u->manager) && IN_SET(q, -EPERM, -EACCES)) {
1642 int z;
1643
1644 /* If we are in a user instance, and we can't move the process ourselves due to
1645 * permission problems, let's ask the system instance about it instead. Since it's more
1646 * privileged it might be able to move the process across the leaves of a subtree whose
1647 * top node is not owned by us. */
1648
1649 z = unit_attach_pid_to_cgroup_via_bus(u, pid, suffix_path);
1650 if (z < 0)
1651 log_unit_debug_errno(u, z, "Couldn't move process " PID_FMT " to requested cgroup '%s' via the system bus either: %m", pid, p);
1652 else
1653 continue; /* When this worked via the bus we are fully done for this PID. */
1654 }
1655
1656 if (r >= 0)
1657 r = q; /* Remember first error */
1658
1659 continue;
1660 }
1661
1662 q = cg_all_unified();
1663 if (q < 0)
1664 return q;
1665 if (q > 0)
1666 continue;
1667
1668 /* In the legacy hierarchy, attach the process to the requested cgroup if possible, and if not, to the
1669 * innermost realized one */
1670
1671 for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++) {
1672 CGroupMask bit = CGROUP_CONTROLLER_TO_MASK(c);
1673 const char *realized;
1674
1675 if (!(u->manager->cgroup_supported & bit))
1676 continue;
1677
1678 /* If this controller is delegated and realized, honour the caller's request for the cgroup suffix. */
1679 if (delegated_mask & u->cgroup_realized_mask & bit) {
1680 q = cg_attach(cgroup_controller_to_string(c), p, pid);
1681 if (q >= 0)
1682 continue; /* Success! */
1683
1684 log_unit_debug_errno(u, q, "Failed to attach PID " PID_FMT " to requested cgroup %s in controller %s, falling back to unit's cgroup: %m",
1685 pid, p, cgroup_controller_to_string(c));
1686 }
1687
1688 /* So this controller is either not delegated or not realized, or something else weird happened. In
1689 * that case let's attach the PID at least to the closest cgroup up the tree that is
1690 * realized. */
1691 realized = unit_get_realized_cgroup_path(u, bit);
1692 if (!realized)
1693 continue; /* Not even realized in the root slice? Then let's not bother */
1694
1695 q = cg_attach(cgroup_controller_to_string(c), realized, pid);
1696 if (q < 0)
1697 log_unit_debug_errno(u, q, "Failed to attach PID " PID_FMT " to realized cgroup %s in controller %s, ignoring: %m",
1698 pid, realized, cgroup_controller_to_string(c));
1699 }
1700 }
1701
1702 return r;
7b3fd631
LP
1703}
1704
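/* Illustrative sketch, not part of the original source: on the unified hierarchy the attach operations above
 * boil down to writing the PID into the target cgroup's cgroup.procs file. /sys/fs/cgroup is assumed as the
 * mount point and error handling is reduced to the bare minimum. */
#include <stdio.h>
#include <sys/types.h>

static int sketch_attach_pid(const char *cgroup_relpath, pid_t pid) {
        char path[4096];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/fs/cgroup%s/cgroup.procs", cgroup_relpath);

        f = fopen(path, "we");
        if (!f)
                return -1;

        fprintf(f, "%lu\n", (unsigned long) pid); /* the kernel moves the whole thread group */
        return fclose(f) == 0 ? 0 : -1;
}
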
4b58153d
LP
1705static void cgroup_xattr_apply(Unit *u) {
1706 char ids[SD_ID128_STRING_MAX];
1707 int r;
1708
1709 assert(u);
1710
1711 if (!MANAGER_IS_SYSTEM(u->manager))
1712 return;
1713
1714 if (sd_id128_is_null(u->invocation_id))
1715 return;
1716
1717 r = cg_set_xattr(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
1718 "trusted.invocation_id",
1719 sd_id128_to_string(u->invocation_id, ids), 32,
1720 0);
1721 if (r < 0)
0fb84499 1722 log_unit_debug_errno(u, r, "Failed to set invocation ID on control group %s, ignoring: %m", u->cgroup_path);
4b58153d
LP
1723}
1724
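/* Illustrative sketch, not part of the original source: how a monitoring tool could read the
 * trusted.invocation_id attribute back from a unit's cgroup directory (the trusted.* xattr namespace
 * requires privileges). The cgroup path below is a made-up example. */
#include <stdio.h>
#include <sys/xattr.h>

static void sketch_read_invocation_id(void) {
        char buf[33] = {};
        ssize_t n;

        n = getxattr("/sys/fs/cgroup/system.slice/example.service", "trusted.invocation_id", buf, sizeof(buf) - 1);
        if (n >= 0)
                printf("invocation ID: %.*s\n", (int) n, buf);
}
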
906c06f6
DM
1725static bool unit_has_mask_realized(
1726 Unit *u,
1727 CGroupMask target_mask,
1728 CGroupMask enable_mask,
1729 bool needs_bpf) {
1730
bc432dc7
LP
1731 assert(u);
1732
906c06f6
DM
1733 return u->cgroup_realized &&
1734 u->cgroup_realized_mask == target_mask &&
1735 u->cgroup_enabled_mask == enable_mask &&
1736 ((needs_bpf && u->cgroup_bpf_state == UNIT_CGROUP_BPF_ON) ||
1737 (!needs_bpf && u->cgroup_bpf_state == UNIT_CGROUP_BPF_OFF));
6414b7c9
DS
1738}
1739
2aa57a65
LP
1740static void unit_add_to_cgroup_realize_queue(Unit *u) {
1741 assert(u);
1742
1743 if (u->in_cgroup_realize_queue)
1744 return;
1745
1746 LIST_PREPEND(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
1747 u->in_cgroup_realize_queue = true;
1748}
1749
1750static void unit_remove_from_cgroup_realize_queue(Unit *u) {
1751 assert(u);
1752
1753 if (!u->in_cgroup_realize_queue)
1754 return;
1755
1756 LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
1757 u->in_cgroup_realize_queue = false;
1758}
1759
6414b7c9
DS
1760/* Check if necessary controllers and attributes for a unit are in place.
1761 *
1762 * If so, do nothing.
1763 * If not, create paths, move processes over, and set attributes.
1764 *
1765 * Returns 0 on success and < 0 on failure. */
db785129 1766static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
efdb0237 1767 CGroupMask target_mask, enable_mask;
906c06f6 1768 bool needs_bpf, apply_bpf;
6414b7c9 1769 int r;
64747e2d 1770
4ad49000 1771 assert(u);
64747e2d 1772
2aa57a65 1773 unit_remove_from_cgroup_realize_queue(u);
64747e2d 1774
efdb0237 1775 target_mask = unit_get_target_mask(u);
ccf78df1 1776 enable_mask = unit_get_enable_mask(u);
906c06f6 1777 needs_bpf = unit_get_needs_bpf(u);
ccf78df1 1778
906c06f6 1779 if (unit_has_mask_realized(u, target_mask, enable_mask, needs_bpf))
0a1eb06d 1780 return 0;
64747e2d 1781
906c06f6
DM
1782 /* Make sure we apply the BPF filters either when one is configured, or if none is configured but previously
1783 * the state was anything but off. This way, if a unit with a BPF filter applied is reconfigured to lose it,
1784 * this will trickle down properly to cgroupfs. */
1785 apply_bpf = needs_bpf || u->cgroup_bpf_state != UNIT_CGROUP_BPF_OFF;
1786
4ad49000 1787 /* First, realize parents */
6414b7c9 1788 if (UNIT_ISSET(u->slice)) {
db785129 1789 r = unit_realize_cgroup_now(UNIT_DEREF(u->slice), state);
6414b7c9
DS
1790 if (r < 0)
1791 return r;
1792 }
4ad49000
LP
1793
1794 /* And then do the real work */
906c06f6 1795 r = unit_create_cgroup(u, target_mask, enable_mask, needs_bpf);
6414b7c9
DS
1796 if (r < 0)
1797 return r;
1798
1799 /* Finally, apply the necessary attributes. */
906c06f6 1800 cgroup_context_apply(u, target_mask, apply_bpf, state);
4b58153d 1801 cgroup_xattr_apply(u);
6414b7c9
DS
1802
1803 return 0;
64747e2d
LP
1804}
1805
91a6073e 1806unsigned manager_dispatch_cgroup_realize_queue(Manager *m) {
db785129 1807 ManagerState state;
4ad49000 1808 unsigned n = 0;
db785129 1809 Unit *i;
6414b7c9 1810 int r;
ecedd90f 1811
91a6073e
LP
1812 assert(m);
1813
db785129
LP
1814 state = manager_state(m);
1815
91a6073e
LP
1816 while ((i = m->cgroup_realize_queue)) {
1817 assert(i->in_cgroup_realize_queue);
ecedd90f 1818
2aa57a65
LP
1819 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(i))) {
1820 /* Maybe things changed, and the unit is not actually active anymore? */
1821 unit_remove_from_cgroup_realize_queue(i);
1822 continue;
1823 }
1824
db785129 1825 r = unit_realize_cgroup_now(i, state);
6414b7c9 1826 if (r < 0)
efdb0237 1827 log_warning_errno(r, "Failed to realize cgroups for queued unit %s, ignoring: %m", i->id);
0a1eb06d 1828
4ad49000
LP
1829 n++;
1830 }
ecedd90f 1831
4ad49000 1832 return n;
8e274523
LP
1833}
1834
91a6073e 1835static void unit_add_siblings_to_cgroup_realize_queue(Unit *u) {
4ad49000 1836 Unit *slice;
ca949c9d 1837
4ad49000
LP
1838 /* This adds the siblings of the specified unit and the
1839 * siblings of all parent units to the cgroup queue. (But
1840 * neither the specified unit itself nor the parents.) */
1841
1842 while ((slice = UNIT_DEREF(u->slice))) {
1843 Iterator i;
1844 Unit *m;
eef85c4a 1845 void *v;
8f53a7b8 1846
eef85c4a 1847 HASHMAP_FOREACH_KEY(v, m, u->dependencies[UNIT_BEFORE], i) {
4ad49000
LP
1848 if (m == u)
1849 continue;
8e274523 1850
6414b7c9
DS
1851 /* Skip units that have a dependency on the slice
1852 * but aren't actually in it. */
4ad49000 1853 if (UNIT_DEREF(m->slice) != slice)
50159e6a 1854 continue;
8e274523 1855
6414b7c9
DS
1856 /* No point in doing cgroup application for units
1857 * without active processes. */
1858 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(m)))
1859 continue;
1860
1861 /* If the unit doesn't need any new controllers
1862 * and has current ones realized, it doesn't need
1863 * any changes. */
906c06f6
DM
1864 if (unit_has_mask_realized(m,
1865 unit_get_target_mask(m),
1866 unit_get_enable_mask(m),
1867 unit_get_needs_bpf(m)))
6414b7c9
DS
1868 continue;
1869
91a6073e 1870 unit_add_to_cgroup_realize_queue(m);
50159e6a
LP
1871 }
1872
4ad49000 1873 u = slice;
8e274523 1874 }
4ad49000
LP
1875}
1876
0a1eb06d 1877int unit_realize_cgroup(Unit *u) {
4ad49000
LP
1878 assert(u);
1879
35b7ff80 1880 if (!UNIT_HAS_CGROUP_CONTEXT(u))
0a1eb06d 1881 return 0;
8e274523 1882
4ad49000
LP
1883 /* So, here's the deal: when realizing the cgroups for this
1884 * unit, we need to first create all parents, but there's more
1885 * to it: for the weight-based controllers we also need to
1886 * make sure that all our siblings (i.e. units that are in the
73e231ab 1887 * same slice as we are) have cgroups, too. Otherwise, things
4ad49000
LP
1888 * would become very uneven as each of their processes would
1889 * get as many resources as our entire group together. This call
1890 * will synchronously create the parent cgroups, but will
1891 * defer work on the siblings to the next event loop
1892 * iteration. */
ca949c9d 1893
4ad49000 1894 /* Add all sibling slices to the cgroup queue. */
91a6073e 1895 unit_add_siblings_to_cgroup_realize_queue(u);
4ad49000 1896
6414b7c9 1897 /* And realize this one now (and apply the values) */
db785129 1898 return unit_realize_cgroup_now(u, manager_state(u->manager));
8e274523
LP
1899}
1900
efdb0237
LP
1901void unit_release_cgroup(Unit *u) {
1902 assert(u);
1903
1904 /* Forgets all cgroup details for this cgroup */
1905
1906 if (u->cgroup_path) {
1907 (void) hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
1908 u->cgroup_path = mfree(u->cgroup_path);
1909 }
1910
1911 if (u->cgroup_inotify_wd >= 0) {
1912 if (inotify_rm_watch(u->manager->cgroup_inotify_fd, u->cgroup_inotify_wd) < 0)
1913 log_unit_debug_errno(u, errno, "Failed to remove cgroup inotify watch %i for %s, ignoring: %m", u->cgroup_inotify_wd, u->id);
1914
1915 (void) hashmap_remove(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd));
1916 u->cgroup_inotify_wd = -1;
1917 }
1918}
1919
1920void unit_prune_cgroup(Unit *u) {
8e274523 1921 int r;
efdb0237 1922 bool is_root_slice;
8e274523 1923
4ad49000 1924 assert(u);
8e274523 1925
efdb0237
LP
1926 /* Removes the cgroup, if empty and possible, and stops watching it. */
1927
4ad49000
LP
1928 if (!u->cgroup_path)
1929 return;
8e274523 1930
fe700f46
LP
1931 (void) unit_get_cpu_usage(u, NULL); /* Cache the last CPU usage value before we destroy the cgroup */
1932
efdb0237
LP
1933 is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);
1934
1935 r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !is_root_slice);
dab5bf85 1936 if (r < 0) {
f29ff115 1937 log_unit_debug_errno(u, r, "Failed to destroy cgroup %s, ignoring: %m", u->cgroup_path);
dab5bf85
RL
1938 return;
1939 }
8e274523 1940
efdb0237
LP
1941 if (is_root_slice)
1942 return;
1943
1944 unit_release_cgroup(u);
0a1eb06d 1945
4ad49000 1946 u->cgroup_realized = false;
bc432dc7 1947 u->cgroup_realized_mask = 0;
ccf78df1 1948 u->cgroup_enabled_mask = 0;
8e274523
LP
1949}
1950
efdb0237 1951int unit_search_main_pid(Unit *u, pid_t *ret) {
4ad49000
LP
1952 _cleanup_fclose_ FILE *f = NULL;
1953 pid_t pid = 0, npid, mypid;
efdb0237 1954 int r;
4ad49000
LP
1955
1956 assert(u);
efdb0237 1957 assert(ret);
4ad49000
LP
1958
1959 if (!u->cgroup_path)
efdb0237 1960 return -ENXIO;
4ad49000 1961
efdb0237
LP
1962 r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, &f);
1963 if (r < 0)
1964 return r;
4ad49000 1965
df0ff127 1966 mypid = getpid_cached();
4ad49000
LP
1967 while (cg_read_pid(f, &npid) > 0) {
1968 pid_t ppid;
1969
1970 if (npid == pid)
1971 continue;
8e274523 1972
4ad49000 1973 /* Ignore processes that aren't our kids */
6bc73acb 1974 if (get_process_ppid(npid, &ppid) >= 0 && ppid != mypid)
4ad49000 1975 continue;
8e274523 1976
efdb0237 1977 if (pid != 0)
4ad49000
LP
1978 /* Dang, there's more than one daemonized PID
1979 in this group, so we don't know what process
1980 is the main process. */
efdb0237
LP
1981
1982 return -ENODATA;
8e274523 1983
4ad49000 1984 pid = npid;
8e274523
LP
1985 }
1986
efdb0237
LP
1987 *ret = pid;
1988 return 0;
1989}
1990
1991static int unit_watch_pids_in_path(Unit *u, const char *path) {
b3c5bad3 1992 _cleanup_closedir_ DIR *d = NULL;
efdb0237
LP
1993 _cleanup_fclose_ FILE *f = NULL;
1994 int ret = 0, r;
1995
1996 assert(u);
1997 assert(path);
1998
1999 r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f);
2000 if (r < 0)
2001 ret = r;
2002 else {
2003 pid_t pid;
2004
2005 while ((r = cg_read_pid(f, &pid)) > 0) {
2006 r = unit_watch_pid(u, pid);
2007 if (r < 0 && ret >= 0)
2008 ret = r;
2009 }
2010
2011 if (r < 0 && ret >= 0)
2012 ret = r;
2013 }
2014
2015 r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d);
2016 if (r < 0) {
2017 if (ret >= 0)
2018 ret = r;
2019 } else {
2020 char *fn;
2021
2022 while ((r = cg_read_subgroup(d, &fn)) > 0) {
2023 _cleanup_free_ char *p = NULL;
2024
605405c6 2025 p = strjoin(path, "/", fn);
efdb0237
LP
2026 free(fn);
2027
2028 if (!p)
2029 return -ENOMEM;
2030
2031 r = unit_watch_pids_in_path(u, p);
2032 if (r < 0 && ret >= 0)
2033 ret = r;
2034 }
2035
2036 if (r < 0 && ret >= 0)
2037 ret = r;
2038 }
2039
2040 return ret;
2041}
2042
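/* Illustrative sketch, not part of the original source: the recursion above amounts to enumerating a
 * cgroup's child directories and descending into each of them, roughly like this. /sys/fs/cgroup is assumed
 * as mount point, and d_type is assumed to be filled in for directory entries. */
#include <dirent.h>
#include <stdio.h>

static void sketch_walk_subgroups(const char *cgroup_relpath, int depth) {
        char path[4096];
        struct dirent *de;
        DIR *d;

        snprintf(path, sizeof(path), "/sys/fs/cgroup%s", cgroup_relpath);

        d = opendir(path);
        if (!d)
                return;

        while ((de = readdir(d))) {
                char child[4096];

                if (de->d_type != DT_DIR || de->d_name[0] == '.')
                        continue;

                printf("%*s%s\n", 2 * depth, "", de->d_name);

                snprintf(child, sizeof(child), "%s/%s", cgroup_relpath, de->d_name);
                sketch_walk_subgroups(child, depth + 1);
        }

        closedir(d);
}
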
11aef522
LP
2043int unit_synthesize_cgroup_empty_event(Unit *u) {
2044 int r;
2045
2046 assert(u);
2047
2048 /* Enqueue a synthetic cgroup empty event if this unit doesn't watch any PIDs anymore. This is compatibility
2049 * support for non-unified systems where notifications aren't reliable; hence we need to take whatever we can
2050 * get as a notification source as soon as we stop having any useful PIDs to watch for. */
2051
2052 if (!u->cgroup_path)
2053 return -ENOENT;
2054
2055 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
2056 if (r < 0)
2057 return r;
2058 if (r > 0) /* On unified we have reliable notifications, and don't need this */
2059 return 0;
2060
2061 if (!set_isempty(u->pids))
2062 return 0;
2063
2064 unit_add_to_cgroup_empty_queue(u);
2065 return 0;
2066}
2067
efdb0237 2068int unit_watch_all_pids(Unit *u) {
b4cccbc1
LP
2069 int r;
2070
efdb0237
LP
2071 assert(u);
2072
2073 /* Adds all PIDs from our cgroup to the set of PIDs we
2074 * watch. This is a fallback logic for cases where we do not
2075 * get reliable cgroup empty notifications: we try to use
2076 * SIGCHLD as replacement. */
2077
2078 if (!u->cgroup_path)
2079 return -ENOENT;
2080
c22800e4 2081 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
b4cccbc1
LP
2082 if (r < 0)
2083 return r;
2084 if (r > 0) /* On unified we can use proper notifications */
efdb0237
LP
2085 return 0;
2086
2087 return unit_watch_pids_in_path(u, u->cgroup_path);
2088}
2089
09e24654
LP
2090static int on_cgroup_empty_event(sd_event_source *s, void *userdata) {
2091 Manager *m = userdata;
2092 Unit *u;
efdb0237
LP
2093 int r;
2094
09e24654
LP
2095 assert(s);
2096 assert(m);
efdb0237 2097
09e24654
LP
2098 u = m->cgroup_empty_queue;
2099 if (!u)
efdb0237
LP
2100 return 0;
2101
09e24654
LP
2102 assert(u->in_cgroup_empty_queue);
2103 u->in_cgroup_empty_queue = false;
2104 LIST_REMOVE(cgroup_empty_queue, m->cgroup_empty_queue, u);
2105
2106 if (m->cgroup_empty_queue) {
2107 /* More stuff queued, let's make sure we remain enabled */
2108 r = sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);
2109 if (r < 0)
19a691a9 2110 log_debug_errno(r, "Failed to reenable cgroup empty event source, ignoring: %m");
09e24654 2111 }
efdb0237
LP
2112
2113 unit_add_to_gc_queue(u);
2114
2115 if (UNIT_VTABLE(u)->notify_cgroup_empty)
2116 UNIT_VTABLE(u)->notify_cgroup_empty(u);
2117
2118 return 0;
2119}
2120
09e24654
LP
2121void unit_add_to_cgroup_empty_queue(Unit *u) {
2122 int r;
2123
2124 assert(u);
2125
2126 /* Note that there are four different ways how cgroup empty events reach us:
2127 *
2128 * 1. On the unified hierarchy we get an inotify event on the cgroup
2129 *
2130 * 2. On the legacy hierarchy, when running in system mode, we get a datagram on the cgroup agent socket
2131 *
2132 * 3. On the legacy hierarchy, when running in user mode, we get a D-Bus signal on the system bus
2133 *
2134 * 4. On the legacy hierarchy, in service units we start watching all processes of the cgroup for SIGCHLD as
2135 * soon as we get one SIGCHLD, to deal with unreliable cgroup notifications.
2136 *
2137 * Regardless which way we got the notification, we'll verify it here, and then add it to a separate
2138 * queue. This queue will be dispatched at a lower priority than the SIGCHLD handler, so that we always use
2139 * SIGCHLD if we can get it first, and only use the cgroup empty notifications if there's no SIGCHLD pending
2140 * (which might happen if the cgroup doesn't contain processes that are our own children, which is typically the
2141 * case for scope units). */
2142
2143 if (u->in_cgroup_empty_queue)
2144 return;
2145
2146 /* Let's verify that the cgroup is really empty */
2147 if (!u->cgroup_path)
2148 return;
2149 r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
2150 if (r < 0) {
2151 log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
2152 return;
2153 }
2154 if (r == 0)
2155 return;
2156
2157 LIST_PREPEND(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
2158 u->in_cgroup_empty_queue = true;
2159
2160 /* Trigger the defer event */
2161 r = sd_event_source_set_enabled(u->manager->cgroup_empty_event_source, SD_EVENT_ONESHOT);
2162 if (r < 0)
2163 log_debug_errno(r, "Failed to enable cgroup empty event source: %m");
2164}
2165
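/* Illustrative sketch, not part of the original source: the priority choices above rely on sd-event
 * dispatching pending sources in ascending priority order (numerically lower values first). Two defer
 * sources demonstrate the ordering. */
#include <stdio.h>
#include <systemd/sd-event.h>

static int sketch_say(sd_event_source *s, void *userdata) {
        printf("%s\n", (const char *) userdata);
        return sd_event_source_set_enabled(s, SD_EVENT_OFF);
}

static int sketch_priorities(void) {
        sd_event_source *a = NULL, *b = NULL;
        sd_event *e = NULL;
        int r;

        r = sd_event_default(&e);
        if (r < 0)
                return r;

        (void) sd_event_add_defer(e, &a, sketch_say, (void*) "dispatched first");
        (void) sd_event_add_defer(e, &b, sketch_say, (void*) "dispatched second");
        (void) sd_event_source_set_priority(a, SD_EVENT_PRIORITY_NORMAL - 5);
        (void) sd_event_source_set_priority(b, SD_EVENT_PRIORITY_NORMAL - 4);

        /* Each sd_event_run() call dispatches at most one pending event; 'a' wins because of its lower value. */
        (void) sd_event_run(e, 0);
        (void) sd_event_run(e, 0);

        sd_event_source_unref(a);
        sd_event_source_unref(b);
        sd_event_unref(e);
        return 0;
}
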
efdb0237
LP
2166static int on_cgroup_inotify_event(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
2167 Manager *m = userdata;
2168
2169 assert(s);
2170 assert(fd >= 0);
2171 assert(m);
2172
2173 for (;;) {
2174 union inotify_event_buffer buffer;
2175 struct inotify_event *e;
2176 ssize_t l;
2177
2178 l = read(fd, &buffer, sizeof(buffer));
2179 if (l < 0) {
47249640 2180 if (IN_SET(errno, EINTR, EAGAIN))
efdb0237
LP
2181 return 0;
2182
2183 return log_error_errno(errno, "Failed to read control group inotify events: %m");
2184 }
2185
2186 FOREACH_INOTIFY_EVENT(e, buffer, l) {
2187 Unit *u;
2188
2189 if (e->wd < 0)
2190 /* Queue overflow has no watch descriptor */
2191 continue;
2192
2193 if (e->mask & IN_IGNORED)
2194 /* The watch was just removed */
2195 continue;
2196
2197 u = hashmap_get(m->cgroup_inotify_wd_unit, INT_TO_PTR(e->wd));
2198 if (!u) /* Note that inotify might deliver
2199 * events for a watch even after it
2200 * was removed, because it was queued
2201 * before the removal. Let's ignore
2202 * this here safely. */
2203 continue;
2204
09e24654 2205 unit_add_to_cgroup_empty_queue(u);
efdb0237
LP
2206 }
2207 }
8e274523
LP
2208}
2209
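/* Illustrative sketch, not part of the original source: on the unified hierarchy, emptiness can be observed
 * by putting an inotify watch on a cgroup's cgroup.events file and re-reading its "populated" field whenever
 * the file is modified. The watch setup and the re-read are shown; the event loop around them is omitted. */
#include <stdio.h>
#include <string.h>
#include <sys/inotify.h>
#include <unistd.h>

static int sketch_check_populated(const char *cgroup_dir) {
        char events_path[4096], line[256];
        int fd, wd, populated = -1;
        FILE *f;

        snprintf(events_path, sizeof(events_path), "%s/cgroup.events", cgroup_dir);

        fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
        if (fd < 0)
                return -1;

        wd = inotify_add_watch(fd, events_path, IN_MODIFY);
        if (wd < 0) {
                close(fd);
                return -1;
        }

        /* In a real loop one would read() struct inotify_event records from fd before re-reading the file. */
        f = fopen(events_path, "re");
        if (f) {
                while (fgets(line, sizeof(line), f))
                        if (sscanf(line, "populated %d", &populated) == 1)
                                break;
                fclose(f);
        }

        close(fd);
        return populated; /* 0 = empty, 1 = has processes, -1 = unknown */
}
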
8e274523 2210int manager_setup_cgroup(Manager *m) {
9444b1f2 2211 _cleanup_free_ char *path = NULL;
10bd3e2e 2212 const char *scope_path;
efdb0237 2213 CGroupController c;
b4cccbc1 2214 int r, all_unified;
efdb0237 2215 char *e;
8e274523
LP
2216
2217 assert(m);
2218
35d2e7ec 2219 /* 1. Determine hierarchy */
efdb0237 2220 m->cgroup_root = mfree(m->cgroup_root);
9444b1f2 2221 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root);
23bbb0de
MS
2222 if (r < 0)
2223 return log_error_errno(r, "Cannot determine cgroup we are running in: %m");
8e274523 2224
efdb0237
LP
2225 /* Chop off the init scope, if we are already located in it */
2226 e = endswith(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
0d8c31ff 2227
efdb0237
LP
2228 /* LEGACY: Also chop off the system slice if we are in
2229 * it. This is to support live upgrades from older systemd
2230 * versions where PID 1 was moved there. Also see
2231 * cg_get_root_path(). */
463d0d15 2232 if (!e && MANAGER_IS_SYSTEM(m)) {
9444b1f2 2233 e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE);
15c60e99 2234 if (!e)
efdb0237 2235 e = endswith(m->cgroup_root, "/system"); /* even more legacy */
0baf24dd 2236 }
efdb0237
LP
2237 if (e)
2238 *e = 0;
7ccfb64a 2239
7546145e
LP
2240 /* And make sure to store away the root value without trailing slash, even for the root dir, so that we can
2241 * easily prepend it everywhere. */
2242 delete_trailing_chars(m->cgroup_root, "/");
8e274523 2243
35d2e7ec 2244 /* 2. Show data */
9444b1f2 2245 r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
23bbb0de
MS
2246 if (r < 0)
2247 return log_error_errno(r, "Cannot find cgroup mount point: %m");
8e274523 2248
415fc41c
TH
2249 r = cg_unified_flush();
2250 if (r < 0)
2251 return log_error_errno(r, "Couldn't determine if we are running in the unified hierarchy: %m");
5da38d07 2252
b4cccbc1 2253 all_unified = cg_all_unified();
d4c819ed
ZJS
2254 if (all_unified < 0)
2255 return log_error_errno(all_unified, "Couldn't determine whether we are in all unified mode: %m");
2256 if (all_unified > 0)
efdb0237 2257 log_debug("Unified cgroup hierarchy is located at %s.", path);
b4cccbc1 2258 else {
c22800e4 2259 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
b4cccbc1
LP
2260 if (r < 0)
2261 return log_error_errno(r, "Failed to determine whether systemd's own controller is in unified mode: %m");
2262 if (r > 0)
2263 log_debug("Unified cgroup hierarchy is located at %s. Controllers are on legacy hierarchies.", path);
2264 else
2265 log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER_LEGACY ". File system hierarchy is at %s.", path);
2266 }
efdb0237 2267
09e24654
LP
2268 /* 3. Allocate cgroup empty defer event source */
2269 m->cgroup_empty_event_source = sd_event_source_unref(m->cgroup_empty_event_source);
2270 r = sd_event_add_defer(m->event, &m->cgroup_empty_event_source, on_cgroup_empty_event, m);
2271 if (r < 0)
2272 return log_error_errno(r, "Failed to create cgroup empty event source: %m");
2273
2274 r = sd_event_source_set_priority(m->cgroup_empty_event_source, SD_EVENT_PRIORITY_NORMAL-5);
2275 if (r < 0)
2276 return log_error_errno(r, "Failed to set priority of cgroup empty event source: %m");
2277
2278 r = sd_event_source_set_enabled(m->cgroup_empty_event_source, SD_EVENT_OFF);
2279 if (r < 0)
2280 return log_error_errno(r, "Failed to disable cgroup empty event source: %m");
2281
2282 (void) sd_event_source_set_description(m->cgroup_empty_event_source, "cgroup-empty");
2283
2284 /* 4. Install notifier inotify object, or agent */
10bd3e2e 2285 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0) {
c6c18be3 2286
09e24654 2287 /* In the unified hierarchy we can get cgroup empty notifications via inotify. */
efdb0237 2288
10bd3e2e
LP
2289 m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
2290 safe_close(m->cgroup_inotify_fd);
efdb0237 2291
10bd3e2e
LP
2292 m->cgroup_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
2293 if (m->cgroup_inotify_fd < 0)
2294 return log_error_errno(errno, "Failed to create control group inotify object: %m");
efdb0237 2295
10bd3e2e
LP
2296 r = sd_event_add_io(m->event, &m->cgroup_inotify_event_source, m->cgroup_inotify_fd, EPOLLIN, on_cgroup_inotify_event, m);
2297 if (r < 0)
2298 return log_error_errno(r, "Failed to watch control group inotify object: %m");
efdb0237 2299
10bd3e2e
LP
2300 /* Process cgroup empty notifications early, but after service notifications and SIGCHLD. Also
2301 * see handling of cgroup agent notifications, for the classic cgroup hierarchy support. */
09e24654 2302 r = sd_event_source_set_priority(m->cgroup_inotify_event_source, SD_EVENT_PRIORITY_NORMAL-4);
10bd3e2e
LP
2303 if (r < 0)
2304 return log_error_errno(r, "Failed to set priority of inotify event source: %m");
efdb0237 2305
10bd3e2e 2306 (void) sd_event_source_set_description(m->cgroup_inotify_event_source, "cgroup-inotify");
efdb0237 2307
10bd3e2e 2308 } else if (MANAGER_IS_SYSTEM(m) && m->test_run_flags == 0) {
efdb0237 2309
10bd3e2e
LP
2310 /* On the legacy hierarchy we only get notifications via cgroup agents. (Which isn't really reliable,
2311 * since it does not generate events when control groups with children run empty.) */
8e274523 2312
10bd3e2e 2313 r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, SYSTEMD_CGROUP_AGENT_PATH);
23bbb0de 2314 if (r < 0)
10bd3e2e
LP
2315 log_warning_errno(r, "Failed to install release agent, ignoring: %m");
2316 else if (r > 0)
2317 log_debug("Installed release agent.");
2318 else if (r == 0)
2319 log_debug("Release agent already installed.");
2320 }
efdb0237 2321
09e24654 2322 /* 5. Make sure we are in the special "init.scope" unit in the root slice. */
10bd3e2e
LP
2323 scope_path = strjoina(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
2324 r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
aa77e234
MS
2325 if (r >= 0) {
2326 /* Also, move all other userspace processes remaining in the root cgroup into that scope. */
2327 r = cg_migrate(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
2328 if (r < 0)
2329 log_warning_errno(r, "Couldn't move remaining userspace processes, ignoring: %m");
c6c18be3 2330
aa77e234
MS
2331 /* 6. And pin it, so that it cannot be unmounted */
2332 safe_close(m->pin_cgroupfs_fd);
2333 m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
2334 if (m->pin_cgroupfs_fd < 0)
2335 return log_error_errno(errno, "Failed to open pin file: %m");
0d8c31ff 2336
b4dec49f 2337 } else if (!m->test_run_flags)
aa77e234 2338 return log_error_errno(r, "Failed to create %s control group: %m", scope_path);
10bd3e2e 2339
09e24654 2340 /* 7. Always enable hierarchical support if it exists... */
10bd3e2e
LP
2341 if (!all_unified && m->test_run_flags == 0)
2342 (void) cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");
c6c18be3 2343
09e24654 2344 /* 8. Figure out which controllers are supported, and log about it */
efdb0237
LP
2345 r = cg_mask_supported(&m->cgroup_supported);
2346 if (r < 0)
2347 return log_error_errno(r, "Failed to determine supported controllers: %m");
efdb0237 2348 for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++)
eee0a1e4 2349 log_debug("Controller '%s' supported: %s", cgroup_controller_to_string(c), yes_no(m->cgroup_supported & CGROUP_CONTROLLER_TO_MASK(c)));
9156e799 2350
a32360f1 2351 return 0;
8e274523
LP
2352}
2353
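/* Illustrative sketch, not part of the original source: the "all unified" check performed via
 * cg_all_unified() above can be approximated by looking at the filesystem magic of /sys/fs/cgroup, which is
 * CGROUP2_SUPER_MAGIC when only cgroup v2 is mounted there. */
#include <linux/magic.h>
#include <sys/vfs.h>

static int sketch_all_unified(void) {
        struct statfs fs;

        if (statfs("/sys/fs/cgroup", &fs) < 0)
                return -1;

        return fs.f_type == CGROUP2_SUPER_MAGIC; /* 1 = pure unified, 0 = hybrid or legacy setup */
}
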
c6c18be3 2354void manager_shutdown_cgroup(Manager *m, bool delete) {
8e274523
LP
2355 assert(m);
2356
9444b1f2
LP
2357 /* We can't really delete the group, since we are in it. But
2358 * let's trim it. */
f6c63f6f 2359 if (delete && m->cgroup_root && m->test_run_flags != MANAGER_TEST_RUN_MINIMAL)
efdb0237
LP
2360 (void) cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);
2361
09e24654
LP
2362 m->cgroup_empty_event_source = sd_event_source_unref(m->cgroup_empty_event_source);
2363
efdb0237
LP
2364 m->cgroup_inotify_wd_unit = hashmap_free(m->cgroup_inotify_wd_unit);
2365
2366 m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
2367 m->cgroup_inotify_fd = safe_close(m->cgroup_inotify_fd);
8e274523 2368
03e334a1 2369 m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd);
c6c18be3 2370
efdb0237 2371 m->cgroup_root = mfree(m->cgroup_root);
8e274523
LP
2372}
2373
4ad49000 2374Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) {
acb14d31 2375 char *p;
4ad49000 2376 Unit *u;
acb14d31
LP
2377
2378 assert(m);
2379 assert(cgroup);
acb14d31 2380
4ad49000
LP
2381 u = hashmap_get(m->cgroup_unit, cgroup);
2382 if (u)
2383 return u;
acb14d31 2384
8e70580b 2385 p = strdupa(cgroup);
acb14d31
LP
2386 for (;;) {
2387 char *e;
2388
2389 e = strrchr(p, '/');
efdb0237
LP
2390 if (!e || e == p)
2391 return hashmap_get(m->cgroup_unit, SPECIAL_ROOT_SLICE);
acb14d31
LP
2392
2393 *e = 0;
2394
4ad49000
LP
2395 u = hashmap_get(m->cgroup_unit, p);
2396 if (u)
2397 return u;
acb14d31
LP
2398 }
2399}
2400
b3ac818b 2401Unit *manager_get_unit_by_pid_cgroup(Manager *m, pid_t pid) {
4ad49000 2402 _cleanup_free_ char *cgroup = NULL;
8e274523 2403
8c47c732
LP
2404 assert(m);
2405
62a76913 2406 if (!pid_is_valid(pid))
b3ac818b
LP
2407 return NULL;
2408
62a76913 2409 if (cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup) < 0)
b3ac818b
LP
2410 return NULL;
2411
2412 return manager_get_unit_by_cgroup(m, cgroup);
2413}
2414
2415Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {
62a76913 2416 Unit *u, **array;
b3ac818b
LP
2417
2418 assert(m);
2419
62a76913
LP
2420 /* Note that a process might be owned by multiple units; we return only one here, which is good enough for most
2421 * cases, though not strictly correct. We prefer the one reported by cgroup membership, as that's the most
2422 * relevant one as children of the process will be assigned to that one, too, before all else. */
2423
2424 if (!pid_is_valid(pid))
8c47c732
LP
2425 return NULL;
2426
2ca9d979 2427 if (pid == getpid_cached())
efdb0237
LP
2428 return hashmap_get(m->units, SPECIAL_INIT_SCOPE);
2429
62a76913 2430 u = manager_get_unit_by_pid_cgroup(m, pid);
5fe8876b
LP
2431 if (u)
2432 return u;
2433
62a76913 2434 u = hashmap_get(m->watch_pids, PID_TO_PTR(pid));
5fe8876b
LP
2435 if (u)
2436 return u;
2437
62a76913
LP
2438 array = hashmap_get(m->watch_pids, PID_TO_PTR(-pid));
2439 if (array)
2440 return array[0];
2441
2442 return NULL;
6dde1f33 2443}
4fbf50b3 2444
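/* Illustrative sketch, not part of the original source: cg_pid_get_path() used above essentially parses
 * /proc/<pid>/cgroup; on a unified (v2) setup the relevant line has the form "0::/some/cgroup/path". */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static int sketch_pid_get_cgroup(pid_t pid, char *ret, size_t ret_size) {
        char path[64], line[4096];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/%lu/cgroup", (unsigned long) pid);

        f = fopen(path, "re");
        if (!f)
                return -1;

        while (fgets(line, sizeof(line), f))
                if (strncmp(line, "0::", 3) == 0) {
                        line[strcspn(line, "\n")] = 0; /* drop the trailing newline */
                        snprintf(ret, ret_size, "%s", line + 3);
                        fclose(f);
                        return 0;
                }

        fclose(f);
        return -1;
}
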
4ad49000
LP
2445int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
2446 Unit *u;
4fbf50b3 2447
4ad49000
LP
2448 assert(m);
2449 assert(cgroup);
4fbf50b3 2450
09e24654
LP
2451 /* Called on the legacy hierarchy whenever we get an explicit cgroup notification from the cgroup agent process
2452 * or from the --system instance */
2453
d8fdc620
LP
2454 log_debug("Got cgroup empty notification for: %s", cgroup);
2455
4ad49000 2456 u = manager_get_unit_by_cgroup(m, cgroup);
5ad096b3
LP
2457 if (!u)
2458 return 0;
b56c28c3 2459
09e24654
LP
2460 unit_add_to_cgroup_empty_queue(u);
2461 return 1;
5ad096b3
LP
2462}
2463
2464int unit_get_memory_current(Unit *u, uint64_t *ret) {
2465 _cleanup_free_ char *v = NULL;
2466 int r;
2467
2468 assert(u);
2469 assert(ret);
2470
2e4025c0 2471 if (!UNIT_CGROUP_BOOL(u, memory_accounting))
cf3b4be1
LP
2472 return -ENODATA;
2473
5ad096b3
LP
2474 if (!u->cgroup_path)
2475 return -ENODATA;
2476
1f73aa00
LP
2477 /* The root cgroup doesn't expose this information, let's get it from /proc instead */
2478 if (unit_has_root_cgroup(u))
2479 return procfs_memory_get_current(ret);
2480
efdb0237 2481 if ((u->cgroup_realized_mask & CGROUP_MASK_MEMORY) == 0)
5ad096b3
LP
2482 return -ENODATA;
2483
b4cccbc1
LP
2484 r = cg_all_unified();
2485 if (r < 0)
2486 return r;
2487 if (r > 0)
efdb0237 2488 r = cg_get_attribute("memory", u->cgroup_path, "memory.current", &v);
b4cccbc1
LP
2489 else
2490 r = cg_get_attribute("memory", u->cgroup_path, "memory.usage_in_bytes", &v);
5ad096b3
LP
2491 if (r == -ENOENT)
2492 return -ENODATA;
2493 if (r < 0)
2494 return r;
2495
2496 return safe_atou64(v, ret);
2497}
2498
03a7b521
LP
2499int unit_get_tasks_current(Unit *u, uint64_t *ret) {
2500 _cleanup_free_ char *v = NULL;
2501 int r;
2502
2503 assert(u);
2504 assert(ret);
2505
2e4025c0 2506 if (!UNIT_CGROUP_BOOL(u, tasks_accounting))
cf3b4be1
LP
2507 return -ENODATA;
2508
03a7b521
LP
2509 if (!u->cgroup_path)
2510 return -ENODATA;
2511
c36a69f4
LP
2512 /* The root cgroup doesn't expose this information, let's get it from /proc instead */
2513 if (unit_has_root_cgroup(u))
2514 return procfs_tasks_get_current(ret);
2515
1f73aa00
LP
2516 if ((u->cgroup_realized_mask & CGROUP_MASK_PIDS) == 0)
2517 return -ENODATA;
2518
03a7b521
LP
2519 r = cg_get_attribute("pids", u->cgroup_path, "pids.current", &v);
2520 if (r == -ENOENT)
2521 return -ENODATA;
2522 if (r < 0)
2523 return r;
2524
2525 return safe_atou64(v, ret);
2526}
2527
5ad096b3
LP
2528static int unit_get_cpu_usage_raw(Unit *u, nsec_t *ret) {
2529 _cleanup_free_ char *v = NULL;
2530 uint64_t ns;
2531 int r;
2532
2533 assert(u);
2534 assert(ret);
2535
2536 if (!u->cgroup_path)
2537 return -ENODATA;
2538
1f73aa00
LP
2539 /* The root cgroup doesn't expose this information, let's get it from /proc instead */
2540 if (unit_has_root_cgroup(u))
2541 return procfs_cpu_get_usage(ret);
2542
b4cccbc1
LP
2543 r = cg_all_unified();
2544 if (r < 0)
2545 return r;
2546 if (r > 0) {
66ebf6c0
TH
2547 _cleanup_free_ char *val = NULL;
2548 uint64_t us;
5ad096b3 2549
66ebf6c0
TH
2550 if ((u->cgroup_realized_mask & CGROUP_MASK_CPU) == 0)
2551 return -ENODATA;
5ad096b3 2552
b734a4ff 2553 r = cg_get_keyed_attribute("cpu", u->cgroup_path, "cpu.stat", STRV_MAKE("usage_usec"), &val);
66ebf6c0
TH
2554 if (r < 0)
2555 return r;
b734a4ff
LP
2556 if (IN_SET(r, -ENOENT, -ENXIO))
2557 return -ENODATA;
66ebf6c0
TH
2558
2559 r = safe_atou64(val, &us);
2560 if (r < 0)
2561 return r;
2562
2563 ns = us * NSEC_PER_USEC;
2564 } else {
2565 if ((u->cgroup_realized_mask & CGROUP_MASK_CPUACCT) == 0)
2566 return -ENODATA;
2567
2568 r = cg_get_attribute("cpuacct", u->cgroup_path, "cpuacct.usage", &v);
2569 if (r == -ENOENT)
2570 return -ENODATA;
2571 if (r < 0)
2572 return r;
2573
2574 r = safe_atou64(v, &ns);
2575 if (r < 0)
2576 return r;
2577 }
5ad096b3
LP
2578
2579 *ret = ns;
2580 return 0;
2581}
2582
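/* Illustrative sketch, not part of the original source: the unified-hierarchy branch above reads the
 * "usage_usec" key out of cpu.stat and converts it to nanoseconds; stripped of the helper layers, the
 * parsing looks roughly like this. The cgroup directory path is passed in by the caller. */
#include <inttypes.h>
#include <stdio.h>
#include <string.h>

static int sketch_read_cpu_usage_nsec(const char *cgroup_dir, uint64_t *ret_nsec) {
        char path[4096], key[64];
        uint64_t us;
        FILE *f;

        snprintf(path, sizeof(path), "%s/cpu.stat", cgroup_dir);

        f = fopen(path, "re");
        if (!f)
                return -1;

        while (fscanf(f, "%63s %" SCNu64, key, &us) == 2)
                if (strcmp(key, "usage_usec") == 0) {
                        *ret_nsec = us * 1000; /* usec -> nsec */
                        fclose(f);
                        return 0;
                }

        fclose(f);
        return -1;
}
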
2583int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
2584 nsec_t ns;
2585 int r;
2586
fe700f46
LP
2587 assert(u);
2588
2589 /* Retrieve the current CPU usage counter. This will subtract the CPU counter taken when the unit was
2590 * started. If the cgroup has been removed already, returns the last cached value. To cache the value, simply
2591 * call this function with a NULL return value. */
2592
2e4025c0 2593 if (!UNIT_CGROUP_BOOL(u, cpu_accounting))
cf3b4be1
LP
2594 return -ENODATA;
2595
5ad096b3 2596 r = unit_get_cpu_usage_raw(u, &ns);
fe700f46
LP
2597 if (r == -ENODATA && u->cpu_usage_last != NSEC_INFINITY) {
2598 /* If we can't get the CPU usage anymore (because the cgroup was already removed, for example), use our
2599 * cached value. */
2600
2601 if (ret)
2602 *ret = u->cpu_usage_last;
2603 return 0;
2604 }
5ad096b3
LP
2605 if (r < 0)
2606 return r;
2607
66ebf6c0
TH
2608 if (ns > u->cpu_usage_base)
2609 ns -= u->cpu_usage_base;
5ad096b3
LP
2610 else
2611 ns = 0;
2612
fe700f46
LP
2613 u->cpu_usage_last = ns;
2614 if (ret)
2615 *ret = ns;
2616
5ad096b3
LP
2617 return 0;
2618}
2619
906c06f6
DM
2620int unit_get_ip_accounting(
2621 Unit *u,
2622 CGroupIPAccountingMetric metric,
2623 uint64_t *ret) {
2624
6b659ed8 2625 uint64_t value;
906c06f6
DM
2626 int fd, r;
2627
2628 assert(u);
2629 assert(metric >= 0);
2630 assert(metric < _CGROUP_IP_ACCOUNTING_METRIC_MAX);
2631 assert(ret);
2632
2e4025c0 2633 if (!UNIT_CGROUP_BOOL(u, ip_accounting))
cf3b4be1
LP
2634 return -ENODATA;
2635
906c06f6
DM
2636 fd = IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_INGRESS_PACKETS) ?
2637 u->ip_accounting_ingress_map_fd :
2638 u->ip_accounting_egress_map_fd;
906c06f6
DM
2639 if (fd < 0)
2640 return -ENODATA;
2641
2642 if (IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
6b659ed8 2643 r = bpf_firewall_read_accounting(fd, &value, NULL);
906c06f6 2644 else
6b659ed8
LP
2645 r = bpf_firewall_read_accounting(fd, NULL, &value);
2646 if (r < 0)
2647 return r;
2648
2649 /* Add in additional metrics from a previous runtime. Note that when reexecing/reloading the daemon we compile
2650 * all BPF programs and maps anew, but serialize the old counters. When deserializing we store them in the
2651 * ip_accounting_extra[] field, and add them in here transparently. */
2652
2653 *ret = value + u->ip_accounting_extra[metric];
906c06f6
DM
2654
2655 return r;
2656}
2657
2658int unit_reset_cpu_accounting(Unit *u) {
5ad096b3
LP
2659 nsec_t ns;
2660 int r;
2661
2662 assert(u);
2663
fe700f46
LP
2664 u->cpu_usage_last = NSEC_INFINITY;
2665
5ad096b3
LP
2666 r = unit_get_cpu_usage_raw(u, &ns);
2667 if (r < 0) {
66ebf6c0 2668 u->cpu_usage_base = 0;
5ad096b3 2669 return r;
b56c28c3 2670 }
2633eb83 2671
66ebf6c0 2672 u->cpu_usage_base = ns;
4ad49000 2673 return 0;
4fbf50b3
LP
2674}
2675
906c06f6
DM
2676int unit_reset_ip_accounting(Unit *u) {
2677 int r = 0, q = 0;
2678
2679 assert(u);
2680
2681 if (u->ip_accounting_ingress_map_fd >= 0)
2682 r = bpf_firewall_reset_accounting(u->ip_accounting_ingress_map_fd);
2683
2684 if (u->ip_accounting_egress_map_fd >= 0)
2685 q = bpf_firewall_reset_accounting(u->ip_accounting_egress_map_fd);
2686
6b659ed8
LP
2687 zero(u->ip_accounting_extra);
2688
906c06f6
DM
2689 return r < 0 ? r : q;
2690}
2691
e7ab4d1a
LP
2692void unit_invalidate_cgroup(Unit *u, CGroupMask m) {
2693 assert(u);
2694
2695 if (!UNIT_HAS_CGROUP_CONTEXT(u))
2696 return;
2697
2698 if (m == 0)
2699 return;
2700
538b4852
TH
2701 /* always invalidate compat pairs together */
2702 if (m & (CGROUP_MASK_IO | CGROUP_MASK_BLKIO))
2703 m |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;
2704
7cce4fb7
LP
2705 if (m & (CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT))
2706 m |= CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT;
2707
60c728ad 2708 if ((u->cgroup_realized_mask & m) == 0) /* NOP? */
e7ab4d1a
LP
2709 return;
2710
2711 u->cgroup_realized_mask &= ~m;
91a6073e 2712 unit_add_to_cgroup_realize_queue(u);
e7ab4d1a
LP
2713}
2714
906c06f6
DM
2715void unit_invalidate_cgroup_bpf(Unit *u) {
2716 assert(u);
2717
2718 if (!UNIT_HAS_CGROUP_CONTEXT(u))
2719 return;
2720
60c728ad 2721 if (u->cgroup_bpf_state == UNIT_CGROUP_BPF_INVALIDATED) /* NOP? */
906c06f6
DM
2722 return;
2723
2724 u->cgroup_bpf_state = UNIT_CGROUP_BPF_INVALIDATED;
91a6073e 2725 unit_add_to_cgroup_realize_queue(u);
906c06f6
DM
2726
2727 /* If we are a slice unit, we also need to compile a new BPF program for all our children, as the IP access
2728 * list of our children includes our own. */
2729 if (u->type == UNIT_SLICE) {
2730 Unit *member;
2731 Iterator i;
eef85c4a 2732 void *v;
906c06f6 2733
eef85c4a 2734 HASHMAP_FOREACH_KEY(v, member, u->dependencies[UNIT_BEFORE], i) {
906c06f6
DM
2735 if (member == u)
2736 continue;
2737
2738 if (UNIT_DEREF(member->slice) != u)
2739 continue;
2740
2741 unit_invalidate_cgroup_bpf(member);
2742 }
2743 }
2744}
2745
1d9cc876
LP
2746bool unit_cgroup_delegate(Unit *u) {
2747 CGroupContext *c;
2748
2749 assert(u);
2750
2751 if (!UNIT_VTABLE(u)->can_delegate)
2752 return false;
2753
2754 c = unit_get_cgroup_context(u);
2755 if (!c)
2756 return false;
2757
2758 return c->delegate;
2759}
2760
e7ab4d1a
LP
2761void manager_invalidate_startup_units(Manager *m) {
2762 Iterator i;
2763 Unit *u;
2764
2765 assert(m);
2766
2767 SET_FOREACH(u, m->startup_units, i)
13c31542 2768 unit_invalidate_cgroup(u, CGROUP_MASK_CPU|CGROUP_MASK_IO|CGROUP_MASK_BLKIO);
e7ab4d1a
LP
2769}
2770
4ad49000
LP
2771static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
2772 [CGROUP_AUTO] = "auto",
2773 [CGROUP_CLOSED] = "closed",
2774 [CGROUP_STRICT] = "strict",
2775};
4fbf50b3 2776
4ad49000 2777DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);