git.ipfire.org Git - thirdparty/systemd.git/blame - src/core/cgroup.c
core: rework how we track service and scope PIDs
53e1b683 1/* SPDX-License-Identifier: LGPL-2.1+ */
8e274523
LP
2/***
3 This file is part of systemd.
4
4ad49000 5 Copyright 2013 Lennart Poettering
8e274523
LP
6***/
7
c6c18be3 8#include <fcntl.h>
e41969e3 9#include <fnmatch.h>
8c6db833 10
b5efdb8a 11#include "alloc-util.h"
18c528e9 12#include "blockdev-util.h"
906c06f6 13#include "bpf-firewall.h"
6592b975 14#include "bus-error.h"
03a7b521 15#include "cgroup-util.h"
3ffd4af2
LP
16#include "cgroup.h"
17#include "fd-util.h"
0d39fa9c 18#include "fileio.h"
77601719 19#include "fs-util.h"
6bedfcbb 20#include "parse-util.h"
9eb977db 21#include "path-util.h"
03a7b521 22#include "process-util.h"
c36a69f4 23#include "procfs-util.h"
9444b1f2 24#include "special.h"
906c06f6 25#include "stdio-util.h"
8b43440b 26#include "string-table.h"
07630cea 27#include "string-util.h"
cc6271f1 28#include "virt.h"
8e274523 29
9a054909
LP
30#define CGROUP_CPU_QUOTA_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)
31
cc6271f1
LP
32bool manager_owns_root_cgroup(Manager *m) {
33 assert(m);
34
35 /* Returns true if we are managing the root cgroup. Note that it isn't sufficient to just check whether the
36 * group root path equals "/" since that will also be the case if CLONE_NEWCGROUP is in the mix. Since there
37 * appears to be no nice way to detect whether we are in a CLONE_NEWCGROUP namespace we instead just check if
38 * we run in any kind of container virtualization. */
39
40 if (detect_container() > 0)
41 return false;
42
57ea45e1 43 return empty_or_root(m->cgroup_root);
cc6271f1
LP
44}
45
f3725e64
LP
46bool unit_has_root_cgroup(Unit *u) {
47 assert(u);
48
cc6271f1
LP
49 /* Returns whether this unit manages the root cgroup. This will return true if this unit is the root slice and
50 * the manager manages the root cgroup. */
f3725e64 51
cc6271f1 52 if (!manager_owns_root_cgroup(u->manager))
f3725e64
LP
53 return false;
54
cc6271f1 55 return unit_has_name(u, SPECIAL_ROOT_SLICE);
f3725e64
LP
56}
57
2b40998d 58static void cgroup_compat_warn(void) {
128fadc9
TH
59 static bool cgroup_compat_warned = false;
60
61 if (cgroup_compat_warned)
62 return;
63
cc6271f1
LP
64 log_warning("cgroup compatibility translation between legacy and unified hierarchy settings activated. "
65 "See cgroup-compat debug messages for details.");
66
128fadc9
TH
67 cgroup_compat_warned = true;
68}
69
70#define log_cgroup_compat(unit, fmt, ...) do { \
71 cgroup_compat_warn(); \
72 log_unit_debug(unit, "cgroup-compat: " fmt, ##__VA_ARGS__); \
2b40998d 73 } while (false)
128fadc9 74
4ad49000
LP
75void cgroup_context_init(CGroupContext *c) {
76 assert(c);
77
78 /* Initialize everything to the kernel defaults, assuming the
79 * structure is preinitialized to 0 */
80
66ebf6c0
TH
81 c->cpu_weight = CGROUP_WEIGHT_INVALID;
82 c->startup_cpu_weight = CGROUP_WEIGHT_INVALID;
83 c->cpu_quota_per_sec_usec = USEC_INFINITY;
84
d53d9474
LP
85 c->cpu_shares = CGROUP_CPU_SHARES_INVALID;
86 c->startup_cpu_shares = CGROUP_CPU_SHARES_INVALID;
d53d9474 87
da4d897e
TH
88 c->memory_high = CGROUP_LIMIT_MAX;
89 c->memory_max = CGROUP_LIMIT_MAX;
96e131ea 90 c->memory_swap_max = CGROUP_LIMIT_MAX;
da4d897e
TH
91
92 c->memory_limit = CGROUP_LIMIT_MAX;
b2f8b02e 93
13c31542
TH
94 c->io_weight = CGROUP_WEIGHT_INVALID;
95 c->startup_io_weight = CGROUP_WEIGHT_INVALID;
96
d53d9474
LP
97 c->blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;
98 c->startup_blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;
99
100 c->tasks_max = (uint64_t) -1;
4ad49000 101}
8e274523 102
4ad49000
LP
103void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
104 assert(c);
105 assert(a);
106
71fda00f 107 LIST_REMOVE(device_allow, c->device_allow, a);
4ad49000
LP
108 free(a->path);
109 free(a);
110}
111
13c31542
TH
112void cgroup_context_free_io_device_weight(CGroupContext *c, CGroupIODeviceWeight *w) {
113 assert(c);
114 assert(w);
115
116 LIST_REMOVE(device_weights, c->io_device_weights, w);
117 free(w->path);
118 free(w);
119}
120
121void cgroup_context_free_io_device_limit(CGroupContext *c, CGroupIODeviceLimit *l) {
122 assert(c);
123 assert(l);
124
125 LIST_REMOVE(device_limits, c->io_device_limits, l);
126 free(l->path);
127 free(l);
128}
129
4ad49000
LP
130void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w) {
131 assert(c);
132 assert(w);
133
71fda00f 134 LIST_REMOVE(device_weights, c->blockio_device_weights, w);
4ad49000
LP
135 free(w->path);
136 free(w);
137}
138
139void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b) {
140 assert(c);
8e274523 141 assert(b);
8e274523 142
71fda00f 143 LIST_REMOVE(device_bandwidths, c->blockio_device_bandwidths, b);
4ad49000
LP
144 free(b->path);
145 free(b);
146}
147
148void cgroup_context_done(CGroupContext *c) {
149 assert(c);
150
13c31542
TH
151 while (c->io_device_weights)
152 cgroup_context_free_io_device_weight(c, c->io_device_weights);
153
154 while (c->io_device_limits)
155 cgroup_context_free_io_device_limit(c, c->io_device_limits);
156
4ad49000
LP
157 while (c->blockio_device_weights)
158 cgroup_context_free_blockio_device_weight(c, c->blockio_device_weights);
159
160 while (c->blockio_device_bandwidths)
161 cgroup_context_free_blockio_device_bandwidth(c, c->blockio_device_bandwidths);
162
163 while (c->device_allow)
164 cgroup_context_free_device_allow(c, c->device_allow);
6a48d82f
DM
165
166 c->ip_address_allow = ip_address_access_free_all(c->ip_address_allow);
167 c->ip_address_deny = ip_address_access_free_all(c->ip_address_deny);
4ad49000
LP
168}
169
170void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) {
13c31542
TH
171 CGroupIODeviceLimit *il;
172 CGroupIODeviceWeight *iw;
4ad49000
LP
173 CGroupBlockIODeviceBandwidth *b;
174 CGroupBlockIODeviceWeight *w;
175 CGroupDeviceAllow *a;
c21c9906 176 IPAddressAccessItem *iaai;
9a054909 177 char u[FORMAT_TIMESPAN_MAX];
4ad49000
LP
178
179 assert(c);
180 assert(f);
181
182 prefix = strempty(prefix);
183
184 fprintf(f,
185 "%sCPUAccounting=%s\n"
13c31542 186 "%sIOAccounting=%s\n"
4ad49000
LP
187 "%sBlockIOAccounting=%s\n"
188 "%sMemoryAccounting=%s\n"
d53d9474 189 "%sTasksAccounting=%s\n"
c21c9906 190 "%sIPAccounting=%s\n"
66ebf6c0
TH
191 "%sCPUWeight=%" PRIu64 "\n"
192 "%sStartupCPUWeight=%" PRIu64 "\n"
d53d9474
LP
193 "%sCPUShares=%" PRIu64 "\n"
194 "%sStartupCPUShares=%" PRIu64 "\n"
b2f8b02e 195 "%sCPUQuotaPerSecSec=%s\n"
13c31542
TH
196 "%sIOWeight=%" PRIu64 "\n"
197 "%sStartupIOWeight=%" PRIu64 "\n"
d53d9474
LP
198 "%sBlockIOWeight=%" PRIu64 "\n"
199 "%sStartupBlockIOWeight=%" PRIu64 "\n"
da4d897e
TH
200 "%sMemoryLow=%" PRIu64 "\n"
201 "%sMemoryHigh=%" PRIu64 "\n"
202 "%sMemoryMax=%" PRIu64 "\n"
96e131ea 203 "%sMemorySwapMax=%" PRIu64 "\n"
4ad49000 204 "%sMemoryLimit=%" PRIu64 "\n"
03a7b521 205 "%sTasksMax=%" PRIu64 "\n"
a931ad47
LP
206 "%sDevicePolicy=%s\n"
207 "%sDelegate=%s\n",
4ad49000 208 prefix, yes_no(c->cpu_accounting),
13c31542 209 prefix, yes_no(c->io_accounting),
4ad49000
LP
210 prefix, yes_no(c->blockio_accounting),
211 prefix, yes_no(c->memory_accounting),
d53d9474 212 prefix, yes_no(c->tasks_accounting),
c21c9906 213 prefix, yes_no(c->ip_accounting),
66ebf6c0
TH
214 prefix, c->cpu_weight,
215 prefix, c->startup_cpu_weight,
4ad49000 216 prefix, c->cpu_shares,
95ae05c0 217 prefix, c->startup_cpu_shares,
b1d6dcf5 218 prefix, format_timespan(u, sizeof(u), c->cpu_quota_per_sec_usec, 1),
13c31542
TH
219 prefix, c->io_weight,
220 prefix, c->startup_io_weight,
4ad49000 221 prefix, c->blockio_weight,
95ae05c0 222 prefix, c->startup_blockio_weight,
da4d897e
TH
223 prefix, c->memory_low,
224 prefix, c->memory_high,
225 prefix, c->memory_max,
96e131ea 226 prefix, c->memory_swap_max,
4ad49000 227 prefix, c->memory_limit,
03a7b521 228 prefix, c->tasks_max,
a931ad47
LP
229 prefix, cgroup_device_policy_to_string(c->device_policy),
230 prefix, yes_no(c->delegate));
4ad49000 231
02638280
LP
232 if (c->delegate) {
233 _cleanup_free_ char *t = NULL;
234
235 (void) cg_mask_to_string(c->delegate_controllers, &t);
236
47a78d41 237 fprintf(f, "%sDelegateControllers=%s\n",
02638280
LP
238 prefix,
239 strempty(t));
240 }
241
4ad49000
LP
242 LIST_FOREACH(device_allow, a, c->device_allow)
243 fprintf(f,
244 "%sDeviceAllow=%s %s%s%s\n",
245 prefix,
246 a->path,
247 a->r ? "r" : "", a->w ? "w" : "", a->m ? "m" : "");
248
13c31542
TH
249 LIST_FOREACH(device_weights, iw, c->io_device_weights)
250 fprintf(f,
251 "%sIODeviceWeight=%s %" PRIu64,
252 prefix,
253 iw->path,
254 iw->weight);
255
256 LIST_FOREACH(device_limits, il, c->io_device_limits) {
257 char buf[FORMAT_BYTES_MAX];
9be57249
TH
258 CGroupIOLimitType type;
259
260 for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
261 if (il->limits[type] != cgroup_io_limit_defaults[type])
262 fprintf(f,
263 "%s%s=%s %s\n",
264 prefix,
265 cgroup_io_limit_type_to_string(type),
266 il->path,
267 format_bytes(buf, sizeof(buf), il->limits[type]));
13c31542
TH
268 }
269
4ad49000
LP
270 LIST_FOREACH(device_weights, w, c->blockio_device_weights)
271 fprintf(f,
d53d9474 272 "%sBlockIODeviceWeight=%s %" PRIu64 "\n",
4ad49000
LP
273 prefix,
274 w->path,
275 w->weight);
276
277 LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
278 char buf[FORMAT_BYTES_MAX];
279
979d0311
TH
280 if (b->rbps != CGROUP_LIMIT_MAX)
281 fprintf(f,
282 "%sBlockIOReadBandwidth=%s %s\n",
283 prefix,
284 b->path,
285 format_bytes(buf, sizeof(buf), b->rbps));
286 if (b->wbps != CGROUP_LIMIT_MAX)
287 fprintf(f,
288 "%sBlockIOWriteBandwidth=%s %s\n",
289 prefix,
290 b->path,
291 format_bytes(buf, sizeof(buf), b->wbps));
4ad49000 292 }
c21c9906
LP
293
294 LIST_FOREACH(items, iaai, c->ip_address_allow) {
295 _cleanup_free_ char *k = NULL;
296
297 (void) in_addr_to_string(iaai->family, &iaai->address, &k);
298 fprintf(f, "%sIPAddressAllow=%s/%u\n", prefix, strnull(k), iaai->prefixlen);
299 }
300
301 LIST_FOREACH(items, iaai, c->ip_address_deny) {
302 _cleanup_free_ char *k = NULL;
303
304 (void) in_addr_to_string(iaai->family, &iaai->address, &k);
305 fprintf(f, "%sIPAddressDeny=%s/%u\n", prefix, strnull(k), iaai->prefixlen);
306 }
4ad49000
LP
307}
308
13c31542 309static int lookup_block_device(const char *p, dev_t *dev) {
4ad49000 310 struct stat st;
4ad49000
LP
311
312 assert(p);
313 assert(dev);
314
b1c05b98 315 if (stat(p, &st) < 0)
4a62c710 316 return log_warning_errno(errno, "Couldn't stat device %s: %m", p);
8e274523 317
4ad49000
LP
318 if (S_ISBLK(st.st_mode))
319 *dev = st.st_rdev;
320 else if (major(st.st_dev) != 0) {
321 /* If this is not a device node then find the block
322 * device this file is stored on */
323 *dev = st.st_dev;
324
325 /* If this is a partition, try to get the originating
326 * block device */
18c528e9 327 (void) block_get_whole_disk(*dev, dev);
4ad49000
LP
328 } else {
329 log_warning("%s is not a block device and file system block device cannot be determined or is not local.", p);
330 return -ENODEV;
331 }
8e274523 332
8e274523 333 return 0;
8e274523
LP
334}
335
4ad49000
LP
336static int whitelist_device(const char *path, const char *node, const char *acc) {
337 char buf[2+DECIMAL_STR_MAX(dev_t)*2+2+4];
338 struct stat st;
b200489b 339 bool ignore_notfound;
8c6db833 340 int r;
8e274523 341
4ad49000
LP
342 assert(path);
343 assert(acc);
8e274523 344
b200489b
DR
345 if (node[0] == '-') {
346 /* Non-existent paths starting with "-" must be silently ignored */
347 node++;
348 ignore_notfound = true;
349 } else
350 ignore_notfound = false;
351
4ad49000 352 if (stat(node, &st) < 0) {
b200489b 353 if (errno == ENOENT && ignore_notfound)
e7330dfe
DP
354 return 0;
355
356 return log_warning_errno(errno, "Couldn't stat device %s: %m", node);
4ad49000
LP
357 }
358
359 if (!S_ISCHR(st.st_mode) && !S_ISBLK(st.st_mode)) {
360 log_warning("%s is not a device.", node);
361 return -ENODEV;
362 }
363
364 sprintf(buf,
365 "%c %u:%u %s",
366 S_ISCHR(st.st_mode) ? 'c' : 'b',
367 major(st.st_rdev), minor(st.st_rdev),
368 acc);
369
370 r = cg_set_attribute("devices", path, "devices.allow", buf);
1aeab12b 371 if (r < 0)
077ba06e 372 log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
714e2e1d 373 "Failed to set devices.allow on %s: %m", path);
4ad49000
LP
374
375 return r;
8e274523
LP
376}
377
90060676
LP
378static int whitelist_major(const char *path, const char *name, char type, const char *acc) {
379 _cleanup_fclose_ FILE *f = NULL;
380 char line[LINE_MAX];
381 bool good = false;
382 int r;
383
384 assert(path);
385 assert(acc);
4c701096 386 assert(IN_SET(type, 'b', 'c'));
90060676
LP
387
388 f = fopen("/proc/devices", "re");
4a62c710
MS
389 if (!f)
390 return log_warning_errno(errno, "Cannot open /proc/devices to resolve %s (%c): %m", name, type);
90060676
LP
391
392 FOREACH_LINE(line, f, goto fail) {
393 char buf[2+DECIMAL_STR_MAX(unsigned)+3+4], *p, *w;
394 unsigned maj;
395
396 truncate_nl(line);
397
398 if (type == 'c' && streq(line, "Character devices:")) {
399 good = true;
400 continue;
401 }
402
403 if (type == 'b' && streq(line, "Block devices:")) {
404 good = true;
405 continue;
406 }
407
408 if (isempty(line)) {
409 good = false;
410 continue;
411 }
412
413 if (!good)
414 continue;
415
416 p = strstrip(line);
417
418 w = strpbrk(p, WHITESPACE);
419 if (!w)
420 continue;
421 *w = 0;
422
423 r = safe_atou(p, &maj);
424 if (r < 0)
425 continue;
426 if (maj <= 0)
427 continue;
428
429 w++;
430 w += strspn(w, WHITESPACE);
e41969e3
LP
431
432 if (fnmatch(name, w, 0) != 0)
90060676
LP
433 continue;
434
435 sprintf(buf,
436 "%c %u:* %s",
437 type,
438 maj,
439 acc);
440
441 r = cg_set_attribute("devices", path, "devices.allow", buf);
1aeab12b 442 if (r < 0)
077ba06e 443 log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
714e2e1d 444 "Failed to set devices.allow on %s: %m", path);
90060676
LP
445 }
446
447 return 0;
448
449fail:
25f027c5 450 return log_warning_errno(errno, "Failed to read /proc/devices: %m");
90060676
LP
451}
452
66ebf6c0
TH
453static bool cgroup_context_has_cpu_weight(CGroupContext *c) {
454 return c->cpu_weight != CGROUP_WEIGHT_INVALID ||
455 c->startup_cpu_weight != CGROUP_WEIGHT_INVALID;
456}
457
458static bool cgroup_context_has_cpu_shares(CGroupContext *c) {
459 return c->cpu_shares != CGROUP_CPU_SHARES_INVALID ||
460 c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID;
461}
462
463static uint64_t cgroup_context_cpu_weight(CGroupContext *c, ManagerState state) {
464 if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
465 c->startup_cpu_weight != CGROUP_WEIGHT_INVALID)
466 return c->startup_cpu_weight;
467 else if (c->cpu_weight != CGROUP_WEIGHT_INVALID)
468 return c->cpu_weight;
469 else
470 return CGROUP_WEIGHT_DEFAULT;
471}
472
473static uint64_t cgroup_context_cpu_shares(CGroupContext *c, ManagerState state) {
474 if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
475 c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID)
476 return c->startup_cpu_shares;
477 else if (c->cpu_shares != CGROUP_CPU_SHARES_INVALID)
478 return c->cpu_shares;
479 else
480 return CGROUP_CPU_SHARES_DEFAULT;
481}
482
483static void cgroup_apply_unified_cpu_config(Unit *u, uint64_t weight, uint64_t quota) {
484 char buf[MAX(DECIMAL_STR_MAX(uint64_t) + 1, (DECIMAL_STR_MAX(usec_t) + 1) * 2)];
485 int r;
486
487 xsprintf(buf, "%" PRIu64 "\n", weight);
488 r = cg_set_attribute("cpu", u->cgroup_path, "cpu.weight", buf);
489 if (r < 0)
490 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
491 "Failed to set cpu.weight: %m");
492
493 if (quota != USEC_INFINITY)
494 xsprintf(buf, USEC_FMT " " USEC_FMT "\n",
495 quota * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC, CGROUP_CPU_QUOTA_PERIOD_USEC);
496 else
497 xsprintf(buf, "max " USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);
498
499 r = cg_set_attribute("cpu", u->cgroup_path, "cpu.max", buf);
500
501 if (r < 0)
502 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
503 "Failed to set cpu.max: %m");
504}
505
506static void cgroup_apply_legacy_cpu_config(Unit *u, uint64_t shares, uint64_t quota) {
507 char buf[MAX(DECIMAL_STR_MAX(uint64_t), DECIMAL_STR_MAX(usec_t)) + 1];
508 int r;
509
510 xsprintf(buf, "%" PRIu64 "\n", shares);
511 r = cg_set_attribute("cpu", u->cgroup_path, "cpu.shares", buf);
512 if (r < 0)
513 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
514 "Failed to set cpu.shares: %m");
515
516 xsprintf(buf, USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);
517 r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_period_us", buf);
518 if (r < 0)
519 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
520 "Failed to set cpu.cfs_period_us: %m");
521
522 if (quota != USEC_INFINITY) {
523 xsprintf(buf, USEC_FMT "\n", quota * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC);
524 r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_quota_us", buf);
525 } else
526 r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_quota_us", "-1");
527 if (r < 0)
528 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
529 "Failed to set cpu.cfs_quota_us: %m");
530}
531
532static uint64_t cgroup_cpu_shares_to_weight(uint64_t shares) {
533 return CLAMP(shares * CGROUP_WEIGHT_DEFAULT / CGROUP_CPU_SHARES_DEFAULT,
534 CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
535}
536
537static uint64_t cgroup_cpu_weight_to_shares(uint64_t weight) {
538 return CLAMP(weight * CGROUP_CPU_SHARES_DEFAULT / CGROUP_WEIGHT_DEFAULT,
539 CGROUP_CPU_SHARES_MIN, CGROUP_CPU_SHARES_MAX);
540}
541
508c45da 542static bool cgroup_context_has_io_config(CGroupContext *c) {
538b4852
TH
543 return c->io_accounting ||
544 c->io_weight != CGROUP_WEIGHT_INVALID ||
545 c->startup_io_weight != CGROUP_WEIGHT_INVALID ||
546 c->io_device_weights ||
547 c->io_device_limits;
548}
549
508c45da 550static bool cgroup_context_has_blockio_config(CGroupContext *c) {
538b4852
TH
551 return c->blockio_accounting ||
552 c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
553 c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
554 c->blockio_device_weights ||
555 c->blockio_device_bandwidths;
556}
557
508c45da 558static uint64_t cgroup_context_io_weight(CGroupContext *c, ManagerState state) {
64faf04c
TH
559 if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
560 c->startup_io_weight != CGROUP_WEIGHT_INVALID)
561 return c->startup_io_weight;
562 else if (c->io_weight != CGROUP_WEIGHT_INVALID)
563 return c->io_weight;
564 else
565 return CGROUP_WEIGHT_DEFAULT;
566}
567
508c45da 568static uint64_t cgroup_context_blkio_weight(CGroupContext *c, ManagerState state) {
64faf04c
TH
569 if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
570 c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
571 return c->startup_blockio_weight;
572 else if (c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
573 return c->blockio_weight;
574 else
575 return CGROUP_BLKIO_WEIGHT_DEFAULT;
576}
577
508c45da 578static uint64_t cgroup_weight_blkio_to_io(uint64_t blkio_weight) {
538b4852
TH
579 return CLAMP(blkio_weight * CGROUP_WEIGHT_DEFAULT / CGROUP_BLKIO_WEIGHT_DEFAULT,
580 CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
581}
582
508c45da 583static uint64_t cgroup_weight_io_to_blkio(uint64_t io_weight) {
538b4852
TH
584 return CLAMP(io_weight * CGROUP_BLKIO_WEIGHT_DEFAULT / CGROUP_WEIGHT_DEFAULT,
585 CGROUP_BLKIO_WEIGHT_MIN, CGROUP_BLKIO_WEIGHT_MAX);
586}
587
f29ff115 588static void cgroup_apply_io_device_weight(Unit *u, const char *dev_path, uint64_t io_weight) {
64faf04c
TH
589 char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
590 dev_t dev;
591 int r;
592
593 r = lookup_block_device(dev_path, &dev);
594 if (r < 0)
595 return;
596
597 xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), io_weight);
f29ff115 598 r = cg_set_attribute("io", u->cgroup_path, "io.weight", buf);
64faf04c 599 if (r < 0)
f29ff115
TH
600 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
601 "Failed to set io.weight: %m");
64faf04c
TH
602}
603
f29ff115 604static void cgroup_apply_blkio_device_weight(Unit *u, const char *dev_path, uint64_t blkio_weight) {
64faf04c
TH
605 char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
606 dev_t dev;
607 int r;
608
609 r = lookup_block_device(dev_path, &dev);
610 if (r < 0)
611 return;
612
613 xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), blkio_weight);
f29ff115 614 r = cg_set_attribute("blkio", u->cgroup_path, "blkio.weight_device", buf);
64faf04c 615 if (r < 0)
f29ff115
TH
616 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
617 "Failed to set blkio.weight_device: %m");
64faf04c
TH
618}
619
f29ff115 620static unsigned cgroup_apply_io_device_limit(Unit *u, const char *dev_path, uint64_t *limits) {
64faf04c
TH
621 char limit_bufs[_CGROUP_IO_LIMIT_TYPE_MAX][DECIMAL_STR_MAX(uint64_t)];
622 char buf[DECIMAL_STR_MAX(dev_t)*2+2+(6+DECIMAL_STR_MAX(uint64_t)+1)*4];
623 CGroupIOLimitType type;
624 dev_t dev;
625 unsigned n = 0;
626 int r;
627
628 r = lookup_block_device(dev_path, &dev);
629 if (r < 0)
630 return 0;
631
632 for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++) {
633 if (limits[type] != cgroup_io_limit_defaults[type]) {
634 xsprintf(limit_bufs[type], "%" PRIu64, limits[type]);
635 n++;
636 } else {
637 xsprintf(limit_bufs[type], "%s", limits[type] == CGROUP_LIMIT_MAX ? "max" : "0");
638 }
639 }
640
641 xsprintf(buf, "%u:%u rbps=%s wbps=%s riops=%s wiops=%s\n", major(dev), minor(dev),
642 limit_bufs[CGROUP_IO_RBPS_MAX], limit_bufs[CGROUP_IO_WBPS_MAX],
643 limit_bufs[CGROUP_IO_RIOPS_MAX], limit_bufs[CGROUP_IO_WIOPS_MAX]);
f29ff115 644 r = cg_set_attribute("io", u->cgroup_path, "io.max", buf);
64faf04c 645 if (r < 0)
f29ff115
TH
646 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
647 "Failed to set io.max: %m");
64faf04c
TH
648 return n;
649}
650
f29ff115 651static unsigned cgroup_apply_blkio_device_limit(Unit *u, const char *dev_path, uint64_t rbps, uint64_t wbps) {
64faf04c
TH
652 char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
653 dev_t dev;
654 unsigned n = 0;
655 int r;
656
657 r = lookup_block_device(dev_path, &dev);
658 if (r < 0)
659 return 0;
660
661 if (rbps != CGROUP_LIMIT_MAX)
662 n++;
663 sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), rbps);
f29ff115 664 r = cg_set_attribute("blkio", u->cgroup_path, "blkio.throttle.read_bps_device", buf);
64faf04c 665 if (r < 0)
f29ff115
TH
666 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
667 "Failed to set blkio.throttle.read_bps_device: %m");
64faf04c
TH
668
669 if (wbps != CGROUP_LIMIT_MAX)
670 n++;
671 sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), wbps);
f29ff115 672 r = cg_set_attribute("blkio", u->cgroup_path, "blkio.throttle.write_bps_device", buf);
64faf04c 673 if (r < 0)
f29ff115
TH
674 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
675 "Failed to set blkio.throttle.write_bps_device: %m");
64faf04c
TH
676
677 return n;
678}
679
da4d897e 680static bool cgroup_context_has_unified_memory_config(CGroupContext *c) {
96e131ea 681 return c->memory_low > 0 || c->memory_high != CGROUP_LIMIT_MAX || c->memory_max != CGROUP_LIMIT_MAX || c->memory_swap_max != CGROUP_LIMIT_MAX;
da4d897e
TH
682}
683
f29ff115 684static void cgroup_apply_unified_memory_limit(Unit *u, const char *file, uint64_t v) {
da4d897e
TH
685 char buf[DECIMAL_STR_MAX(uint64_t) + 1] = "max";
686 int r;
687
688 if (v != CGROUP_LIMIT_MAX)
689 xsprintf(buf, "%" PRIu64 "\n", v);
690
f29ff115 691 r = cg_set_attribute("memory", u->cgroup_path, file, buf);
da4d897e 692 if (r < 0)
f29ff115
TH
693 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
694 "Failed to set %s: %m", file);
da4d897e
TH
695}
696
0f2d84d2 697static void cgroup_apply_firewall(Unit *u) {
0f2d84d2
LP
698 assert(u);
699
acf7f253 700 /* Best-effort: let's apply IP firewalling and/or accounting if that's enabled */
906c06f6 701
acf7f253 702 if (bpf_firewall_compile(u) < 0)
906c06f6
DM
703 return;
704
705 (void) bpf_firewall_install(u);
906c06f6
DM
706}
707
708static void cgroup_context_apply(
709 Unit *u,
710 CGroupMask apply_mask,
711 bool apply_bpf,
712 ManagerState state) {
713
f29ff115
TH
714 const char *path;
715 CGroupContext *c;
01efdf13 716 bool is_root;
4ad49000
LP
717 int r;
718
f29ff115
TH
719 assert(u);
720
906c06f6
DM
721 /* Nothing to do? Exit early! */
722 if (apply_mask == 0 && !apply_bpf)
4ad49000 723 return;
8e274523 724
f3725e64
LP
725 /* Some cgroup attributes are not supported on the root cgroup, hence silently ignore */
726 is_root = unit_has_root_cgroup(u);
727
728 assert_se(c = unit_get_cgroup_context(u));
729 assert_se(path = u->cgroup_path);
730
731 if (is_root) /* Make sure we don't try to display messages with an empty path. */
6da13913 732 path = "/";
01efdf13 733
714e2e1d
LP
734 /* We generally ignore errors caused by read-only mounted
735 * cgroup trees (assuming we are running in a container then),
736 * and missing cgroups, i.e. EROFS and ENOENT. */
737
906c06f6
DM
738 if ((apply_mask & CGROUP_MASK_CPU) && !is_root) {
739 bool has_weight, has_shares;
740
741 has_weight = cgroup_context_has_cpu_weight(c);
742 has_shares = cgroup_context_has_cpu_shares(c);
8e274523 743
b4cccbc1 744 if (cg_all_unified() > 0) {
66ebf6c0 745 uint64_t weight;
b2f8b02e 746
66ebf6c0
TH
747 if (has_weight)
748 weight = cgroup_context_cpu_weight(c, state);
749 else if (has_shares) {
750 uint64_t shares = cgroup_context_cpu_shares(c, state);
b2f8b02e 751
66ebf6c0
TH
752 weight = cgroup_cpu_shares_to_weight(shares);
753
754 log_cgroup_compat(u, "Applying [Startup]CPUShares %" PRIu64 " as [Startup]CPUWeight %" PRIu64 " on %s",
755 shares, weight, path);
756 } else
757 weight = CGROUP_WEIGHT_DEFAULT;
758
759 cgroup_apply_unified_cpu_config(u, weight, c->cpu_quota_per_sec_usec);
760 } else {
761 uint64_t shares;
762
7d862ab8 763 if (has_weight) {
66ebf6c0
TH
764 uint64_t weight = cgroup_context_cpu_weight(c, state);
765
766 shares = cgroup_cpu_weight_to_shares(weight);
767
768 log_cgroup_compat(u, "Applying [Startup]CPUWeight %" PRIu64 " as [Startup]CPUShares %" PRIu64 " on %s",
769 weight, shares, path);
7d862ab8
TH
770 } else if (has_shares)
771 shares = cgroup_context_cpu_shares(c, state);
772 else
66ebf6c0
TH
773 shares = CGROUP_CPU_SHARES_DEFAULT;
774
775 cgroup_apply_legacy_cpu_config(u, shares, c->cpu_quota_per_sec_usec);
776 }
4ad49000
LP
777 }
778
906c06f6 779 if (apply_mask & CGROUP_MASK_IO) {
538b4852
TH
780 bool has_io = cgroup_context_has_io_config(c);
781 bool has_blockio = cgroup_context_has_blockio_config(c);
13c31542
TH
782
783 if (!is_root) {
64faf04c
TH
784 char buf[8+DECIMAL_STR_MAX(uint64_t)+1];
785 uint64_t weight;
13c31542 786
538b4852
TH
787 if (has_io)
788 weight = cgroup_context_io_weight(c, state);
128fadc9
TH
789 else if (has_blockio) {
790 uint64_t blkio_weight = cgroup_context_blkio_weight(c, state);
791
792 weight = cgroup_weight_blkio_to_io(blkio_weight);
793
794 log_cgroup_compat(u, "Applying [Startup]BlockIOWeight %" PRIu64 " as [Startup]IOWeight %" PRIu64,
795 blkio_weight, weight);
796 } else
538b4852 797 weight = CGROUP_WEIGHT_DEFAULT;
13c31542
TH
798
799 xsprintf(buf, "default %" PRIu64 "\n", weight);
800 r = cg_set_attribute("io", path, "io.weight", buf);
801 if (r < 0)
f29ff115
TH
802 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
803 "Failed to set io.weight: %m");
13c31542 804
538b4852
TH
805 if (has_io) {
806 CGroupIODeviceWeight *w;
807
808 /* FIXME: no way to reset this list */
809 LIST_FOREACH(device_weights, w, c->io_device_weights)
f29ff115 810 cgroup_apply_io_device_weight(u, w->path, w->weight);
538b4852
TH
811 } else if (has_blockio) {
812 CGroupBlockIODeviceWeight *w;
813
814 /* FIXME: no way to reset this list */
128fadc9
TH
815 LIST_FOREACH(device_weights, w, c->blockio_device_weights) {
816 weight = cgroup_weight_blkio_to_io(w->weight);
817
818 log_cgroup_compat(u, "Applying BlockIODeviceWeight %" PRIu64 " as IODeviceWeight %" PRIu64 " for %s",
819 w->weight, weight, w->path);
820
821 cgroup_apply_io_device_weight(u, w->path, weight);
822 }
538b4852 823 }
13c31542
TH
824 }
825
64faf04c 826 /* Apply limits and free ones without config. */
538b4852
TH
827 if (has_io) {
828 CGroupIODeviceLimit *l, *next;
829
830 LIST_FOREACH_SAFE(device_limits, l, next, c->io_device_limits) {
f29ff115 831 if (!cgroup_apply_io_device_limit(u, l->path, l->limits))
538b4852
TH
832 cgroup_context_free_io_device_limit(c, l);
833 }
834 } else if (has_blockio) {
835 CGroupBlockIODeviceBandwidth *b, *next;
836
837 LIST_FOREACH_SAFE(device_bandwidths, b, next, c->blockio_device_bandwidths) {
838 uint64_t limits[_CGROUP_IO_LIMIT_TYPE_MAX];
839 CGroupIOLimitType type;
840
841 for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
842 limits[type] = cgroup_io_limit_defaults[type];
843
844 limits[CGROUP_IO_RBPS_MAX] = b->rbps;
845 limits[CGROUP_IO_WBPS_MAX] = b->wbps;
846
128fadc9
TH
847 log_cgroup_compat(u, "Applying BlockIO{Read|Write}Bandwidth %" PRIu64 " %" PRIu64 " as IO{Read|Write}BandwidthMax for %s",
848 b->rbps, b->wbps, b->path);
849
f29ff115 850 if (!cgroup_apply_io_device_limit(u, b->path, limits))
538b4852
TH
851 cgroup_context_free_blockio_device_bandwidth(c, b);
852 }
13c31542
TH
853 }
854 }
855
906c06f6 856 if (apply_mask & CGROUP_MASK_BLKIO) {
538b4852
TH
857 bool has_io = cgroup_context_has_io_config(c);
858 bool has_blockio = cgroup_context_has_blockio_config(c);
4ad49000 859
01efdf13 860 if (!is_root) {
64faf04c
TH
861 char buf[DECIMAL_STR_MAX(uint64_t)+1];
862 uint64_t weight;
64faf04c 863
7d862ab8 864 if (has_io) {
128fadc9
TH
865 uint64_t io_weight = cgroup_context_io_weight(c, state);
866
538b4852 867 weight = cgroup_weight_io_to_blkio(cgroup_context_io_weight(c, state));
128fadc9
TH
868
869 log_cgroup_compat(u, "Applying [Startup]IOWeight %" PRIu64 " as [Startup]BlockIOWeight %" PRIu64,
870 io_weight, weight);
7d862ab8
TH
871 } else if (has_blockio)
872 weight = cgroup_context_blkio_weight(c, state);
873 else
538b4852 874 weight = CGROUP_BLKIO_WEIGHT_DEFAULT;
64faf04c
TH
875
876 xsprintf(buf, "%" PRIu64 "\n", weight);
01efdf13 877 r = cg_set_attribute("blkio", path, "blkio.weight", buf);
1aeab12b 878 if (r < 0)
f29ff115
TH
879 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
880 "Failed to set blkio.weight: %m");
4ad49000 881
7d862ab8 882 if (has_io) {
538b4852
TH
883 CGroupIODeviceWeight *w;
884
885 /* FIXME: no way to reset this list */
128fadc9
TH
886 LIST_FOREACH(device_weights, w, c->io_device_weights) {
887 weight = cgroup_weight_io_to_blkio(w->weight);
888
889 log_cgroup_compat(u, "Applying IODeviceWeight %" PRIu64 " as BlockIODeviceWeight %" PRIu64 " for %s",
890 w->weight, weight, w->path);
891
892 cgroup_apply_blkio_device_weight(u, w->path, weight);
893 }
7d862ab8
TH
894 } else if (has_blockio) {
895 CGroupBlockIODeviceWeight *w;
896
897 /* FIXME: no way to reset this list */
898 LIST_FOREACH(device_weights, w, c->blockio_device_weights)
899 cgroup_apply_blkio_device_weight(u, w->path, w->weight);
538b4852 900 }
4ad49000
LP
901 }
902
64faf04c 903 /* Apply limits and free ones without config. */
7d862ab8 904 if (has_io) {
538b4852
TH
905 CGroupIODeviceLimit *l, *next;
906
907 LIST_FOREACH_SAFE(device_limits, l, next, c->io_device_limits) {
128fadc9
TH
908 log_cgroup_compat(u, "Applying IO{Read|Write}BandwidthMax %" PRIu64 " %" PRIu64 " as BlockIO{Read|Write}Bandwidth for %s",
909 l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX], l->path);
910
f29ff115 911 if (!cgroup_apply_blkio_device_limit(u, l->path, l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX]))
538b4852
TH
912 cgroup_context_free_io_device_limit(c, l);
913 }
7d862ab8
TH
914 } else if (has_blockio) {
915 CGroupBlockIODeviceBandwidth *b, *next;
916
917 LIST_FOREACH_SAFE(device_bandwidths, b, next, c->blockio_device_bandwidths)
918 if (!cgroup_apply_blkio_device_limit(u, b->path, b->rbps, b->wbps))
919 cgroup_context_free_blockio_device_bandwidth(c, b);
d686d8a9 920 }
8e274523
LP
921 }
922
906c06f6 923 if ((apply_mask & CGROUP_MASK_MEMORY) && !is_root) {
b4cccbc1
LP
924 if (cg_all_unified() > 0) {
925 uint64_t max, swap_max = CGROUP_LIMIT_MAX;
efdb0237 926
96e131ea 927 if (cgroup_context_has_unified_memory_config(c)) {
da4d897e 928 max = c->memory_max;
96e131ea
WC
929 swap_max = c->memory_swap_max;
930 } else {
da4d897e 931 max = c->memory_limit;
efdb0237 932
128fadc9
TH
933 if (max != CGROUP_LIMIT_MAX)
934 log_cgroup_compat(u, "Applying MemoryLimit %" PRIu64 " as MemoryMax", max);
935 }
936
f29ff115
TH
937 cgroup_apply_unified_memory_limit(u, "memory.low", c->memory_low);
938 cgroup_apply_unified_memory_limit(u, "memory.high", c->memory_high);
939 cgroup_apply_unified_memory_limit(u, "memory.max", max);
96e131ea 940 cgroup_apply_unified_memory_limit(u, "memory.swap.max", swap_max);
efdb0237 941 } else {
da4d897e 942 char buf[DECIMAL_STR_MAX(uint64_t) + 1];
7d862ab8 943 uint64_t val;
da4d897e 944
7d862ab8 945 if (cgroup_context_has_unified_memory_config(c)) {
78a4ee59 946 val = c->memory_max;
7d862ab8
TH
947 log_cgroup_compat(u, "Applying MemoryMax %" PRIu64 " as MemoryLimit", val);
948 } else
949 val = c->memory_limit;
128fadc9 950
78a4ee59
DM
951 if (val == CGROUP_LIMIT_MAX)
952 strncpy(buf, "-1\n", sizeof(buf));
953 else
954 xsprintf(buf, "%" PRIu64 "\n", val);
955
da4d897e
TH
956 r = cg_set_attribute("memory", path, "memory.limit_in_bytes", buf);
957 if (r < 0)
f29ff115
TH
958 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
959 "Failed to set memory.limit_in_bytes: %m");
da4d897e 960 }
4ad49000 961 }
8e274523 962
906c06f6 963 if ((apply_mask & CGROUP_MASK_DEVICES) && !is_root) {
4ad49000 964 CGroupDeviceAllow *a;
8e274523 965
714e2e1d
LP
966 /* Changing the devices list of a populated cgroup
967 * might result in EINVAL, hence ignore EINVAL
968 * here. */
969
4ad49000
LP
970 if (c->device_allow || c->device_policy != CGROUP_AUTO)
971 r = cg_set_attribute("devices", path, "devices.deny", "a");
972 else
973 r = cg_set_attribute("devices", path, "devices.allow", "a");
1aeab12b 974 if (r < 0)
f29ff115
TH
975 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
976 "Failed to reset devices.list: %m");
fb385181 977
4ad49000
LP
978 if (c->device_policy == CGROUP_CLOSED ||
979 (c->device_policy == CGROUP_AUTO && c->device_allow)) {
980 static const char auto_devices[] =
7d711efb
LP
981 "/dev/null\0" "rwm\0"
982 "/dev/zero\0" "rwm\0"
983 "/dev/full\0" "rwm\0"
984 "/dev/random\0" "rwm\0"
985 "/dev/urandom\0" "rwm\0"
986 "/dev/tty\0" "rwm\0"
5a7f87a9 987 "/dev/ptmx\0" "rwm\0"
0d9e7991 988 /* Allow /run/systemd/inaccessible/{chr,blk} devices for mapping InaccessiblePaths */
e7330dfe
DP
989 "-/run/systemd/inaccessible/chr\0" "rwm\0"
990 "-/run/systemd/inaccessible/blk\0" "rwm\0";
4ad49000
LP
991
992 const char *x, *y;
993
994 NULSTR_FOREACH_PAIR(x, y, auto_devices)
995 whitelist_device(path, x, y);
7d711efb 996
5a7f87a9 997 /* PTS (/dev/pts) devices may not be duplicated, but accessed */
7d711efb 998 whitelist_major(path, "pts", 'c', "rw");
4ad49000
LP
999 }
1000
1001 LIST_FOREACH(device_allow, a, c->device_allow) {
fb4650aa 1002 char acc[4], *val;
4ad49000
LP
1003 unsigned k = 0;
1004
1005 if (a->r)
1006 acc[k++] = 'r';
1007 if (a->w)
1008 acc[k++] = 'w';
1009 if (a->m)
1010 acc[k++] = 'm';
fb385181 1011
4ad49000
LP
1012 if (k == 0)
1013 continue;
fb385181 1014
4ad49000 1015 acc[k++] = 0;
90060676 1016
27458ed6 1017 if (path_startswith(a->path, "/dev/"))
90060676 1018 whitelist_device(path, a->path, acc);
fb4650aa
ZJS
1019 else if ((val = startswith(a->path, "block-")))
1020 whitelist_major(path, val, 'b', acc);
1021 else if ((val = startswith(a->path, "char-")))
1022 whitelist_major(path, val, 'c', acc);
90060676 1023 else
f29ff115 1024 log_unit_debug(u, "Ignoring device %s while writing cgroup attribute.", a->path);
4ad49000
LP
1025 }
1026 }
03a7b521 1027
00b5974f
LP
1028 if (apply_mask & CGROUP_MASK_PIDS) {
1029
1030 if (is_root) {
1031 /* So, the "pids" controller does not expose anything on the root cgroup, in order not to
1032 * replicate knobs exposed elsewhere needlessly. We abstract this away here however, and when
1033 * the knobs of the root cgroup are modified propagate this to the relevant sysctls. There's a
1034 * non-obvious asymmetry however: unlike the cgroup properties we don't really want to take
1035 * exclusive ownership of the sysctls, but we still want to honour things if the user sets
1036 * limits. Hence we employ sort of a one-way strategy: when the user sets a bounded limit
1037 * through us it counts. When the user afterwards unsets it again (i.e. sets it to unbounded)
1038 * it also counts. But if the user never set a limit through us (i.e. we are the default of
1039 * "unbounded") we leave things unmodified. For this we manage a global boolean that we turn on
1040 * the first time we set a limit. Note that this boolean is flushed out on manager reload,
1041 * which is desirable so that there's an official way to release control of the sysctl from
1042 * systemd: set the limit to unbounded and reload. */
1043
1044 if (c->tasks_max != CGROUP_LIMIT_MAX) {
1045 u->manager->sysctl_pid_max_changed = true;
1046 r = procfs_tasks_set_limit(c->tasks_max);
1047 } else if (u->manager->sysctl_pid_max_changed)
1048 r = procfs_tasks_set_limit(TASKS_MAX);
1049 else
1050 r = 0;
03a7b521 1051
00b5974f
LP
1052 if (r < 0)
1053 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
1054 "Failed to write to tasks limit sysctls: %m");
03a7b521 1055
00b5974f
LP
1056 } else {
1057 if (c->tasks_max != CGROUP_LIMIT_MAX) {
1058 char buf[DECIMAL_STR_MAX(uint64_t) + 2];
03a7b521 1059
00b5974f
LP
1060 sprintf(buf, "%" PRIu64 "\n", c->tasks_max);
1061 r = cg_set_attribute("pids", path, "pids.max", buf);
1062 } else
1063 r = cg_set_attribute("pids", path, "pids.max", "max");
1064 if (r < 0)
1065 log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
1066 "Failed to set pids.max: %m");
1067 }
03a7b521 1068 }
906c06f6
DM
1069
1070 if (apply_bpf)
0f2d84d2 1071 cgroup_apply_firewall(u);
fb385181
LP
1072}
1073
efdb0237
LP
1074CGroupMask cgroup_context_get_mask(CGroupContext *c) {
1075 CGroupMask mask = 0;
8e274523 1076
4ad49000 1077 /* Figure out which controllers we need */
8e274523 1078
b2f8b02e 1079 if (c->cpu_accounting ||
66ebf6c0
TH
1080 cgroup_context_has_cpu_weight(c) ||
1081 cgroup_context_has_cpu_shares(c) ||
3a43da28 1082 c->cpu_quota_per_sec_usec != USEC_INFINITY)
efdb0237 1083 mask |= CGROUP_MASK_CPUACCT | CGROUP_MASK_CPU;
ecedd90f 1084
538b4852
TH
1085 if (cgroup_context_has_io_config(c) || cgroup_context_has_blockio_config(c))
1086 mask |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;
ecedd90f 1087
4ad49000 1088 if (c->memory_accounting ||
da4d897e
TH
1089 c->memory_limit != CGROUP_LIMIT_MAX ||
1090 cgroup_context_has_unified_memory_config(c))
efdb0237 1091 mask |= CGROUP_MASK_MEMORY;
8e274523 1092
a931ad47
LP
1093 if (c->device_allow ||
1094 c->device_policy != CGROUP_AUTO)
3905f127 1095 mask |= CGROUP_MASK_DEVICES;
4ad49000 1096
03a7b521 1097 if (c->tasks_accounting ||
8793fa25 1098 c->tasks_max != CGROUP_LIMIT_MAX)
03a7b521
LP
1099 mask |= CGROUP_MASK_PIDS;
1100
4ad49000 1101 return mask;
8e274523
LP
1102}
1103
efdb0237 1104CGroupMask unit_get_own_mask(Unit *u) {
4ad49000 1105 CGroupContext *c;
8e274523 1106
efdb0237
LP
1107 /* Returns the mask of controllers the unit needs for itself */
1108
4ad49000
LP
1109 c = unit_get_cgroup_context(u);
1110 if (!c)
1111 return 0;
8e274523 1112
64e844e5 1113 return cgroup_context_get_mask(c) | unit_get_delegate_mask(u);
02638280
LP
1114}
1115
1116CGroupMask unit_get_delegate_mask(Unit *u) {
1117 CGroupContext *c;
1118
1119 /* If delegation is turned on, then turn on selected controllers, unless we are on the legacy hierarchy and the
1120 * process we fork into is known to drop privileges, and hence shouldn't get access to the controllers.
19af675e 1121 *
02638280 1122 * Note that on the unified hierarchy it is safe to delegate controllers to unprivileged services. */
a931ad47 1123
1d9cc876 1124 if (!unit_cgroup_delegate(u))
02638280
LP
1125 return 0;
1126
1127 if (cg_all_unified() <= 0) {
a931ad47
LP
1128 ExecContext *e;
1129
1130 e = unit_get_exec_context(u);
02638280
LP
1131 if (e && !exec_context_maintains_privileges(e))
1132 return 0;
a931ad47
LP
1133 }
1134
1d9cc876 1135 assert_se(c = unit_get_cgroup_context(u));
02638280 1136 return c->delegate_controllers;
8e274523
LP
1137}
1138
efdb0237 1139CGroupMask unit_get_members_mask(Unit *u) {
4ad49000 1140 assert(u);
bc432dc7 1141
02638280 1142 /* Returns the mask of controllers all of the unit's children require, merged */
efdb0237 1143
bc432dc7
LP
1144 if (u->cgroup_members_mask_valid)
1145 return u->cgroup_members_mask;
1146
64e844e5 1147 u->cgroup_members_mask = 0;
bc432dc7
LP
1148
1149 if (u->type == UNIT_SLICE) {
eef85c4a 1150 void *v;
bc432dc7
LP
1151 Unit *member;
1152 Iterator i;
1153
eef85c4a 1154 HASHMAP_FOREACH_KEY(v, member, u->dependencies[UNIT_BEFORE], i) {
bc432dc7
LP
1155
1156 if (member == u)
1157 continue;
1158
d4fdc205 1159 if (UNIT_DEREF(member->slice) != u)
bc432dc7
LP
1160 continue;
1161
31604970 1162 u->cgroup_members_mask |= unit_get_subtree_mask(member); /* note that this calls ourselves again, for the children */
bc432dc7
LP
1163 }
1164 }
1165
1166 u->cgroup_members_mask_valid = true;
6414b7c9 1167 return u->cgroup_members_mask;
246aa6dd
LP
1168}
1169
efdb0237 1170CGroupMask unit_get_siblings_mask(Unit *u) {
4ad49000 1171 assert(u);
246aa6dd 1172
efdb0237
LP
1173 /* Returns the mask of controllers all of the unit's siblings
1174 * require, i.e. the members mask of the unit's parent slice
1175 * if there is one. */
1176
bc432dc7 1177 if (UNIT_ISSET(u->slice))
637f421e 1178 return unit_get_members_mask(UNIT_DEREF(u->slice));
4ad49000 1179
64e844e5 1180 return unit_get_subtree_mask(u); /* we are the top-level slice */
246aa6dd
LP
1181}
1182
efdb0237
LP
1183CGroupMask unit_get_subtree_mask(Unit *u) {
1184
1185 /* Returns the mask of this subtree, meaning of the group
1186 * itself and its children. */
1187
1188 return unit_get_own_mask(u) | unit_get_members_mask(u);
1189}
1190
1191CGroupMask unit_get_target_mask(Unit *u) {
1192 CGroupMask mask;
1193
1194 /* This returns the cgroup mask of all controllers to enable
1195 * for a specific cgroup, i.e. everything it needs itself,
1196 * plus all that its children need, plus all that its siblings
1197 * need. This is primarily useful on the legacy cgroup
1198 * hierarchy, where we need to duplicate each cgroup in each
1199 * hierarchy that shall be enabled for it. */
6414b7c9 1200
efdb0237
LP
1201 mask = unit_get_own_mask(u) | unit_get_members_mask(u) | unit_get_siblings_mask(u);
1202 mask &= u->manager->cgroup_supported;
1203
1204 return mask;
1205}
1206
1207CGroupMask unit_get_enable_mask(Unit *u) {
1208 CGroupMask mask;
1209
1210 /* This returns the cgroup mask of all controllers to enable
1211 * for the children of a specific cgroup. This is primarily
1212 * useful for the unified cgroup hierarchy, where each cgroup
1213 * controls which controllers are enabled for its children. */
1214
1215 mask = unit_get_members_mask(u);
6414b7c9
DS
1216 mask &= u->manager->cgroup_supported;
1217
1218 return mask;
1219}
1220
906c06f6
DM
1221bool unit_get_needs_bpf(Unit *u) {
1222 CGroupContext *c;
1223 Unit *p;
1224 assert(u);
1225
906c06f6
DM
1226 c = unit_get_cgroup_context(u);
1227 if (!c)
1228 return false;
1229
1230 if (c->ip_accounting ||
1231 c->ip_address_allow ||
1232 c->ip_address_deny)
1233 return true;
1234
1235 /* If any parent slice has an IP access list defined, it applies too */
1236 for (p = UNIT_DEREF(u->slice); p; p = UNIT_DEREF(p->slice)) {
1237 c = unit_get_cgroup_context(p);
1238 if (!c)
1239 return false;
1240
1241 if (c->ip_address_allow ||
1242 c->ip_address_deny)
1243 return true;
1244 }
1245
1246 return false;
1247}
1248
6414b7c9
DS
1249/* Recurse from a unit up through its containing slices, propagating
1250 * mask bits upward. A unit is also a member of itself. */
bc432dc7 1251void unit_update_cgroup_members_masks(Unit *u) {
efdb0237 1252 CGroupMask m;
bc432dc7
LP
1253 bool more;
1254
1255 assert(u);
1256
1257 /* Calculate subtree mask */
efdb0237 1258 m = unit_get_subtree_mask(u);
bc432dc7
LP
1259
1260 /* See if anything changed from the previous invocation. If
1261 * not, we're done. */
1262 if (u->cgroup_subtree_mask_valid && m == u->cgroup_subtree_mask)
1263 return;
1264
1265 more =
1266 u->cgroup_subtree_mask_valid &&
1267 ((m & ~u->cgroup_subtree_mask) != 0) &&
1268 ((~m & u->cgroup_subtree_mask) == 0);
1269
1270 u->cgroup_subtree_mask = m;
1271 u->cgroup_subtree_mask_valid = true;
1272
6414b7c9
DS
1273 if (UNIT_ISSET(u->slice)) {
1274 Unit *s = UNIT_DEREF(u->slice);
bc432dc7
LP
1275
1276 if (more)
1277 /* There's more set now than before. We
1278 * propagate the new mask to the parent's mask
1279 * (not caring if it actually was valid or
1280 * not). */
1281
1282 s->cgroup_members_mask |= m;
1283
1284 else
1285 /* There's less set now than before (or we
1286 * don't know); we need to recalculate
1287 * everything, so let's invalidate the
1288 * parent's members mask */
1289
1290 s->cgroup_members_mask_valid = false;
1291
1292 /* And now make sure that this change also hits our
1293 * grandparents */
1294 unit_update_cgroup_members_masks(s);
6414b7c9
DS
1295 }
1296}
1297
6592b975 1298const char *unit_get_realized_cgroup_path(Unit *u, CGroupMask mask) {
03b90d4b 1299
6592b975 1300 /* Returns the realized cgroup path of the specified unit where all specified controllers are available. */
03b90d4b
LP
1301
1302 while (u) {
6592b975 1303
03b90d4b
LP
1304 if (u->cgroup_path &&
1305 u->cgroup_realized &&
d94a24ca 1306 FLAGS_SET(u->cgroup_realized_mask, mask))
03b90d4b
LP
1307 return u->cgroup_path;
1308
1309 u = UNIT_DEREF(u->slice);
1310 }
1311
1312 return NULL;
1313}
1314
6592b975
LP
1315static const char *migrate_callback(CGroupMask mask, void *userdata) {
1316 return unit_get_realized_cgroup_path(userdata, mask);
1317}
1318
efdb0237
LP
1319char *unit_default_cgroup_path(Unit *u) {
1320 _cleanup_free_ char *escaped = NULL, *slice = NULL;
1321 int r;
1322
1323 assert(u);
1324
1325 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1326 return strdup(u->manager->cgroup_root);
1327
1328 if (UNIT_ISSET(u->slice) && !unit_has_name(UNIT_DEREF(u->slice), SPECIAL_ROOT_SLICE)) {
1329 r = cg_slice_to_path(UNIT_DEREF(u->slice)->id, &slice);
1330 if (r < 0)
1331 return NULL;
1332 }
1333
1334 escaped = cg_escape(u->id);
1335 if (!escaped)
1336 return NULL;
1337
1338 if (slice)
605405c6
ZJS
1339 return strjoin(u->manager->cgroup_root, "/", slice, "/",
1340 escaped);
efdb0237 1341 else
605405c6 1342 return strjoin(u->manager->cgroup_root, "/", escaped);
efdb0237
LP
1343}
1344
1345int unit_set_cgroup_path(Unit *u, const char *path) {
1346 _cleanup_free_ char *p = NULL;
1347 int r;
1348
1349 assert(u);
1350
1351 if (path) {
1352 p = strdup(path);
1353 if (!p)
1354 return -ENOMEM;
1355 } else
1356 p = NULL;
1357
1358 if (streq_ptr(u->cgroup_path, p))
1359 return 0;
1360
1361 if (p) {
1362 r = hashmap_put(u->manager->cgroup_unit, p, u);
1363 if (r < 0)
1364 return r;
1365 }
1366
1367 unit_release_cgroup(u);
1368
ae2a15bc 1369 u->cgroup_path = TAKE_PTR(p);
efdb0237
LP
1370
1371 return 1;
1372}
1373
1374int unit_watch_cgroup(Unit *u) {
ab2c3861 1375 _cleanup_free_ char *events = NULL;
efdb0237
LP
1376 int r;
1377
1378 assert(u);
1379
1380 if (!u->cgroup_path)
1381 return 0;
1382
1383 if (u->cgroup_inotify_wd >= 0)
1384 return 0;
1385
1386 /* Only applies to the unified hierarchy */
c22800e4 1387 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
b4cccbc1
LP
1388 if (r < 0)
1389 return log_error_errno(r, "Failed to determine whether the name=systemd hierarchy is unified: %m");
1390 if (r == 0)
efdb0237
LP
1391 return 0;
1392
1393 /* Don't watch the root slice, it's pointless. */
1394 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1395 return 0;
1396
1397 r = hashmap_ensure_allocated(&u->manager->cgroup_inotify_wd_unit, &trivial_hash_ops);
1398 if (r < 0)
1399 return log_oom();
1400
ab2c3861 1401 r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events", &events);
efdb0237
LP
1402 if (r < 0)
1403 return log_oom();
1404
ab2c3861 1405 u->cgroup_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, events, IN_MODIFY);
efdb0237
LP
1406 if (u->cgroup_inotify_wd < 0) {
1407
1408 if (errno == ENOENT) /* If the directory is already
1409 * gone we don't need to track
1410 * it, so this is not an error */
1411 return 0;
1412
1413 return log_unit_error_errno(u, errno, "Failed to add inotify watch descriptor for control group %s: %m", u->cgroup_path);
1414 }
1415
1416 r = hashmap_put(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd), u);
1417 if (r < 0)
1418 return log_unit_error_errno(u, r, "Failed to add inotify watch descriptor to hash map: %m");
1419
1420 return 0;
1421}
1422
a4634b21
LP
1423int unit_pick_cgroup_path(Unit *u) {
1424 _cleanup_free_ char *path = NULL;
1425 int r;
1426
1427 assert(u);
1428
1429 if (u->cgroup_path)
1430 return 0;
1431
1432 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1433 return -EINVAL;
1434
1435 path = unit_default_cgroup_path(u);
1436 if (!path)
1437 return log_oom();
1438
1439 r = unit_set_cgroup_path(u, path);
1440 if (r == -EEXIST)
1441 return log_unit_error_errno(u, r, "Control group %s exists already.", path);
1442 if (r < 0)
1443 return log_unit_error_errno(u, r, "Failed to set unit's control group path to %s: %m", path);
1444
1445 return 0;
1446}
1447
efdb0237
LP
1448static int unit_create_cgroup(
1449 Unit *u,
1450 CGroupMask target_mask,
906c06f6
DM
1451 CGroupMask enable_mask,
1452 bool needs_bpf) {
efdb0237 1453
0cd385d3 1454 CGroupContext *c;
bc432dc7 1455 int r;
64747e2d 1456
4ad49000 1457 assert(u);
64747e2d 1458
0cd385d3
LP
1459 c = unit_get_cgroup_context(u);
1460 if (!c)
1461 return 0;
1462
a4634b21
LP
1463 /* Figure out our cgroup path */
1464 r = unit_pick_cgroup_path(u);
1465 if (r < 0)
1466 return r;
b58b8e11 1467
03b90d4b 1468 /* First, create our own group */
efdb0237 1469 r = cg_create_everywhere(u->manager->cgroup_supported, target_mask, u->cgroup_path);
23bbb0de 1470 if (r < 0)
efdb0237
LP
1471 return log_unit_error_errno(u, r, "Failed to create cgroup %s: %m", u->cgroup_path);
1472
1473 /* Start watching it */
1474 (void) unit_watch_cgroup(u);
1475
1476 /* Enable all controllers we need */
1477 r = cg_enable_everywhere(u->manager->cgroup_supported, enable_mask, u->cgroup_path);
1478 if (r < 0)
1479 log_unit_warning_errno(u, r, "Failed to enable controllers on cgroup %s, ignoring: %m", u->cgroup_path);
03b90d4b
LP
1480
1481 /* Keep track that this is now realized */
4ad49000 1482 u->cgroup_realized = true;
efdb0237 1483 u->cgroup_realized_mask = target_mask;
ccf78df1 1484 u->cgroup_enabled_mask = enable_mask;
906c06f6 1485 u->cgroup_bpf_state = needs_bpf ? UNIT_CGROUP_BPF_ON : UNIT_CGROUP_BPF_OFF;
4ad49000 1486
1d9cc876 1487 if (u->type != UNIT_SLICE && !unit_cgroup_delegate(u)) {
0cd385d3
LP
1488
1489 /* Then, possibly move things over, but not if
1490 * subgroups may contain processes, which is the case
1491 * for slice and delegation units. */
1492 r = cg_migrate_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->cgroup_path, migrate_callback, u);
1493 if (r < 0)
efdb0237 1494 log_unit_warning_errno(u, r, "Failed to migrate cgroup to %s, ignoring: %m", u->cgroup_path);
0cd385d3 1495 }
03b90d4b 1496
64747e2d
LP
1497 return 0;
1498}
1499
6592b975
LP
1500static int unit_attach_pid_to_cgroup_via_bus(Unit *u, pid_t pid, const char *suffix_path) {
1501 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1502 char *pp;
7b3fd631 1503 int r;
6592b975 1504
7b3fd631
LP
1505 assert(u);
1506
6592b975
LP
1507 if (MANAGER_IS_SYSTEM(u->manager))
1508 return -EINVAL;
1509
1510 if (!u->manager->system_bus)
1511 return -EIO;
1512
1513 if (!u->cgroup_path)
1514 return -EINVAL;
1515
1516 /* Determine this unit's cgroup path relative to our cgroup root */
1517 pp = path_startswith(u->cgroup_path, u->manager->cgroup_root);
1518 if (!pp)
1519 return -EINVAL;
1520
1521 pp = strjoina("/", pp, suffix_path);
858d36c1 1522 path_simplify(pp, false);
6592b975
LP
1523
1524 r = sd_bus_call_method(u->manager->system_bus,
1525 "org.freedesktop.systemd1",
1526 "/org/freedesktop/systemd1",
1527 "org.freedesktop.systemd1.Manager",
1528 "AttachProcessesToUnit",
1529 &error, NULL,
1530 "ssau",
1531 NULL /* empty unit name means client's unit, i.e. us */, pp, 1, (uint32_t) pid);
7b3fd631 1532 if (r < 0)
6592b975
LP
1533 return log_unit_debug_errno(u, r, "Failed to attach unit process " PID_FMT " via the bus: %s", pid, bus_error_message(&error, r));
1534
1535 return 0;
1536}
1537
1538int unit_attach_pids_to_cgroup(Unit *u, Set *pids, const char *suffix_path) {
1539 CGroupMask delegated_mask;
1540 const char *p;
1541 Iterator i;
1542 void *pidp;
1543 int r, q;
1544
1545 assert(u);
1546
1547 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1548 return -EINVAL;
1549
1550 if (set_isempty(pids))
1551 return 0;
7b3fd631 1552
6592b975 1553 r = unit_realize_cgroup(u);
7b3fd631
LP
1554 if (r < 0)
1555 return r;
1556
6592b975
LP
1557 if (isempty(suffix_path))
1558 p = u->cgroup_path;
1559 else
1560 p = strjoina(u->cgroup_path, "/", suffix_path);
1561
1562 delegated_mask = unit_get_delegate_mask(u);
1563
1564 r = 0;
1565 SET_FOREACH(pidp, pids, i) {
1566 pid_t pid = PTR_TO_PID(pidp);
1567 CGroupController c;
1568
1569 /* First, attach the PID to the main cgroup hierarchy */
1570 q = cg_attach(SYSTEMD_CGROUP_CONTROLLER, p, pid);
1571 if (q < 0) {
1572 log_unit_debug_errno(u, q, "Couldn't move process " PID_FMT " to requested cgroup '%s': %m", pid, p);
1573
1574 if (MANAGER_IS_USER(u->manager) && IN_SET(q, -EPERM, -EACCES)) {
1575 int z;
1576
1577 /* If we are in a user instance, and we can't move the process ourselves due to
1578 * permission problems, let's ask the system instance about it instead. Since it's more
1579 * privileged it might be able to move the process across the leaves of a subtree whose
1580 * top node is not owned by us. */
1581
1582 z = unit_attach_pid_to_cgroup_via_bus(u, pid, suffix_path);
1583 if (z < 0)
1584 log_unit_debug_errno(u, z, "Couldn't move process " PID_FMT " to requested cgroup '%s' via the system bus either: %m", pid, p);
1585 else
1586 continue; /* When the bus thing worked via the bus we are fully done for this PID. */
1587 }
1588
1589 if (r >= 0)
1590 r = q; /* Remember first error */
1591
1592 continue;
1593 }
1594
1595 q = cg_all_unified();
1596 if (q < 0)
1597 return q;
1598 if (q > 0)
1599 continue;
1600
1601 /* In the legacy hierarchy, attach the process to the requested cgroup if possible, and if not to the
1602 * innermost realized one */
1603
1604 for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++) {
1605 CGroupMask bit = CGROUP_CONTROLLER_TO_MASK(c);
1606 const char *realized;
1607
1608 if (!(u->manager->cgroup_supported & bit))
1609 continue;
1610
1611 /* If this controller is delegated and realized, honour the caller's request for the cgroup suffix. */
1612 if (delegated_mask & u->cgroup_realized_mask & bit) {
1613 q = cg_attach(cgroup_controller_to_string(c), p, pid);
1614 if (q >= 0)
1615 continue; /* Success! */
1616
1617 log_unit_debug_errno(u, q, "Failed to attach PID " PID_FMT " to requested cgroup %s in controller %s, falling back to unit's cgroup: %m",
1618 pid, p, cgroup_controller_to_string(c));
1619 }
1620
1621 /* So this controller is either not delegated or not realized, or something else weird happened. In
1622 * that case let's attach the PID at least to the closest cgroup up the tree that is
1623 * realized. */
1624 realized = unit_get_realized_cgroup_path(u, bit);
1625 if (!realized)
1626 continue; /* Not even realized in the root slice? Then let's not bother */
1627
1628 q = cg_attach(cgroup_controller_to_string(c), realized, pid);
1629 if (q < 0)
1630 log_unit_debug_errno(u, q, "Failed to attach PID " PID_FMT " to realized cgroup %s in controller %s, ignoring: %m",
1631 pid, realized, cgroup_controller_to_string(c));
1632 }
1633 }
1634
1635 return r;
7b3fd631
LP
1636}
1637
4b58153d
LP
1638static void cgroup_xattr_apply(Unit *u) {
1639 char ids[SD_ID128_STRING_MAX];
1640 int r;
1641
1642 assert(u);
1643
1644 if (!MANAGER_IS_SYSTEM(u->manager))
1645 return;
1646
1647 if (sd_id128_is_null(u->invocation_id))
1648 return;
1649
1650 r = cg_set_xattr(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
1651 "trusted.invocation_id",
1652 sd_id128_to_string(u->invocation_id, ids), 32,
1653 0);
1654 if (r < 0)
0fb84499 1655 log_unit_debug_errno(u, r, "Failed to set invocation ID on control group %s, ignoring: %m", u->cgroup_path);
4b58153d
LP
1656}
1657
906c06f6
DM
1658static bool unit_has_mask_realized(
1659 Unit *u,
1660 CGroupMask target_mask,
1661 CGroupMask enable_mask,
1662 bool needs_bpf) {
1663
bc432dc7
LP
1664 assert(u);
1665
906c06f6
DM
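        /* A unit only counts as realized if its cgroup actually exists, both controller masks match what
         * we currently want, and the BPF state agrees with whether a BPF program is needed. */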
1666 return u->cgroup_realized &&
1667 u->cgroup_realized_mask == target_mask &&
1668 u->cgroup_enabled_mask == enable_mask &&
1669 ((needs_bpf && u->cgroup_bpf_state == UNIT_CGROUP_BPF_ON) ||
1670 (!needs_bpf && u->cgroup_bpf_state == UNIT_CGROUP_BPF_OFF));
6414b7c9
DS
1671}
1672
2aa57a65
LP
1673static void unit_add_to_cgroup_realize_queue(Unit *u) {
1674 assert(u);
1675
1676 if (u->in_cgroup_realize_queue)
1677 return;
1678
1679 LIST_PREPEND(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
1680 u->in_cgroup_realize_queue = true;
1681}
1682
1683static void unit_remove_from_cgroup_realize_queue(Unit *u) {
1684 assert(u);
1685
1686 if (!u->in_cgroup_realize_queue)
1687 return;
1688
1689 LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
1690 u->in_cgroup_realize_queue = false;
1691}
1692
6414b7c9
DS
1693/* Check if necessary controllers and attributes for a unit are in place.
1694 *
1695 * If so, do nothing.
1696 * If not, create paths, move processes over, and set attributes.
1697 *
1698 * Returns 0 on success and < 0 on failure. */
db785129 1699static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
efdb0237 1700 CGroupMask target_mask, enable_mask;
906c06f6 1701 bool needs_bpf, apply_bpf;
6414b7c9 1702 int r;
64747e2d 1703
4ad49000 1704 assert(u);
64747e2d 1705
2aa57a65 1706 unit_remove_from_cgroup_realize_queue(u);
64747e2d 1707
efdb0237 1708 target_mask = unit_get_target_mask(u);
ccf78df1 1709 enable_mask = unit_get_enable_mask(u);
906c06f6 1710 needs_bpf = unit_get_needs_bpf(u);
ccf78df1 1711
906c06f6 1712 if (unit_has_mask_realized(u, target_mask, enable_mask, needs_bpf))
0a1eb06d 1713 return 0;
64747e2d 1714
906c06f6
DM
1715 /* Make sure we apply the BPF filters either when one is configured, or if none is configured but previously
1716 * the state was anything but off. This way, if a unit with a BPF filter applied is reconfigured to lose it,
1717 * this will trickle down properly to cgroupfs. */
1718 apply_bpf = needs_bpf || u->cgroup_bpf_state != UNIT_CGROUP_BPF_OFF;
1719
4ad49000 1720 /* First, realize parents */
6414b7c9 1721 if (UNIT_ISSET(u->slice)) {
db785129 1722 r = unit_realize_cgroup_now(UNIT_DEREF(u->slice), state);
6414b7c9
DS
1723 if (r < 0)
1724 return r;
1725 }
4ad49000
LP
1726
1727 /* And then do the real work */
906c06f6 1728 r = unit_create_cgroup(u, target_mask, enable_mask, needs_bpf);
6414b7c9
DS
1729 if (r < 0)
1730 return r;
1731
1732 /* Finally, apply the necessary attributes. */
906c06f6 1733 cgroup_context_apply(u, target_mask, apply_bpf, state);
4b58153d 1734 cgroup_xattr_apply(u);
6414b7c9
DS
1735
1736 return 0;
64747e2d
LP
1737}
1738
91a6073e 1739unsigned manager_dispatch_cgroup_realize_queue(Manager *m) {
db785129 1740 ManagerState state;
4ad49000 1741 unsigned n = 0;
db785129 1742 Unit *i;
6414b7c9 1743 int r;
ecedd90f 1744
91a6073e
LP
1745 assert(m);
1746
db785129
LP
1747 state = manager_state(m);
1748
91a6073e
LP
1749 while ((i = m->cgroup_realize_queue)) {
1750 assert(i->in_cgroup_realize_queue);
ecedd90f 1751
2aa57a65
LP
1752 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(i))) {
1753 /* Maybe things changed, and the unit is not actually active anymore? */
1754 unit_remove_from_cgroup_realize_queue(i);
1755 continue;
1756 }
1757
db785129 1758 r = unit_realize_cgroup_now(i, state);
6414b7c9 1759 if (r < 0)
efdb0237 1760 log_warning_errno(r, "Failed to realize cgroups for queued unit %s, ignoring: %m", i->id);
0a1eb06d 1761
4ad49000
LP
1762 n++;
1763 }
ecedd90f 1764
4ad49000 1765 return n;
8e274523
LP
1766}
1767
91a6073e 1768static void unit_add_siblings_to_cgroup_realize_queue(Unit *u) {
4ad49000 1769 Unit *slice;
ca949c9d 1770
4ad49000
LP
1771 /* This adds the siblings of the specified unit and the
1772 * siblings of all parent units to the cgroup queue. (But
1773 * neither the specified unit itself nor the parents.) */
1774
1775 while ((slice = UNIT_DEREF(u->slice))) {
1776 Iterator i;
1777 Unit *m;
eef85c4a 1778 void *v;
8f53a7b8 1779
eef85c4a 1780 HASHMAP_FOREACH_KEY(v, m, u->dependencies[UNIT_BEFORE], i) {
4ad49000
LP
1781 if (m == u)
1782 continue;
8e274523 1783
6414b7c9
DS
1784 /* Skip units that have a dependency on the slice
1785 * but aren't actually in it. */
4ad49000 1786 if (UNIT_DEREF(m->slice) != slice)
50159e6a 1787 continue;
8e274523 1788
6414b7c9
DS
1789 /* No point in doing cgroup application for units
1790 * without active processes. */
1791 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(m)))
1792 continue;
1793
1794 /* If the unit doesn't need any new controllers
1795 * and has current ones realized, it doesn't need
1796 * any changes. */
906c06f6
DM
1797 if (unit_has_mask_realized(m,
1798 unit_get_target_mask(m),
1799 unit_get_enable_mask(m),
1800 unit_get_needs_bpf(m)))
6414b7c9
DS
1801 continue;
1802
91a6073e 1803 unit_add_to_cgroup_realize_queue(m);
50159e6a
LP
1804 }
1805
4ad49000 1806 u = slice;
8e274523 1807 }
4ad49000
LP
1808}
1809
0a1eb06d 1810int unit_realize_cgroup(Unit *u) {
4ad49000
LP
1811 assert(u);
1812
35b7ff80 1813 if (!UNIT_HAS_CGROUP_CONTEXT(u))
0a1eb06d 1814 return 0;
8e274523 1815
4ad49000
LP
1816 /* So, here's the deal: when realizing the cgroups for this
1817 * unit, we need to first create all parents; moreover, for
1818 * the weight-based controllers we also need to
1819 * make sure that all our siblings (i.e. units that are in the
73e231ab 1820 * same slice as we are) have cgroups, too. Otherwise, things
4ad49000
LP
1821 * would become very uneven as each of their processes would
1822 * get as many resources as our whole group together. This call
1823 * will synchronously create the parent cgroups, but will
1824 * defer work on the siblings to the next event loop
1825 * iteration. */
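        /* For example (hypothetical unit names): realizing foo.service inside bar.slice also queues the
         * other units of bar.slice, so that their CPU/IO weights get applied as well. */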
ca949c9d 1826
4ad49000 1827 /* Add all sibling slices to the cgroup queue. */
91a6073e 1828 unit_add_siblings_to_cgroup_realize_queue(u);
4ad49000 1829
6414b7c9 1830 /* And realize this one now (and apply the values) */
db785129 1831 return unit_realize_cgroup_now(u, manager_state(u->manager));
8e274523
LP
1832}
1833
efdb0237
LP
1834void unit_release_cgroup(Unit *u) {
1835 assert(u);
1836
1837 /* Forgets all cgroup details for this cgroup */
1838
1839 if (u->cgroup_path) {
1840 (void) hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
1841 u->cgroup_path = mfree(u->cgroup_path);
1842 }
1843
1844 if (u->cgroup_inotify_wd >= 0) {
1845 if (inotify_rm_watch(u->manager->cgroup_inotify_fd, u->cgroup_inotify_wd) < 0)
1846 log_unit_debug_errno(u, errno, "Failed to remove cgroup inotify watch %i for %s, ignoring: %m", u->cgroup_inotify_wd, u->id);
1847
1848 (void) hashmap_remove(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd));
1849 u->cgroup_inotify_wd = -1;
1850 }
1851}
1852
1853void unit_prune_cgroup(Unit *u) {
8e274523 1854 int r;
efdb0237 1855 bool is_root_slice;
8e274523 1856
4ad49000 1857 assert(u);
8e274523 1858
efdb0237
LP
1859 /* Removes the cgroup, if empty and possible, and stops watching it. */
1860
4ad49000
LP
1861 if (!u->cgroup_path)
1862 return;
8e274523 1863
fe700f46
LP
1864 (void) unit_get_cpu_usage(u, NULL); /* Cache the last CPU usage value before we destroy the cgroup */
1865
efdb0237
LP
1866 is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);
1867
1868 r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !is_root_slice);
dab5bf85 1869 if (r < 0) {
f29ff115 1870 log_unit_debug_errno(u, r, "Failed to destroy cgroup %s, ignoring: %m", u->cgroup_path);
dab5bf85
RL
1871 return;
1872 }
8e274523 1873
efdb0237
LP
1874 if (is_root_slice)
1875 return;
1876
1877 unit_release_cgroup(u);
0a1eb06d 1878
4ad49000 1879 u->cgroup_realized = false;
bc432dc7 1880 u->cgroup_realized_mask = 0;
ccf78df1 1881 u->cgroup_enabled_mask = 0;
8e274523
LP
1882}
1883
efdb0237 1884int unit_search_main_pid(Unit *u, pid_t *ret) {
4ad49000
LP
1885 _cleanup_fclose_ FILE *f = NULL;
1886 pid_t pid = 0, npid, mypid;
efdb0237 1887 int r;
4ad49000
LP
1888
1889 assert(u);
efdb0237 1890 assert(ret);
4ad49000
LP
1891
1892 if (!u->cgroup_path)
efdb0237 1893 return -ENXIO;
4ad49000 1894
efdb0237
LP
1895 r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, &f);
1896 if (r < 0)
1897 return r;
4ad49000 1898
df0ff127 1899 mypid = getpid_cached();
4ad49000
LP
1900 while (cg_read_pid(f, &npid) > 0) {
1901 pid_t ppid;
1902
1903 if (npid == pid)
1904 continue;
8e274523 1905
4ad49000 1906 /* Ignore processes that aren't our kids */
6bc73acb 1907 if (get_process_ppid(npid, &ppid) >= 0 && ppid != mypid)
4ad49000 1908 continue;
8e274523 1909
efdb0237 1910 if (pid != 0)
4ad49000
LP
1911 /* Dang, there's more than one daemonized PID
1912 in this group, so we don't know what process
1913 is the main process. */
efdb0237
LP
1914
1915 return -ENODATA;
8e274523 1916
4ad49000 1917 pid = npid;
8e274523
LP
1918 }
1919
efdb0237
LP
1920 *ret = pid;
1921 return 0;
1922}
1923
1924static int unit_watch_pids_in_path(Unit *u, const char *path) {
b3c5bad3 1925 _cleanup_closedir_ DIR *d = NULL;
efdb0237
LP
1926 _cleanup_fclose_ FILE *f = NULL;
1927 int ret = 0, r;
1928
1929 assert(u);
1930 assert(path);
1931
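        /* Recursively add every PID found in this cgroup and in all of its subgroups to the unit's watch
         * set. The first error encountered is remembered in "ret", but we keep going regardless. */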
1932 r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f);
1933 if (r < 0)
1934 ret = r;
1935 else {
1936 pid_t pid;
1937
1938 while ((r = cg_read_pid(f, &pid)) > 0) {
1939 r = unit_watch_pid(u, pid);
1940 if (r < 0 && ret >= 0)
1941 ret = r;
1942 }
1943
1944 if (r < 0 && ret >= 0)
1945 ret = r;
1946 }
1947
1948 r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d);
1949 if (r < 0) {
1950 if (ret >= 0)
1951 ret = r;
1952 } else {
1953 char *fn;
1954
1955 while ((r = cg_read_subgroup(d, &fn)) > 0) {
1956 _cleanup_free_ char *p = NULL;
1957
605405c6 1958 p = strjoin(path, "/", fn);
efdb0237
LP
1959 free(fn);
1960
1961 if (!p)
1962 return -ENOMEM;
1963
1964 r = unit_watch_pids_in_path(u, p);
1965 if (r < 0 && ret >= 0)
1966 ret = r;
1967 }
1968
1969 if (r < 0 && ret >= 0)
1970 ret = r;
1971 }
1972
1973 return ret;
1974}
1975
11aef522
LP
1976int unit_synthesize_cgroup_empty_event(Unit *u) {
1977 int r;
1978
1979 assert(u);
1980
1981 /* Enqueue a synthetic cgroup empty event if this unit doesn't watch any PIDs anymore. This is compatibility
1982 * support for non-unified systems where notifications aren't reliable, and hence we need to take whatever we can
1983 * get as a notification source as soon as we stop having any useful PIDs to watch for. */
1984
1985 if (!u->cgroup_path)
1986 return -ENOENT;
1987
1988 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
1989 if (r < 0)
1990 return r;
1991 if (r > 0) /* On unified we have reliable notifications, and don't need this */
1992 return 0;
1993
1994 if (!set_isempty(u->pids))
1995 return 0;
1996
1997 unit_add_to_cgroup_empty_queue(u);
1998 return 0;
1999}
2000
efdb0237 2001int unit_watch_all_pids(Unit *u) {
b4cccbc1
LP
2002 int r;
2003
efdb0237
LP
2004 assert(u);
2005
2006 /* Adds all PIDs from our cgroup to the set of PIDs we
2007 * watch. This is fallback logic for cases where we do not
2008 * get reliable cgroup empty notifications: we try to use
2009 * SIGCHLD as a replacement. */
2010
2011 if (!u->cgroup_path)
2012 return -ENOENT;
2013
c22800e4 2014 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
b4cccbc1
LP
2015 if (r < 0)
2016 return r;
2017 if (r > 0) /* On unified we can use proper notifications */
efdb0237
LP
2018 return 0;
2019
2020 return unit_watch_pids_in_path(u, u->cgroup_path);
2021}
2022
09e24654
LP
2023static int on_cgroup_empty_event(sd_event_source *s, void *userdata) {
2024 Manager *m = userdata;
2025 Unit *u;
efdb0237
LP
2026 int r;
2027
09e24654
LP
2028 assert(s);
2029 assert(m);
efdb0237 2030
09e24654
LP
2031 u = m->cgroup_empty_queue;
2032 if (!u)
efdb0237
LP
2033 return 0;
2034
09e24654
LP
2035 assert(u->in_cgroup_empty_queue);
2036 u->in_cgroup_empty_queue = false;
2037 LIST_REMOVE(cgroup_empty_queue, m->cgroup_empty_queue, u);
2038
2039 if (m->cgroup_empty_queue) {
2040 /* More stuff queued, let's make sure we remain enabled */
2041 r = sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);
2042 if (r < 0)
19a691a9 2043 log_debug_errno(r, "Failed to reenable cgroup empty event source, ignoring: %m");
09e24654 2044 }
efdb0237
LP
2045
2046 unit_add_to_gc_queue(u);
2047
2048 if (UNIT_VTABLE(u)->notify_cgroup_empty)
2049 UNIT_VTABLE(u)->notify_cgroup_empty(u);
2050
2051 return 0;
2052}
2053
09e24654
LP
2054void unit_add_to_cgroup_empty_queue(Unit *u) {
2055 int r;
2056
2057 assert(u);
2058
2059 /* Note that there are four different ways in which cgroup empty events reach us:
2060 *
2061 * 1. On the unified hierarchy we get an inotify event on the cgroup
2062 *
2063 * 2. On the legacy hierarchy, when running in system mode, we get a datagram on the cgroup agent socket
2064 *
2065 * 3. On the legacy hierarchy, when running in user mode, we get a D-Bus signal on the system bus
2066 *
2067 * 4. On the legacy hierarchy, in service units we start watching all processes of the cgroup for SIGCHLD as
2068 * soon as we get one SIGCHLD, to deal with unreliable cgroup notifications.
2069 *
2070 * Regardless which way we got the notification, we'll verify it here, and then add it to a separate
2071 * queue. This queue will be dispatched at a lower priority than the SIGCHLD handler, so that we always use
2072 * SIGCHLD if we can get it first, and only use the cgroup empty notifications if there's no SIGCHLD pending
2073 * (which might happen if the cgroup doesn't contain processes that are our own children, which is typically the
2074 * case for scope units). */
2075
2076 if (u->in_cgroup_empty_queue)
2077 return;
2078
2079 /* Let's verify that the cgroup is really empty */
2080 if (!u->cgroup_path)
2081 return;
2082 r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
2083 if (r < 0) {
2084 log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
2085 return;
2086 }
2087 if (r == 0)
2088 return;
2089
2090 LIST_PREPEND(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
2091 u->in_cgroup_empty_queue = true;
2092
2093 /* Trigger the defer event */
2094 r = sd_event_source_set_enabled(u->manager->cgroup_empty_event_source, SD_EVENT_ONESHOT);
2095 if (r < 0)
2096 log_debug_errno(r, "Failed to enable cgroup empty event source: %m");
2097}
2098
efdb0237
LP
2099static int on_cgroup_inotify_event(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
2100 Manager *m = userdata;
2101
2102 assert(s);
2103 assert(fd >= 0);
2104 assert(m);
2105
2106 for (;;) {
2107 union inotify_event_buffer buffer;
2108 struct inotify_event *e;
2109 ssize_t l;
2110
2111 l = read(fd, &buffer, sizeof(buffer));
2112 if (l < 0) {
47249640 2113 if (IN_SET(errno, EINTR, EAGAIN))
efdb0237
LP
2114 return 0;
2115
2116 return log_error_errno(errno, "Failed to read control group inotify events: %m");
2117 }
2118
2119 FOREACH_INOTIFY_EVENT(e, buffer, l) {
2120 Unit *u;
2121
2122 if (e->wd < 0)
2123 /* Queue overflow has no watch descriptor */
2124 continue;
2125
2126 if (e->mask & IN_IGNORED)
2127 /* The watch was just removed */
2128 continue;
2129
2130 u = hashmap_get(m->cgroup_inotify_wd_unit, INT_TO_PTR(e->wd));
2131 if (!u) /* Note that inotify might deliver
2132 * events for a watch even after it
2133 * was removed, because it was queued
2134 * before the removal. Let's ignore
2135 * this here safely. */
2136 continue;
2137
09e24654 2138 unit_add_to_cgroup_empty_queue(u);
efdb0237
LP
2139 }
2140 }
8e274523
LP
2141}
2142
8e274523 2143int manager_setup_cgroup(Manager *m) {
9444b1f2 2144 _cleanup_free_ char *path = NULL;
10bd3e2e 2145 const char *scope_path;
efdb0237 2146 CGroupController c;
b4cccbc1 2147 int r, all_unified;
efdb0237 2148 char *e;
8e274523
LP
2149
2150 assert(m);
2151
35d2e7ec 2152 /* 1. Determine hierarchy */
efdb0237 2153 m->cgroup_root = mfree(m->cgroup_root);
9444b1f2 2154 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root);
23bbb0de
MS
2155 if (r < 0)
2156 return log_error_errno(r, "Cannot determine cgroup we are running in: %m");
8e274523 2157
efdb0237
LP
2158 /* Chop off the init scope, if we are already located in it */
2159 e = endswith(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
0d8c31ff 2160
efdb0237
LP
2161 /* LEGACY: Also chop off the system slice if we are in
2162 * it. This is to support live upgrades from older systemd
2163 * versions where PID 1 was moved there. Also see
2164 * cg_get_root_path(). */
463d0d15 2165 if (!e && MANAGER_IS_SYSTEM(m)) {
9444b1f2 2166 e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE);
15c60e99 2167 if (!e)
efdb0237 2168 e = endswith(m->cgroup_root, "/system"); /* even more legacy */
0baf24dd 2169 }
efdb0237
LP
2170 if (e)
2171 *e = 0;
7ccfb64a 2172
7546145e
LP
2173 /* And make sure to store away the root value without trailing slash, even for the root dir, so that we can
2174 * easily prepend it everywhere. */
2175 delete_trailing_chars(m->cgroup_root, "/");
8e274523 2176
35d2e7ec 2177 /* 2. Show data */
9444b1f2 2178 r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
23bbb0de
MS
2179 if (r < 0)
2180 return log_error_errno(r, "Cannot find cgroup mount point: %m");
8e274523 2181
415fc41c
TH
2182 r = cg_unified_flush();
2183 if (r < 0)
2184 return log_error_errno(r, "Couldn't determine if we are running in the unified hierarchy: %m");
5da38d07 2185
b4cccbc1 2186 all_unified = cg_all_unified();
d4c819ed
ZJS
2187 if (all_unified < 0)
2188 return log_error_errno(all_unified, "Couldn't determine whether we are in all unified mode: %m");
2189 if (all_unified > 0)
efdb0237 2190 log_debug("Unified cgroup hierarchy is located at %s.", path);
b4cccbc1 2191 else {
c22800e4 2192 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
b4cccbc1
LP
2193 if (r < 0)
2194 return log_error_errno(r, "Failed to determine whether systemd's own controller is in unified mode: %m");
2195 if (r > 0)
2196 log_debug("Unified cgroup hierarchy is located at %s. Controllers are on legacy hierarchies.", path);
2197 else
2198 log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER_LEGACY ". File system hierarchy is at %s.", path);
2199 }
efdb0237 2200
09e24654
LP
2201 /* 3. Allocate cgroup empty defer event source */
2202 m->cgroup_empty_event_source = sd_event_source_unref(m->cgroup_empty_event_source);
2203 r = sd_event_add_defer(m->event, &m->cgroup_empty_event_source, on_cgroup_empty_event, m);
2204 if (r < 0)
2205 return log_error_errno(r, "Failed to create cgroup empty event source: %m");
2206
2207 r = sd_event_source_set_priority(m->cgroup_empty_event_source, SD_EVENT_PRIORITY_NORMAL-5);
2208 if (r < 0)
2209 return log_error_errno(r, "Failed to set priority of cgroup empty event source: %m");
2210
2211 r = sd_event_source_set_enabled(m->cgroup_empty_event_source, SD_EVENT_OFF);
2212 if (r < 0)
2213 return log_error_errno(r, "Failed to disable cgroup empty event source: %m");
2214
2215 (void) sd_event_source_set_description(m->cgroup_empty_event_source, "cgroup-empty");
2216
2217 /* 4. Install notifier inotify object, or agent */
10bd3e2e 2218 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0) {
c6c18be3 2219
09e24654 2220 /* In the unified hierarchy we can get cgroup empty notifications via inotify. */
efdb0237 2221
10bd3e2e
LP
2222 m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
2223 safe_close(m->cgroup_inotify_fd);
efdb0237 2224
10bd3e2e
LP
2225 m->cgroup_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
2226 if (m->cgroup_inotify_fd < 0)
2227 return log_error_errno(errno, "Failed to create control group inotify object: %m");
efdb0237 2228
10bd3e2e
LP
2229 r = sd_event_add_io(m->event, &m->cgroup_inotify_event_source, m->cgroup_inotify_fd, EPOLLIN, on_cgroup_inotify_event, m);
2230 if (r < 0)
2231 return log_error_errno(r, "Failed to watch control group inotify object: %m");
efdb0237 2232
10bd3e2e
LP
2233 /* Process cgroup empty notifications early, but after service notifications and SIGCHLD. Also
2234 * see handling of cgroup agent notifications, for the classic cgroup hierarchy support. */
09e24654 2235 r = sd_event_source_set_priority(m->cgroup_inotify_event_source, SD_EVENT_PRIORITY_NORMAL-4);
10bd3e2e
LP
2236 if (r < 0)
2237 return log_error_errno(r, "Failed to set priority of inotify event source: %m");
efdb0237 2238
10bd3e2e 2239 (void) sd_event_source_set_description(m->cgroup_inotify_event_source, "cgroup-inotify");
efdb0237 2240
10bd3e2e 2241 } else if (MANAGER_IS_SYSTEM(m) && m->test_run_flags == 0) {
efdb0237 2242
10bd3e2e
LP
2243 /* On the legacy hierarchy we only get notifications via cgroup agents. (Which isn't really reliable,
2244 * since it does not generate events when control groups with children run empty.) */
8e274523 2245
10bd3e2e 2246 r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, SYSTEMD_CGROUP_AGENT_PATH);
23bbb0de 2247 if (r < 0)
10bd3e2e
LP
2248 log_warning_errno(r, "Failed to install release agent, ignoring: %m");
2249 else if (r > 0)
2250 log_debug("Installed release agent.");
2251 else if (r == 0)
2252 log_debug("Release agent already installed.");
2253 }
efdb0237 2254
09e24654 2255 /* 5. Make sure we are in the special "init.scope" unit in the root slice. */
10bd3e2e
LP
2256 scope_path = strjoina(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
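        /* With an empty m->cgroup_root this is simply "/init.scope"; in a container it might be something
         * like "/machine.slice/machine-foo.scope/init.scope" (hypothetical example). */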
2257 r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
aa77e234
MS
2258 if (r >= 0) {
2259 /* Also, move all other userspace processes remaining in the root cgroup into that scope. */
2260 r = cg_migrate(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
2261 if (r < 0)
2262 log_warning_errno(r, "Couldn't move remaining userspace processes, ignoring: %m");
c6c18be3 2263
aa77e234
MS
2264 /* 6. And pin it, so that it cannot be unmounted */
2265 safe_close(m->pin_cgroupfs_fd);
2266 m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
2267 if (m->pin_cgroupfs_fd < 0)
2268 return log_error_errno(errno, "Failed to open pin file: %m");
0d8c31ff 2269
aa77e234
MS
2270 } else if (r < 0 && !m->test_run_flags)
2271 return log_error_errno(r, "Failed to create %s control group: %m", scope_path);
10bd3e2e 2272
09e24654 2273 /* 7. Always enable hierarchical support if it exists... */
10bd3e2e
LP
2274 if (!all_unified && m->test_run_flags == 0)
2275 (void) cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");
c6c18be3 2276
09e24654 2277 /* 8. Figure out which controllers are supported, and log about it */
efdb0237
LP
2278 r = cg_mask_supported(&m->cgroup_supported);
2279 if (r < 0)
2280 return log_error_errno(r, "Failed to determine supported controllers: %m");
efdb0237 2281 for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++)
eee0a1e4 2282 log_debug("Controller '%s' supported: %s", cgroup_controller_to_string(c), yes_no(m->cgroup_supported & CGROUP_CONTROLLER_TO_MASK(c)));
9156e799 2283
a32360f1 2284 return 0;
8e274523
LP
2285}
2286
c6c18be3 2287void manager_shutdown_cgroup(Manager *m, bool delete) {
8e274523
LP
2288 assert(m);
2289
9444b1f2
LP
2290 /* We can't really delete the group, since we are in it. But
2291 * let's trim it. */
f6c63f6f 2292 if (delete && m->cgroup_root && m->test_run_flags != MANAGER_TEST_RUN_MINIMAL)
efdb0237
LP
2293 (void) cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);
2294
09e24654
LP
2295 m->cgroup_empty_event_source = sd_event_source_unref(m->cgroup_empty_event_source);
2296
efdb0237
LP
2297 m->cgroup_inotify_wd_unit = hashmap_free(m->cgroup_inotify_wd_unit);
2298
2299 m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
2300 m->cgroup_inotify_fd = safe_close(m->cgroup_inotify_fd);
8e274523 2301
03e334a1 2302 m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd);
c6c18be3 2303
efdb0237 2304 m->cgroup_root = mfree(m->cgroup_root);
8e274523
LP
2305}
2306
4ad49000 2307Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) {
acb14d31 2308 char *p;
4ad49000 2309 Unit *u;
acb14d31
LP
2310
2311 assert(m);
2312 assert(cgroup);
acb14d31 2313
4ad49000
LP
2314 u = hashmap_get(m->cgroup_unit, cgroup);
2315 if (u)
2316 return u;
acb14d31 2317
8e70580b 2318 p = strdupa(cgroup);
acb14d31
LP
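        /* No exact match: walk the path upwards one component at a time, e.g. (hypothetical)
         * "/foo.slice/bar.service/sub" -> "/foo.slice/bar.service" -> "/foo.slice", and fall back to the
         * root slice if nothing matches. */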
2319 for (;;) {
2320 char *e;
2321
2322 e = strrchr(p, '/');
efdb0237
LP
2323 if (!e || e == p)
2324 return hashmap_get(m->cgroup_unit, SPECIAL_ROOT_SLICE);
acb14d31
LP
2325
2326 *e = 0;
2327
4ad49000
LP
2328 u = hashmap_get(m->cgroup_unit, p);
2329 if (u)
2330 return u;
acb14d31
LP
2331 }
2332}
2333
b3ac818b 2334Unit *manager_get_unit_by_pid_cgroup(Manager *m, pid_t pid) {
4ad49000 2335 _cleanup_free_ char *cgroup = NULL;
8e274523 2336
8c47c732
LP
2337 assert(m);
2338
62a76913 2339 if (!pid_is_valid(pid))
b3ac818b
LP
2340 return NULL;
2341
62a76913 2342 if (cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup) < 0)
b3ac818b
LP
2343 return NULL;
2344
2345 return manager_get_unit_by_cgroup(m, cgroup);
2346}
2347
2348Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {
62a76913 2349 Unit *u, **array;
b3ac818b
LP
2350
2351 assert(m);
2352
62a76913
LP
2353 /* Note that a process might be owned by multiple units; we return only one here, which is good enough for most
2354 * cases, though not strictly correct. We prefer the one reported by cgroup membership, as that's the most
2355 * relevant one as children of the process will be assigned to that one, too, before all else. */
2356
2357 if (!pid_is_valid(pid))
8c47c732
LP
2358 return NULL;
2359
2ca9d979 2360 if (pid == getpid_cached())
efdb0237
LP
2361 return hashmap_get(m->units, SPECIAL_INIT_SCOPE);
2362
62a76913 2363 u = manager_get_unit_by_pid_cgroup(m, pid);
5fe8876b
LP
2364 if (u)
2365 return u;
2366
62a76913 2367 u = hashmap_get(m->watch_pids, PID_TO_PTR(pid));
5fe8876b
LP
2368 if (u)
2369 return u;
2370
62a76913
LP
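        /* The negated PID is used as a second key under which an array of all units watching this PID is
         * kept, for the case where several units watch the same PID; returning the first entry is good
         * enough here (see the comment above). */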
2371 array = hashmap_get(m->watch_pids, PID_TO_PTR(-pid));
2372 if (array)
2373 return array[0];
2374
2375 return NULL;
6dde1f33 2376}
4fbf50b3 2377
4ad49000
LP
2378int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
2379 Unit *u;
4fbf50b3 2380
4ad49000
LP
2381 assert(m);
2382 assert(cgroup);
4fbf50b3 2383
09e24654
LP
2384 /* Called on the legacy hierarchy whenever we get an explicit cgroup notification from the cgroup agent process
2385 * or from the --system instance */
2386
d8fdc620
LP
2387 log_debug("Got cgroup empty notification for: %s", cgroup);
2388
4ad49000 2389 u = manager_get_unit_by_cgroup(m, cgroup);
5ad096b3
LP
2390 if (!u)
2391 return 0;
b56c28c3 2392
09e24654
LP
2393 unit_add_to_cgroup_empty_queue(u);
2394 return 1;
5ad096b3
LP
2395}
2396
2397int unit_get_memory_current(Unit *u, uint64_t *ret) {
2398 _cleanup_free_ char *v = NULL;
2399 int r;
2400
2401 assert(u);
2402 assert(ret);
2403
2e4025c0 2404 if (!UNIT_CGROUP_BOOL(u, memory_accounting))
cf3b4be1
LP
2405 return -ENODATA;
2406
5ad096b3
LP
2407 if (!u->cgroup_path)
2408 return -ENODATA;
2409
1f73aa00
LP
2410 /* The root cgroup doesn't expose this information, let's get it from /proc instead */
2411 if (unit_has_root_cgroup(u))
2412 return procfs_memory_get_current(ret);
2413
efdb0237 2414 if ((u->cgroup_realized_mask & CGROUP_MASK_MEMORY) == 0)
5ad096b3
LP
2415 return -ENODATA;
2416
b4cccbc1
LP
2417 r = cg_all_unified();
2418 if (r < 0)
2419 return r;
2420 if (r > 0)
efdb0237 2421 r = cg_get_attribute("memory", u->cgroup_path, "memory.current", &v);
b4cccbc1
LP
2422 else
2423 r = cg_get_attribute("memory", u->cgroup_path, "memory.usage_in_bytes", &v);
5ad096b3
LP
2424 if (r == -ENOENT)
2425 return -ENODATA;
2426 if (r < 0)
2427 return r;
2428
2429 return safe_atou64(v, ret);
2430}
2431
03a7b521
LP
2432int unit_get_tasks_current(Unit *u, uint64_t *ret) {
2433 _cleanup_free_ char *v = NULL;
2434 int r;
2435
2436 assert(u);
2437 assert(ret);
2438
2e4025c0 2439 if (!UNIT_CGROUP_BOOL(u, tasks_accounting))
cf3b4be1
LP
2440 return -ENODATA;
2441
03a7b521
LP
2442 if (!u->cgroup_path)
2443 return -ENODATA;
2444
c36a69f4
LP
2445 /* The root cgroup doesn't expose this information, let's get it from /proc instead */
2446 if (unit_has_root_cgroup(u))
2447 return procfs_tasks_get_current(ret);
2448
1f73aa00
LP
2449 if ((u->cgroup_realized_mask & CGROUP_MASK_PIDS) == 0)
2450 return -ENODATA;
2451
03a7b521
LP
2452 r = cg_get_attribute("pids", u->cgroup_path, "pids.current", &v);
2453 if (r == -ENOENT)
2454 return -ENODATA;
2455 if (r < 0)
2456 return r;
2457
2458 return safe_atou64(v, ret);
2459}
2460
5ad096b3
LP
2461static int unit_get_cpu_usage_raw(Unit *u, nsec_t *ret) {
2462 _cleanup_free_ char *v = NULL;
2463 uint64_t ns;
2464 int r;
2465
2466 assert(u);
2467 assert(ret);
2468
2469 if (!u->cgroup_path)
2470 return -ENODATA;
2471
1f73aa00
LP
2472 /* The root cgroup doesn't expose this information, let's get it from /proc instead */
2473 if (unit_has_root_cgroup(u))
2474 return procfs_cpu_get_usage(ret);
2475
b4cccbc1
LP
2476 r = cg_all_unified();
2477 if (r < 0)
2478 return r;
2479 if (r > 0) {
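                /* On the unified hierarchy the "usage_usec" field of cpu.stat is in microseconds; it is
                 * converted to nanoseconds below so that both code paths return the same unit. */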
66ebf6c0
TH
2480 _cleanup_free_ char *val = NULL;
2481 uint64_t us;
5ad096b3 2482
66ebf6c0
TH
2483 if ((u->cgroup_realized_mask & CGROUP_MASK_CPU) == 0)
2484 return -ENODATA;
5ad096b3 2485
b734a4ff 2486 r = cg_get_keyed_attribute("cpu", u->cgroup_path, "cpu.stat", STRV_MAKE("usage_usec"), &val);
66ebf6c0
TH
2487 if (r < 0)
2488 return r;
b734a4ff
LP
2489 if (IN_SET(r, -ENOENT, -ENXIO))
2490 return -ENODATA;
66ebf6c0
TH
2491
2492 r = safe_atou64(val, &us);
2493 if (r < 0)
2494 return r;
2495
2496 ns = us * NSEC_PER_USEC;
2497 } else {
2498 if ((u->cgroup_realized_mask & CGROUP_MASK_CPUACCT) == 0)
2499 return -ENODATA;
2500
2501 r = cg_get_attribute("cpuacct", u->cgroup_path, "cpuacct.usage", &v);
2502 if (r == -ENOENT)
2503 return -ENODATA;
2504 if (r < 0)
2505 return r;
2506
2507 r = safe_atou64(v, &ns);
2508 if (r < 0)
2509 return r;
2510 }
5ad096b3
LP
2511
2512 *ret = ns;
2513 return 0;
2514}
2515
2516int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
2517 nsec_t ns;
2518 int r;
2519
fe700f46
LP
2520 assert(u);
2521
2522 /* Retrieve the current CPU usage counter. This will subtract the CPU counter taken when the unit was
2523 * started. If the cgroup has been removed already, returns the last cached value. To cache the value, simply
2524 * call this function with a NULL return value. */
2525
2e4025c0 2526 if (!UNIT_CGROUP_BOOL(u, cpu_accounting))
cf3b4be1
LP
2527 return -ENODATA;
2528
5ad096b3 2529 r = unit_get_cpu_usage_raw(u, &ns);
fe700f46
LP
2530 if (r == -ENODATA && u->cpu_usage_last != NSEC_INFINITY) {
2531 /* If we can't get the CPU usage anymore (because the cgroup was already removed, for example), use our
2532 * cached value. */
2533
2534 if (ret)
2535 *ret = u->cpu_usage_last;
2536 return 0;
2537 }
5ad096b3
LP
2538 if (r < 0)
2539 return r;
2540
66ebf6c0
TH
2541 if (ns > u->cpu_usage_base)
2542 ns -= u->cpu_usage_base;
5ad096b3
LP
2543 else
2544 ns = 0;
2545
fe700f46
LP
2546 u->cpu_usage_last = ns;
2547 if (ret)
2548 *ret = ns;
2549
5ad096b3
LP
2550 return 0;
2551}
2552
906c06f6
DM
2553int unit_get_ip_accounting(
2554 Unit *u,
2555 CGroupIPAccountingMetric metric,
2556 uint64_t *ret) {
2557
6b659ed8 2558 uint64_t value;
906c06f6
DM
2559 int fd, r;
2560
2561 assert(u);
2562 assert(metric >= 0);
2563 assert(metric < _CGROUP_IP_ACCOUNTING_METRIC_MAX);
2564 assert(ret);
2565
2e4025c0 2566 if (!UNIT_CGROUP_BOOL(u, ip_accounting))
cf3b4be1
LP
2567 return -ENODATA;
2568
906c06f6
DM
2569 fd = IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_INGRESS_PACKETS) ?
2570 u->ip_accounting_ingress_map_fd :
2571 u->ip_accounting_egress_map_fd;
906c06f6
DM
2572 if (fd < 0)
2573 return -ENODATA;
2574
2575 if (IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
6b659ed8 2576 r = bpf_firewall_read_accounting(fd, &value, NULL);
906c06f6 2577 else
6b659ed8
LP
2578 r = bpf_firewall_read_accounting(fd, NULL, &value);
2579 if (r < 0)
2580 return r;
2581
2582 /* Add in additional metrics from a previous runtime. Note that when reexecing/reloading the daemon we compile
2583 * all BPF programs and maps anew, but serialize the old counters. When deserializing we store them in the
2584 * ip_accounting_extra[] field, and add them in here transparently. */
2585
2586 *ret = value + u->ip_accounting_extra[metric];
906c06f6
DM
2587
2588 return r;
2589}
2590
2591int unit_reset_cpu_accounting(Unit *u) {
5ad096b3
LP
2592 nsec_t ns;
2593 int r;
2594
2595 assert(u);
2596
fe700f46
LP
2597 u->cpu_usage_last = NSEC_INFINITY;
2598
5ad096b3
LP
2599 r = unit_get_cpu_usage_raw(u, &ns);
2600 if (r < 0) {
66ebf6c0 2601 u->cpu_usage_base = 0;
5ad096b3 2602 return r;
b56c28c3 2603 }
2633eb83 2604
66ebf6c0 2605 u->cpu_usage_base = ns;
4ad49000 2606 return 0;
4fbf50b3
LP
2607}
2608
906c06f6
DM
2609int unit_reset_ip_accounting(Unit *u) {
2610 int r = 0, q = 0;
2611
2612 assert(u);
2613
2614 if (u->ip_accounting_ingress_map_fd >= 0)
2615 r = bpf_firewall_reset_accounting(u->ip_accounting_ingress_map_fd);
2616
2617 if (u->ip_accounting_egress_map_fd >= 0)
2618 q = bpf_firewall_reset_accounting(u->ip_accounting_egress_map_fd);
2619
6b659ed8
LP
2620 zero(u->ip_accounting_extra);
2621
906c06f6
DM
2622 return r < 0 ? r : q;
2623}
2624
e7ab4d1a
LP
2625void unit_invalidate_cgroup(Unit *u, CGroupMask m) {
2626 assert(u);
2627
2628 if (!UNIT_HAS_CGROUP_CONTEXT(u))
2629 return;
2630
2631 if (m == 0)
2632 return;
2633
538b4852
TH
2634 /* always invalidate compat pairs together */
2635 if (m & (CGROUP_MASK_IO | CGROUP_MASK_BLKIO))
2636 m |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;
2637
7cce4fb7
LP
2638 if (m & (CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT))
2639 m |= CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT;
2640
60c728ad 2641 if ((u->cgroup_realized_mask & m) == 0) /* NOP? */
e7ab4d1a
LP
2642 return;
2643
2644 u->cgroup_realized_mask &= ~m;
91a6073e 2645 unit_add_to_cgroup_realize_queue(u);
e7ab4d1a
LP
2646}
2647
906c06f6
DM
2648void unit_invalidate_cgroup_bpf(Unit *u) {
2649 assert(u);
2650
2651 if (!UNIT_HAS_CGROUP_CONTEXT(u))
2652 return;
2653
60c728ad 2654 if (u->cgroup_bpf_state == UNIT_CGROUP_BPF_INVALIDATED) /* NOP? */
906c06f6
DM
2655 return;
2656
2657 u->cgroup_bpf_state = UNIT_CGROUP_BPF_INVALIDATED;
91a6073e 2658 unit_add_to_cgroup_realize_queue(u);
906c06f6
DM
2659
2660 /* If we are a slice unit, we also need to compile a new BPF program for all our children, as the IP access
2661 * list of our children includes our own. */
2662 if (u->type == UNIT_SLICE) {
2663 Unit *member;
2664 Iterator i;
eef85c4a 2665 void *v;
906c06f6 2666
eef85c4a 2667 HASHMAP_FOREACH_KEY(v, member, u->dependencies[UNIT_BEFORE], i) {
906c06f6
DM
2668 if (member == u)
2669 continue;
2670
2671 if (UNIT_DEREF(member->slice) != u)
2672 continue;
2673
2674 unit_invalidate_cgroup_bpf(member);
2675 }
2676 }
2677}
2678
1d9cc876
LP
2679bool unit_cgroup_delegate(Unit *u) {
2680 CGroupContext *c;
2681
2682 assert(u);
2683
2684 if (!UNIT_VTABLE(u)->can_delegate)
2685 return false;
2686
2687 c = unit_get_cgroup_context(u);
2688 if (!c)
2689 return false;
2690
2691 return c->delegate;
2692}
2693
e7ab4d1a
LP
2694void manager_invalidate_startup_units(Manager *m) {
2695 Iterator i;
2696 Unit *u;
2697
2698 assert(m);
2699
2700 SET_FOREACH(u, m->startup_units, i)
13c31542 2701 unit_invalidate_cgroup(u, CGROUP_MASK_CPU|CGROUP_MASK_IO|CGROUP_MASK_BLKIO);
e7ab4d1a
LP
2702}
2703
4ad49000
LP
2704static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
2705 [CGROUP_AUTO] = "auto",
2706 [CGROUP_CLOSED] = "closed",
2707 [CGROUP_STRICT] = "strict",
2708};
4fbf50b3 2709
4ad49000 2710DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);