/* SPDX-License-Identifier: LGPL-2.1+ */

#include <fcntl.h>
#include <fnmatch.h>

#include "alloc-util.h"
#include "blockdev-util.h"
#include "bpf-firewall.h"
#include "btrfs-util.h"
#include "bus-error.h"
#include "cgroup-util.h"
#include "cgroup.h"
#include "fd-util.h"
#include "fileio.h"
#include "fs-util.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
#include "procfs-util.h"
#include "special.h"
#include "stdio-util.h"
#include "string-table.h"
#include "string-util.h"
#include "virt.h"

#define CGROUP_CPU_QUOTA_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)

bool manager_owns_root_cgroup(Manager *m) {
        assert(m);

        /* Returns true if we are managing the root cgroup. Note that it isn't sufficient to just check whether the
         * cgroup root path equals "/" since that will also be the case if CLONE_NEWCGROUP is in the mix. Since there
         * appears to be no nice way to detect whether we are in a CLONE_NEWCGROUP namespace we instead just check if
         * we run in any kind of container virtualization. */

        if (detect_container() > 0)
                return false;

        return empty_or_root(m->cgroup_root);
}

bool unit_has_root_cgroup(Unit *u) {
        assert(u);

        /* Returns whether this unit manages the root cgroup. This will return true if this unit is the root slice and
         * the manager manages the root cgroup. */

        if (!manager_owns_root_cgroup(u->manager))
                return false;

        return unit_has_name(u, SPECIAL_ROOT_SLICE);
}

static void cgroup_compat_warn(void) {
        static bool cgroup_compat_warned = false;

        if (cgroup_compat_warned)
                return;

        log_warning("cgroup compatibility translation between legacy and unified hierarchy settings activated. "
                    "See cgroup-compat debug messages for details.");

        cgroup_compat_warned = true;
}

#define log_cgroup_compat(unit, fmt, ...) do {                                  \
                cgroup_compat_warn();                                           \
                log_unit_debug(unit, "cgroup-compat: " fmt, ##__VA_ARGS__);     \
        } while (false)

void cgroup_context_init(CGroupContext *c) {
        assert(c);

        /* Initialize everything to the kernel defaults, assuming the
         * structure is preinitialized to 0 */

        c->cpu_weight = CGROUP_WEIGHT_INVALID;
        c->startup_cpu_weight = CGROUP_WEIGHT_INVALID;
        c->cpu_quota_per_sec_usec = USEC_INFINITY;

        c->cpu_shares = CGROUP_CPU_SHARES_INVALID;
        c->startup_cpu_shares = CGROUP_CPU_SHARES_INVALID;

        c->memory_high = CGROUP_LIMIT_MAX;
        c->memory_max = CGROUP_LIMIT_MAX;
        c->memory_swap_max = CGROUP_LIMIT_MAX;

        c->memory_limit = CGROUP_LIMIT_MAX;

        c->io_weight = CGROUP_WEIGHT_INVALID;
        c->startup_io_weight = CGROUP_WEIGHT_INVALID;

        c->blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;
        c->startup_blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;

        c->tasks_max = (uint64_t) -1;
}

void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
        assert(c);
        assert(a);

        LIST_REMOVE(device_allow, c->device_allow, a);
        free(a->path);
        free(a);
}

void cgroup_context_free_io_device_weight(CGroupContext *c, CGroupIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->io_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_io_device_limit(CGroupContext *c, CGroupIODeviceLimit *l) {
        assert(c);
        assert(l);

        LIST_REMOVE(device_limits, c->io_device_limits, l);
        free(l->path);
        free(l);
}

void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->blockio_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b) {
        assert(c);
        assert(b);

        LIST_REMOVE(device_bandwidths, c->blockio_device_bandwidths, b);
        free(b->path);
        free(b);
}

void cgroup_context_done(CGroupContext *c) {
        assert(c);

        while (c->io_device_weights)
                cgroup_context_free_io_device_weight(c, c->io_device_weights);

        while (c->io_device_limits)
                cgroup_context_free_io_device_limit(c, c->io_device_limits);

        while (c->blockio_device_weights)
                cgroup_context_free_blockio_device_weight(c, c->blockio_device_weights);

        while (c->blockio_device_bandwidths)
                cgroup_context_free_blockio_device_bandwidth(c, c->blockio_device_bandwidths);

        while (c->device_allow)
                cgroup_context_free_device_allow(c, c->device_allow);

        c->ip_address_allow = ip_address_access_free_all(c->ip_address_allow);
        c->ip_address_deny = ip_address_access_free_all(c->ip_address_deny);
}

void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) {
        CGroupIODeviceLimit *il;
        CGroupIODeviceWeight *iw;
        CGroupBlockIODeviceBandwidth *b;
        CGroupBlockIODeviceWeight *w;
        CGroupDeviceAllow *a;
        IPAddressAccessItem *iaai;
        char u[FORMAT_TIMESPAN_MAX];

        assert(c);
        assert(f);

        prefix = strempty(prefix);

        fprintf(f,
                "%sCPUAccounting=%s\n"
                "%sIOAccounting=%s\n"
                "%sBlockIOAccounting=%s\n"
                "%sMemoryAccounting=%s\n"
                "%sTasksAccounting=%s\n"
                "%sIPAccounting=%s\n"
                "%sCPUWeight=%" PRIu64 "\n"
                "%sStartupCPUWeight=%" PRIu64 "\n"
                "%sCPUShares=%" PRIu64 "\n"
                "%sStartupCPUShares=%" PRIu64 "\n"
                "%sCPUQuotaPerSecSec=%s\n"
                "%sIOWeight=%" PRIu64 "\n"
                "%sStartupIOWeight=%" PRIu64 "\n"
                "%sBlockIOWeight=%" PRIu64 "\n"
                "%sStartupBlockIOWeight=%" PRIu64 "\n"
                "%sMemoryMin=%" PRIu64 "\n"
                "%sMemoryLow=%" PRIu64 "\n"
                "%sMemoryHigh=%" PRIu64 "\n"
                "%sMemoryMax=%" PRIu64 "\n"
                "%sMemorySwapMax=%" PRIu64 "\n"
                "%sMemoryLimit=%" PRIu64 "\n"
                "%sTasksMax=%" PRIu64 "\n"
                "%sDevicePolicy=%s\n"
                "%sDelegate=%s\n",
                prefix, yes_no(c->cpu_accounting),
                prefix, yes_no(c->io_accounting),
                prefix, yes_no(c->blockio_accounting),
                prefix, yes_no(c->memory_accounting),
                prefix, yes_no(c->tasks_accounting),
                prefix, yes_no(c->ip_accounting),
                prefix, c->cpu_weight,
                prefix, c->startup_cpu_weight,
                prefix, c->cpu_shares,
                prefix, c->startup_cpu_shares,
                prefix, format_timespan(u, sizeof(u), c->cpu_quota_per_sec_usec, 1),
                prefix, c->io_weight,
                prefix, c->startup_io_weight,
                prefix, c->blockio_weight,
                prefix, c->startup_blockio_weight,
                prefix, c->memory_min,
                prefix, c->memory_low,
                prefix, c->memory_high,
                prefix, c->memory_max,
                prefix, c->memory_swap_max,
                prefix, c->memory_limit,
                prefix, c->tasks_max,
                prefix, cgroup_device_policy_to_string(c->device_policy),
                prefix, yes_no(c->delegate));

        if (c->delegate) {
                _cleanup_free_ char *t = NULL;

                (void) cg_mask_to_string(c->delegate_controllers, &t);

                fprintf(f, "%sDelegateControllers=%s\n",
                        prefix,
                        strempty(t));
        }

        LIST_FOREACH(device_allow, a, c->device_allow)
                fprintf(f,
                        "%sDeviceAllow=%s %s%s%s\n",
                        prefix,
                        a->path,
                        a->r ? "r" : "", a->w ? "w" : "", a->m ? "m" : "");

        LIST_FOREACH(device_weights, iw, c->io_device_weights)
                fprintf(f,
                        "%sIODeviceWeight=%s %" PRIu64 "\n",
                        prefix,
                        iw->path,
                        iw->weight);

        LIST_FOREACH(device_limits, il, c->io_device_limits) {
                char buf[FORMAT_BYTES_MAX];
                CGroupIOLimitType type;

                for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                        if (il->limits[type] != cgroup_io_limit_defaults[type])
                                fprintf(f,
                                        "%s%s=%s %s\n",
                                        prefix,
                                        cgroup_io_limit_type_to_string(type),
                                        il->path,
                                        format_bytes(buf, sizeof(buf), il->limits[type]));
        }

        LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                fprintf(f,
                        "%sBlockIODeviceWeight=%s %" PRIu64 "\n",
                        prefix,
                        w->path,
                        w->weight);

        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                char buf[FORMAT_BYTES_MAX];

                if (b->rbps != CGROUP_LIMIT_MAX)
                        fprintf(f,
                                "%sBlockIOReadBandwidth=%s %s\n",
                                prefix,
                                b->path,
                                format_bytes(buf, sizeof(buf), b->rbps));
                if (b->wbps != CGROUP_LIMIT_MAX)
                        fprintf(f,
                                "%sBlockIOWriteBandwidth=%s %s\n",
                                prefix,
                                b->path,
                                format_bytes(buf, sizeof(buf), b->wbps));
        }

        LIST_FOREACH(items, iaai, c->ip_address_allow) {
                _cleanup_free_ char *k = NULL;

                (void) in_addr_to_string(iaai->family, &iaai->address, &k);
                fprintf(f, "%sIPAddressAllow=%s/%u\n", prefix, strnull(k), iaai->prefixlen);
        }

        LIST_FOREACH(items, iaai, c->ip_address_deny) {
                _cleanup_free_ char *k = NULL;

                (void) in_addr_to_string(iaai->family, &iaai->address, &k);
                fprintf(f, "%sIPAddressDeny=%s/%u\n", prefix, strnull(k), iaai->prefixlen);
        }
}

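/* Editorial note: maps an arbitrary path to the dev_t of the block device backing it. Block device nodes resolve
 * to themselves, regular files resolve to the device of the containing file system (with a btrfs-specific lookup
 * as fallback), and LUKS devices and partitions are then walked back to the whole originating disk. */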
static int lookup_block_device(const char *p, dev_t *ret) {
        struct stat st;
        int r;

        assert(p);
        assert(ret);

        if (stat(p, &st) < 0)
                return log_warning_errno(errno, "Couldn't stat device '%s': %m", p);

        if (S_ISBLK(st.st_mode))
                *ret = st.st_rdev;
        else if (major(st.st_dev) != 0)
                *ret = st.st_dev; /* If this is not a device node then use the block device this file is stored on */
        else {
                /* If this is btrfs, getting the backing block device is a bit harder */
                r = btrfs_get_block_device(p, ret);
                if (r < 0 && r != -ENOTTY)
                        return log_warning_errno(r, "Failed to determine block device backing btrfs file system '%s': %m", p);
                if (r == -ENOTTY) {
                        log_warning("'%s' is not a block device node, and file system block device cannot be determined or is not local.", p);
                        return -ENODEV;
                }
        }

        /* If this is a LUKS device, try to get the originating block device */
        (void) block_get_originating(*ret, ret);

        /* If this is a partition, try to get the originating block device */
        (void) block_get_whole_disk(*ret, ret);
        return 0;
}

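/* Editorial note: writes one entry to the legacy "devices" controller. The kernel expects devices.allow lines of
 * the form "<type> <major>:<minor> <access>", e.g. "c 1:3 rwm" to permit read, write and mknod access to
 * /dev/null. */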
static int whitelist_device(const char *path, const char *node, const char *acc) {
        char buf[2+DECIMAL_STR_MAX(dev_t)*2+2+4];
        struct stat st;
        bool ignore_notfound;
        int r;

        assert(path);
        assert(acc);

        if (node[0] == '-') {
                /* Non-existent paths starting with "-" must be silently ignored */
                node++;
                ignore_notfound = true;
        } else
                ignore_notfound = false;

        if (stat(node, &st) < 0) {
                if (errno == ENOENT && ignore_notfound)
                        return 0;

                return log_warning_errno(errno, "Couldn't stat device %s: %m", node);
        }

        if (!S_ISCHR(st.st_mode) && !S_ISBLK(st.st_mode)) {
                log_warning("%s is not a device.", node);
                return -ENODEV;
        }

        sprintf(buf,
                "%c %u:%u %s",
                S_ISCHR(st.st_mode) ? 'c' : 'b',
                major(st.st_rdev), minor(st.st_rdev),
                acc);

        r = cg_set_attribute("devices", path, "devices.allow", buf);
        if (r < 0)
                log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                               "Failed to set devices.allow on %s: %m", path);

        return r;
}

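/* Editorial note: like whitelist_device(), but whitelists an entire device major by name. /proc/devices is
 * scanned for majors whose name matches the given fnmatch() pattern, and for each match an entry covering all
 * minors is written, e.g. "c 136:* rw" for the "pts" character major on a typical kernel (the major number is an
 * illustration; it is whatever /proc/devices reports). */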
static int whitelist_major(const char *path, const char *name, char type, const char *acc) {
        _cleanup_fclose_ FILE *f = NULL;
        char line[LINE_MAX];
        bool good = false;
        int r;

        assert(path);
        assert(acc);
        assert(IN_SET(type, 'b', 'c'));

        f = fopen("/proc/devices", "re");
        if (!f)
                return log_warning_errno(errno, "Cannot open /proc/devices to resolve %s (%c): %m", name, type);

        FOREACH_LINE(line, f, goto fail) {
                char buf[2+DECIMAL_STR_MAX(unsigned)+3+4], *p, *w;
                unsigned maj;

                truncate_nl(line);

                if (type == 'c' && streq(line, "Character devices:")) {
                        good = true;
                        continue;
                }

                if (type == 'b' && streq(line, "Block devices:")) {
                        good = true;
                        continue;
                }

                if (isempty(line)) {
                        good = false;
                        continue;
                }

                if (!good)
                        continue;

                p = strstrip(line);

                w = strpbrk(p, WHITESPACE);
                if (!w)
                        continue;
                *w = 0;

                r = safe_atou(p, &maj);
                if (r < 0)
                        continue;
                if (maj <= 0)
                        continue;

                w++;
                w += strspn(w, WHITESPACE);

                if (fnmatch(name, w, 0) != 0)
                        continue;

                sprintf(buf,
                        "%c %u:* %s",
                        type,
                        maj,
                        acc);

                r = cg_set_attribute("devices", path, "devices.allow", buf);
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set devices.allow on %s: %m", path);
        }

        return 0;

fail:
        return log_warning_errno(errno, "Failed to read /proc/devices: %m");
}

static bool cgroup_context_has_cpu_weight(CGroupContext *c) {
        return c->cpu_weight != CGROUP_WEIGHT_INVALID ||
                c->startup_cpu_weight != CGROUP_WEIGHT_INVALID;
}

static bool cgroup_context_has_cpu_shares(CGroupContext *c) {
        return c->cpu_shares != CGROUP_CPU_SHARES_INVALID ||
                c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID;
}

static uint64_t cgroup_context_cpu_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->startup_cpu_weight;
        else if (c->cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->cpu_weight;
        else
                return CGROUP_WEIGHT_DEFAULT;
}

static uint64_t cgroup_context_cpu_shares(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->startup_cpu_shares;
        else if (c->cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->cpu_shares;
        else
                return CGROUP_CPU_SHARES_DEFAULT;
}

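/* Editorial note: writes the unified-hierarchy CPU attributes. cpu.max takes "<quota> <period>" in microseconds,
 * or "max" for no limit. As a worked example with the 100ms period used here: CPUQuota=50%, i.e. a quota of
 * 500000 usec of CPU time per second, is written as "50000 100000". */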
static void cgroup_apply_unified_cpu_config(Unit *u, uint64_t weight, uint64_t quota) {
        char buf[MAX(DECIMAL_STR_MAX(uint64_t) + 1, (DECIMAL_STR_MAX(usec_t) + 1) * 2)];
        int r;

        xsprintf(buf, "%" PRIu64 "\n", weight);
        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.weight", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.weight: %m");

        if (quota != USEC_INFINITY)
                xsprintf(buf, USEC_FMT " " USEC_FMT "\n",
                         quota * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC, CGROUP_CPU_QUOTA_PERIOD_USEC);
        else
                xsprintf(buf, "max " USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);

        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.max", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.max: %m");
}

static void cgroup_apply_legacy_cpu_config(Unit *u, uint64_t shares, uint64_t quota) {
        char buf[MAX(DECIMAL_STR_MAX(uint64_t), DECIMAL_STR_MAX(usec_t)) + 1];
        int r;

        xsprintf(buf, "%" PRIu64 "\n", shares);
        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.shares", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.shares: %m");

        xsprintf(buf, USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);
        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_period_us", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.cfs_period_us: %m");

        if (quota != USEC_INFINITY) {
                xsprintf(buf, USEC_FMT "\n", quota * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC);
                r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_quota_us", buf);
        } else
                r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_quota_us", "-1");
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.cfs_quota_us: %m");
}

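/* Editorial note: these helpers convert between the legacy cpu.shares scale (default 1024) and the unified
 * cpu.weight scale (default 100, clamped to [1, 10000]) such that the defaults map onto each other. For example,
 * CPUShares=512 becomes a weight of 512 * 100 / 1024 = 50. */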
static uint64_t cgroup_cpu_shares_to_weight(uint64_t shares) {
        return CLAMP(shares * CGROUP_WEIGHT_DEFAULT / CGROUP_CPU_SHARES_DEFAULT,
                     CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}

static uint64_t cgroup_cpu_weight_to_shares(uint64_t weight) {
        return CLAMP(weight * CGROUP_CPU_SHARES_DEFAULT / CGROUP_WEIGHT_DEFAULT,
                     CGROUP_CPU_SHARES_MIN, CGROUP_CPU_SHARES_MAX);
}

static bool cgroup_context_has_io_config(CGroupContext *c) {
        return c->io_accounting ||
                c->io_weight != CGROUP_WEIGHT_INVALID ||
                c->startup_io_weight != CGROUP_WEIGHT_INVALID ||
                c->io_device_weights ||
                c->io_device_limits;
}

static bool cgroup_context_has_blockio_config(CGroupContext *c) {
        return c->blockio_accounting ||
                c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
                c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
                c->blockio_device_weights ||
                c->blockio_device_bandwidths;
}

static uint64_t cgroup_context_io_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_io_weight != CGROUP_WEIGHT_INVALID)
                return c->startup_io_weight;
        else if (c->io_weight != CGROUP_WEIGHT_INVALID)
                return c->io_weight;
        else
                return CGROUP_WEIGHT_DEFAULT;
}

static uint64_t cgroup_context_blkio_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
                return c->startup_blockio_weight;
        else if (c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
                return c->blockio_weight;
        else
                return CGROUP_BLKIO_WEIGHT_DEFAULT;
}

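/* Editorial note: the same default-preserving conversion, between the legacy blkio.weight scale (default 500) and
 * the unified io.weight scale (default 100, clamped to [1, 10000]). For example, BlockIOWeight=1000 maps to an IO
 * weight of 1000 * 100 / 500 = 200. */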
static uint64_t cgroup_weight_blkio_to_io(uint64_t blkio_weight) {
        return CLAMP(blkio_weight * CGROUP_WEIGHT_DEFAULT / CGROUP_BLKIO_WEIGHT_DEFAULT,
                     CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}

static uint64_t cgroup_weight_io_to_blkio(uint64_t io_weight) {
        return CLAMP(io_weight * CGROUP_BLKIO_WEIGHT_DEFAULT / CGROUP_WEIGHT_DEFAULT,
                     CGROUP_BLKIO_WEIGHT_MIN, CGROUP_BLKIO_WEIGHT_MAX);
}

static void cgroup_apply_io_device_weight(Unit *u, const char *dev_path, uint64_t io_weight) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), io_weight);
        r = cg_set_attribute("io", u->cgroup_path, "io.weight", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set io.weight: %m");
}

static void cgroup_apply_blkio_device_weight(Unit *u, const char *dev_path, uint64_t blkio_weight) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), blkio_weight);
        r = cg_set_attribute("blkio", u->cgroup_path, "blkio.weight_device", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set blkio.weight_device: %m");
}

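/* Editorial note: writes one io.max line per device. The unified hierarchy accepts all four limits in a single
 * entry, with "max" meaning "no limit"; e.g. "8:0 rbps=1048576 wbps=max riops=max wiops=max" caps reads on the
 * device with major:minor 8:0 at 1 MiB/s and leaves everything else unthrottled. */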
static void cgroup_apply_io_device_limit(Unit *u, const char *dev_path, uint64_t *limits) {
        char limit_bufs[_CGROUP_IO_LIMIT_TYPE_MAX][DECIMAL_STR_MAX(uint64_t)];
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+(6+DECIMAL_STR_MAX(uint64_t)+1)*4];
        CGroupIOLimitType type;
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                if (limits[type] != cgroup_io_limit_defaults[type])
                        xsprintf(limit_bufs[type], "%" PRIu64, limits[type]);
                else
                        xsprintf(limit_bufs[type], "%s", limits[type] == CGROUP_LIMIT_MAX ? "max" : "0");

        xsprintf(buf, "%u:%u rbps=%s wbps=%s riops=%s wiops=%s\n", major(dev), minor(dev),
                 limit_bufs[CGROUP_IO_RBPS_MAX], limit_bufs[CGROUP_IO_WBPS_MAX],
                 limit_bufs[CGROUP_IO_RIOPS_MAX], limit_bufs[CGROUP_IO_WIOPS_MAX]);
        r = cg_set_attribute("io", u->cgroup_path, "io.max", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set io.max: %m");
}

static void cgroup_apply_blkio_device_limit(Unit *u, const char *dev_path, uint64_t rbps, uint64_t wbps) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), rbps);
        r = cg_set_attribute("blkio", u->cgroup_path, "blkio.throttle.read_bps_device", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set blkio.throttle.read_bps_device: %m");

        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), wbps);
        r = cg_set_attribute("blkio", u->cgroup_path, "blkio.throttle.write_bps_device", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set blkio.throttle.write_bps_device: %m");
}

static bool cgroup_context_has_unified_memory_config(CGroupContext *c) {
        return c->memory_min > 0 || c->memory_low > 0 || c->memory_high != CGROUP_LIMIT_MAX || c->memory_max != CGROUP_LIMIT_MAX || c->memory_swap_max != CGROUP_LIMIT_MAX;
}

static void cgroup_apply_unified_memory_limit(Unit *u, const char *file, uint64_t v) {
        char buf[DECIMAL_STR_MAX(uint64_t) + 1] = "max";
        int r;

        if (v != CGROUP_LIMIT_MAX)
                xsprintf(buf, "%" PRIu64 "\n", v);

        r = cg_set_attribute("memory", u->cgroup_path, file, buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set %s: %m", file);
}

static void cgroup_apply_firewall(Unit *u) {
        assert(u);

        /* Best-effort: let's apply IP firewalling and/or accounting if that's enabled */

        if (bpf_firewall_compile(u) < 0)
                return;

        (void) bpf_firewall_install(u);
}

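/* Editorial note: this is the central attribute-application function. It translates a unit's CGroupContext into
 * concrete cgroupfs writes for every controller in apply_mask, picking the unified or legacy attribute set
 * depending on what the running kernel exposes, and transparently converting settings configured for the "other"
 * hierarchy (emitting a cgroup-compat debug message whenever such a translation happens). */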
static void cgroup_context_apply(
                Unit *u,
                CGroupMask apply_mask,
                bool apply_bpf,
                ManagerState state) {

        const char *path;
        CGroupContext *c;
        bool is_root;
        int r;

        assert(u);

        /* Nothing to do? Exit early! */
        if (apply_mask == 0 && !apply_bpf)
                return;

        /* Some cgroup attributes are not supported on the root cgroup, hence silently ignore */
        is_root = unit_has_root_cgroup(u);

        assert_se(c = unit_get_cgroup_context(u));
        assert_se(path = u->cgroup_path);

        if (is_root) /* Make sure we don't try to display messages with an empty path. */
                path = "/";

        /* We generally ignore errors caused by read-only mounted
         * cgroup trees (assuming we are running in a container then),
         * and missing cgroups, i.e. EROFS and ENOENT. */

        if ((apply_mask & CGROUP_MASK_CPU) && !is_root) {
                bool has_weight, has_shares;

                has_weight = cgroup_context_has_cpu_weight(c);
                has_shares = cgroup_context_has_cpu_shares(c);

                if (cg_all_unified() > 0) {
                        uint64_t weight;

                        if (has_weight)
                                weight = cgroup_context_cpu_weight(c, state);
                        else if (has_shares) {
                                uint64_t shares = cgroup_context_cpu_shares(c, state);

                                weight = cgroup_cpu_shares_to_weight(shares);

                                log_cgroup_compat(u, "Applying [Startup]CpuShares %" PRIu64 " as [Startup]CpuWeight %" PRIu64 " on %s",
                                                  shares, weight, path);
                        } else
                                weight = CGROUP_WEIGHT_DEFAULT;

                        cgroup_apply_unified_cpu_config(u, weight, c->cpu_quota_per_sec_usec);
                } else {
                        uint64_t shares;

                        if (has_weight) {
                                uint64_t weight = cgroup_context_cpu_weight(c, state);

                                shares = cgroup_cpu_weight_to_shares(weight);

                                log_cgroup_compat(u, "Applying [Startup]CpuWeight %" PRIu64 " as [Startup]CpuShares %" PRIu64 " on %s",
                                                  weight, shares, path);
                        } else if (has_shares)
                                shares = cgroup_context_cpu_shares(c, state);
                        else
                                shares = CGROUP_CPU_SHARES_DEFAULT;

                        cgroup_apply_legacy_cpu_config(u, shares, c->cpu_quota_per_sec_usec);
                }
        }

        if (apply_mask & CGROUP_MASK_IO) {
                bool has_io = cgroup_context_has_io_config(c);
                bool has_blockio = cgroup_context_has_blockio_config(c);

                if (!is_root) {
                        char buf[8+DECIMAL_STR_MAX(uint64_t)+1];
                        uint64_t weight;

                        if (has_io)
                                weight = cgroup_context_io_weight(c, state);
                        else if (has_blockio) {
                                uint64_t blkio_weight = cgroup_context_blkio_weight(c, state);

                                weight = cgroup_weight_blkio_to_io(blkio_weight);

                                log_cgroup_compat(u, "Applying [Startup]BlockIOWeight %" PRIu64 " as [Startup]IOWeight %" PRIu64,
                                                  blkio_weight, weight);
                        } else
                                weight = CGROUP_WEIGHT_DEFAULT;

                        xsprintf(buf, "default %" PRIu64 "\n", weight);
                        r = cg_set_attribute("io", path, "io.weight", buf);
                        if (r < 0)
                                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                              "Failed to set io.weight: %m");

                        if (has_io) {
                                CGroupIODeviceWeight *w;

                                /* FIXME: no way to reset this list */
                                LIST_FOREACH(device_weights, w, c->io_device_weights)
                                        cgroup_apply_io_device_weight(u, w->path, w->weight);
                        } else if (has_blockio) {
                                CGroupBlockIODeviceWeight *w;

                                /* FIXME: no way to reset this list */
                                LIST_FOREACH(device_weights, w, c->blockio_device_weights) {
                                        weight = cgroup_weight_blkio_to_io(w->weight);

                                        log_cgroup_compat(u, "Applying BlockIODeviceWeight %" PRIu64 " as IODeviceWeight %" PRIu64 " for %s",
                                                          w->weight, weight, w->path);

                                        cgroup_apply_io_device_weight(u, w->path, weight);
                                }
                        }
                }

                /* Apply limits and free ones without config. */
                if (has_io) {
                        CGroupIODeviceLimit *l;

                        LIST_FOREACH(device_limits, l, c->io_device_limits)
                                cgroup_apply_io_device_limit(u, l->path, l->limits);

                } else if (has_blockio) {
                        CGroupBlockIODeviceBandwidth *b;

                        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                                uint64_t limits[_CGROUP_IO_LIMIT_TYPE_MAX];
                                CGroupIOLimitType type;

                                for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                                        limits[type] = cgroup_io_limit_defaults[type];

                                limits[CGROUP_IO_RBPS_MAX] = b->rbps;
                                limits[CGROUP_IO_WBPS_MAX] = b->wbps;

                                log_cgroup_compat(u, "Applying BlockIO{Read|Write}Bandwidth %" PRIu64 " %" PRIu64 " as IO{Read|Write}BandwidthMax for %s",
                                                  b->rbps, b->wbps, b->path);

                                cgroup_apply_io_device_limit(u, b->path, limits);
                        }
                }
        }

        if (apply_mask & CGROUP_MASK_BLKIO) {
                bool has_io = cgroup_context_has_io_config(c);
                bool has_blockio = cgroup_context_has_blockio_config(c);

                if (!is_root) {
                        char buf[DECIMAL_STR_MAX(uint64_t)+1];
                        uint64_t weight;

                        if (has_io) {
                                uint64_t io_weight = cgroup_context_io_weight(c, state);

                                weight = cgroup_weight_io_to_blkio(cgroup_context_io_weight(c, state));

                                log_cgroup_compat(u, "Applying [Startup]IOWeight %" PRIu64 " as [Startup]BlockIOWeight %" PRIu64,
                                                  io_weight, weight);
                        } else if (has_blockio)
                                weight = cgroup_context_blkio_weight(c, state);
                        else
                                weight = CGROUP_BLKIO_WEIGHT_DEFAULT;

                        xsprintf(buf, "%" PRIu64 "\n", weight);
                        r = cg_set_attribute("blkio", path, "blkio.weight", buf);
                        if (r < 0)
                                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                              "Failed to set blkio.weight: %m");

                        if (has_io) {
                                CGroupIODeviceWeight *w;

                                /* FIXME: no way to reset this list */
                                LIST_FOREACH(device_weights, w, c->io_device_weights) {
                                        weight = cgroup_weight_io_to_blkio(w->weight);

                                        log_cgroup_compat(u, "Applying IODeviceWeight %" PRIu64 " as BlockIODeviceWeight %" PRIu64 " for %s",
                                                          w->weight, weight, w->path);

                                        cgroup_apply_blkio_device_weight(u, w->path, weight);
                                }
                        } else if (has_blockio) {
                                CGroupBlockIODeviceWeight *w;

                                /* FIXME: no way to reset this list */
                                LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                                        cgroup_apply_blkio_device_weight(u, w->path, w->weight);
                        }
                }

                /* Apply limits and free ones without config. */
                if (has_io) {
                        CGroupIODeviceLimit *l;

                        LIST_FOREACH(device_limits, l, c->io_device_limits) {
                                log_cgroup_compat(u, "Applying IO{Read|Write}Bandwidth %" PRIu64 " %" PRIu64 " as BlockIO{Read|Write}BandwidthMax for %s",
                                                  l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX], l->path);

                                cgroup_apply_blkio_device_limit(u, l->path, l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX]);
                        }
                } else if (has_blockio) {
                        CGroupBlockIODeviceBandwidth *b;

                        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths)
                                cgroup_apply_blkio_device_limit(u, b->path, b->rbps, b->wbps);
                }
        }

        if ((apply_mask & CGROUP_MASK_MEMORY) && !is_root) {
                if (cg_all_unified() > 0) {
                        uint64_t max, swap_max = CGROUP_LIMIT_MAX;

                        if (cgroup_context_has_unified_memory_config(c)) {
                                max = c->memory_max;
                                swap_max = c->memory_swap_max;
                        } else {
                                max = c->memory_limit;

                                if (max != CGROUP_LIMIT_MAX)
                                        log_cgroup_compat(u, "Applying MemoryLimit %" PRIu64 " as MemoryMax", max);
                        }

                        cgroup_apply_unified_memory_limit(u, "memory.min", c->memory_min);
                        cgroup_apply_unified_memory_limit(u, "memory.low", c->memory_low);
                        cgroup_apply_unified_memory_limit(u, "memory.high", c->memory_high);
                        cgroup_apply_unified_memory_limit(u, "memory.max", max);
                        cgroup_apply_unified_memory_limit(u, "memory.swap.max", swap_max);
                } else {
                        char buf[DECIMAL_STR_MAX(uint64_t) + 1];
                        uint64_t val;

                        if (cgroup_context_has_unified_memory_config(c)) {
                                val = c->memory_max;
                                log_cgroup_compat(u, "Applying MemoryMax %" PRIu64 " as MemoryLimit", val);
                        } else
                                val = c->memory_limit;

                        if (val == CGROUP_LIMIT_MAX)
                                strncpy(buf, "-1\n", sizeof(buf));
                        else
                                xsprintf(buf, "%" PRIu64 "\n", val);

                        r = cg_set_attribute("memory", path, "memory.limit_in_bytes", buf);
                        if (r < 0)
                                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                              "Failed to set memory.limit_in_bytes: %m");
                }
        }

        if ((apply_mask & CGROUP_MASK_DEVICES) && !is_root) {
                CGroupDeviceAllow *a;

                /* Changing the devices list of a populated cgroup
                 * might result in EINVAL, hence ignore EINVAL
                 * here. */

                if (c->device_allow || c->device_policy != CGROUP_AUTO)
                        r = cg_set_attribute("devices", path, "devices.deny", "a");
                else
                        r = cg_set_attribute("devices", path, "devices.allow", "a");
                if (r < 0)
                        log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                      "Failed to reset devices.list: %m");

                if (c->device_policy == CGROUP_CLOSED ||
                    (c->device_policy == CGROUP_AUTO && c->device_allow)) {
                        static const char auto_devices[] =
                                "/dev/null\0" "rwm\0"
                                "/dev/zero\0" "rwm\0"
                                "/dev/full\0" "rwm\0"
                                "/dev/random\0" "rwm\0"
                                "/dev/urandom\0" "rwm\0"
                                "/dev/tty\0" "rwm\0"
                                "/dev/ptmx\0" "rwm\0"
                                /* Allow /run/systemd/inaccessible/{chr,blk} devices for mapping InaccessiblePaths */
                                "-/run/systemd/inaccessible/chr\0" "rwm\0"
                                "-/run/systemd/inaccessible/blk\0" "rwm\0";

                        const char *x, *y;

                        NULSTR_FOREACH_PAIR(x, y, auto_devices)
                                whitelist_device(path, x, y);

                        /* PTS (/dev/pts) devices may not be duplicated, but accessed */
                        whitelist_major(path, "pts", 'c', "rw");
                }

                LIST_FOREACH(device_allow, a, c->device_allow) {
                        char acc[4], *val;
                        unsigned k = 0;

                        if (a->r)
                                acc[k++] = 'r';
                        if (a->w)
                                acc[k++] = 'w';
                        if (a->m)
                                acc[k++] = 'm';

                        if (k == 0)
                                continue;

                        acc[k++] = 0;

                        if (path_startswith(a->path, "/dev/"))
                                whitelist_device(path, a->path, acc);
                        else if ((val = startswith(a->path, "block-")))
                                whitelist_major(path, val, 'b', acc);
                        else if ((val = startswith(a->path, "char-")))
                                whitelist_major(path, val, 'c', acc);
                        else
                                log_unit_debug(u, "Ignoring device %s while writing cgroup attribute.", a->path);
                }
        }

        if (apply_mask & CGROUP_MASK_PIDS) {

                if (is_root) {
                        /* So, the "pids" controller does not expose anything on the root cgroup, in order not to
                         * replicate knobs exposed elsewhere needlessly. We abstract this away here however, and when
                         * the knobs of the root cgroup are modified propagate this to the relevant sysctls. There's a
                         * non-obvious asymmetry however: unlike the cgroup properties we don't really want to take
                         * exclusive ownership of the sysctls, but we still want to honour things if the user sets
                         * limits. Hence we employ sort of a one-way strategy: when the user sets a bounded limit
                         * through us it counts. When the user afterwards unsets it again (i.e. sets it to unbounded)
                         * it also counts. But if the user never set a limit through us (i.e. we are the default of
                         * "unbounded") we leave things unmodified. For this we manage a global boolean that we turn on
                         * the first time we set a limit. Note that this boolean is flushed out on manager reload,
                         * which is desirable so that there's an official way to release control of the sysctl from
                         * systemd: set the limit to unbounded and reload. */

                        if (c->tasks_max != CGROUP_LIMIT_MAX) {
                                u->manager->sysctl_pid_max_changed = true;
                                r = procfs_tasks_set_limit(c->tasks_max);
                        } else if (u->manager->sysctl_pid_max_changed)
                                r = procfs_tasks_set_limit(TASKS_MAX);
                        else
                                r = 0;

                        if (r < 0)
                                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                              "Failed to write to tasks limit sysctls: %m");

                } else {
                        if (c->tasks_max != CGROUP_LIMIT_MAX) {
                                char buf[DECIMAL_STR_MAX(uint64_t) + 2];

                                sprintf(buf, "%" PRIu64 "\n", c->tasks_max);
                                r = cg_set_attribute("pids", path, "pids.max", buf);
                        } else
                                r = cg_set_attribute("pids", path, "pids.max", "max");
                        if (r < 0)
                                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                              "Failed to set pids.max: %m");
                }
        }

        if (apply_bpf)
                cgroup_apply_firewall(u);
}

CGroupMask cgroup_context_get_mask(CGroupContext *c) {
        CGroupMask mask = 0;

        /* Figure out which controllers we need */

        if (c->cpu_accounting ||
            cgroup_context_has_cpu_weight(c) ||
            cgroup_context_has_cpu_shares(c) ||
            c->cpu_quota_per_sec_usec != USEC_INFINITY)
                mask |= CGROUP_MASK_CPUACCT | CGROUP_MASK_CPU;

        if (cgroup_context_has_io_config(c) || cgroup_context_has_blockio_config(c))
                mask |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;

        if (c->memory_accounting ||
            c->memory_limit != CGROUP_LIMIT_MAX ||
            cgroup_context_has_unified_memory_config(c))
                mask |= CGROUP_MASK_MEMORY;

        if (c->device_allow ||
            c->device_policy != CGROUP_AUTO)
                mask |= CGROUP_MASK_DEVICES;

        if (c->tasks_accounting ||
            c->tasks_max != CGROUP_LIMIT_MAX)
                mask |= CGROUP_MASK_PIDS;

        return mask;
}

CGroupMask unit_get_own_mask(Unit *u) {
        CGroupContext *c;

        /* Returns the mask of controllers the unit needs for itself */

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        return cgroup_context_get_mask(c) | unit_get_delegate_mask(u);
}

CGroupMask unit_get_delegate_mask(Unit *u) {
        CGroupContext *c;

        /* If delegation is turned on, then turn on selected controllers, unless we are on the legacy hierarchy and the
         * process we fork into is known to drop privileges, and hence shouldn't get access to the controllers.
         *
         * Note that on the unified hierarchy it is safe to delegate controllers to unprivileged services. */

        if (!unit_cgroup_delegate(u))
                return 0;

        if (cg_all_unified() <= 0) {
                ExecContext *e;

                e = unit_get_exec_context(u);
                if (e && !exec_context_maintains_privileges(e))
                        return 0;
        }

        assert_se(c = unit_get_cgroup_context(u));
        return c->delegate_controllers;
}

CGroupMask unit_get_members_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's children require, merged */

        if (u->cgroup_members_mask_valid)
                return u->cgroup_members_mask;

        u->cgroup_members_mask = 0;

        if (u->type == UNIT_SLICE) {
                void *v;
                Unit *member;
                Iterator i;

                HASHMAP_FOREACH_KEY(v, member, u->dependencies[UNIT_BEFORE], i) {

                        if (member == u)
                                continue;

                        if (UNIT_DEREF(member->slice) != u)
                                continue;

                        u->cgroup_members_mask |= unit_get_subtree_mask(member); /* note that this calls ourselves again, for the children */
                }
        }

        u->cgroup_members_mask_valid = true;
        return u->cgroup_members_mask;
}

CGroupMask unit_get_siblings_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's siblings
         * require, i.e. the members mask of the unit's parent slice
         * if there is one. */

        if (UNIT_ISSET(u->slice))
                return unit_get_members_mask(UNIT_DEREF(u->slice));

        return unit_get_subtree_mask(u); /* we are the top-level slice */
}

CGroupMask unit_get_subtree_mask(Unit *u) {

        /* Returns the mask of this subtree, meaning of the group
         * itself and its children. */

        return unit_get_own_mask(u) | unit_get_members_mask(u);
}

CGroupMask unit_get_target_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable
         * for a specific cgroup, i.e. everything it needs itself,
         * plus all that its children need, plus all that its siblings
         * need. This is primarily useful on the legacy cgroup
         * hierarchy, where we need to duplicate each cgroup in each
         * hierarchy that shall be enabled for it. */

        mask = unit_get_own_mask(u) | unit_get_members_mask(u) | unit_get_siblings_mask(u);
        mask &= u->manager->cgroup_supported;

        return mask;
}

CGroupMask unit_get_enable_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable
         * for the children of a specific cgroup. This is primarily
         * useful for the unified cgroup hierarchy, where each cgroup
         * controls which controllers are enabled for its children. */

        mask = unit_get_members_mask(u);
        mask &= u->manager->cgroup_supported;

        return mask;
}

bool unit_get_needs_bpf(Unit *u) {
        CGroupContext *c;
        Unit *p;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        if (c->ip_accounting ||
            c->ip_address_allow ||
            c->ip_address_deny)
                return true;

        /* If any parent slice has an IP access list defined, it applies too */
        for (p = UNIT_DEREF(u->slice); p; p = UNIT_DEREF(p->slice)) {
                c = unit_get_cgroup_context(p);
                if (!c)
                        return false;

                if (c->ip_address_allow ||
                    c->ip_address_deny)
                        return true;
        }

        return false;
}

/* Recurse from a unit up through its containing slices, propagating
 * mask bits upward. A unit is also a member of itself. */
void unit_update_cgroup_members_masks(Unit *u) {
        CGroupMask m;
        bool more;

        assert(u);

        /* Calculate subtree mask */
        m = unit_get_subtree_mask(u);

        /* See if anything changed from the previous invocation. If
         * not, we're done. */
        if (u->cgroup_subtree_mask_valid && m == u->cgroup_subtree_mask)
                return;

        more =
                u->cgroup_subtree_mask_valid &&
                ((m & ~u->cgroup_subtree_mask) != 0) &&
                ((~m & u->cgroup_subtree_mask) == 0);

        u->cgroup_subtree_mask = m;
        u->cgroup_subtree_mask_valid = true;

        if (UNIT_ISSET(u->slice)) {
                Unit *s = UNIT_DEREF(u->slice);

                if (more)
                        /* There's more set now than before. We
                         * propagate the new mask to the parent's mask
                         * (not caring if it actually was valid or
                         * not). */

                        s->cgroup_members_mask |= m;

                else
                        /* There's less set now than before (or we
                         * don't know), we need to recalculate
                         * everything, so let's invalidate the
                         * parent's members mask */

                        s->cgroup_members_mask_valid = false;

                /* And now make sure that this change also hits our
                 * grandparents */
                unit_update_cgroup_members_masks(s);
        }
}

const char *unit_get_realized_cgroup_path(Unit *u, CGroupMask mask) {

        /* Returns the realized cgroup path of the specified unit where all specified controllers are available. */

        while (u) {

                if (u->cgroup_path &&
                    u->cgroup_realized &&
                    FLAGS_SET(u->cgroup_realized_mask, mask))
                        return u->cgroup_path;

                u = UNIT_DEREF(u->slice);
        }

        return NULL;
}

static const char *migrate_callback(CGroupMask mask, void *userdata) {
        return unit_get_realized_cgroup_path(userdata, mask);
}

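/* Editorial note: derives the default cgroup path for a unit from its slice chain relative to the manager's
 * cgroup root. As an illustration, a unit "foo.service" sitting in "bar.slice" ends up (with a root of "/") at
 * "/bar.slice/foo.service", while the root slice maps to the cgroup root itself; cg_escape() rewrites unit names
 * that would collide with kernel-reserved cgroupfs entries. */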
char *unit_default_cgroup_path(Unit *u) {
        _cleanup_free_ char *escaped = NULL, *slice = NULL;
        int r;

        assert(u);

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return strdup(u->manager->cgroup_root);

        if (UNIT_ISSET(u->slice) && !unit_has_name(UNIT_DEREF(u->slice), SPECIAL_ROOT_SLICE)) {
                r = cg_slice_to_path(UNIT_DEREF(u->slice)->id, &slice);
                if (r < 0)
                        return NULL;
        }

        escaped = cg_escape(u->id);
        if (!escaped)
                return NULL;

        if (slice)
                return strjoin(u->manager->cgroup_root, "/", slice, "/",
                               escaped);
        else
                return strjoin(u->manager->cgroup_root, "/", escaped);
}

int unit_set_cgroup_path(Unit *u, const char *path) {
        _cleanup_free_ char *p = NULL;
        int r;

        assert(u);

        if (path) {
                p = strdup(path);
                if (!p)
                        return -ENOMEM;
        } else
                p = NULL;

        if (streq_ptr(u->cgroup_path, p))
                return 0;

        if (p) {
                r = hashmap_put(u->manager->cgroup_unit, p, u);
                if (r < 0)
                        return r;
        }

        unit_release_cgroup(u);

        u->cgroup_path = TAKE_PTR(p);

        return 1;
}

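/* Editorial note: sets up an inotify watch on the unit's "cgroup.events" file (unified hierarchy only). The
 * kernel rewrites that file whenever e.g. its "populated" field flips, so the manager learns that a cgroup has
 * run empty without having to poll. */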
int unit_watch_cgroup(Unit *u) {
        _cleanup_free_ char *events = NULL;
        int r;

        assert(u);

        if (!u->cgroup_path)
                return 0;

        if (u->cgroup_inotify_wd >= 0)
                return 0;

        /* Only applies to the unified hierarchy */
        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return log_error_errno(r, "Failed to determine whether the name=systemd hierarchy is unified: %m");
        if (r == 0)
                return 0;

        /* Don't watch the root slice, it's pointless. */
        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        r = hashmap_ensure_allocated(&u->manager->cgroup_inotify_wd_unit, &trivial_hash_ops);
        if (r < 0)
                return log_oom();

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events", &events);
        if (r < 0)
                return log_oom();

        u->cgroup_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, events, IN_MODIFY);
        if (u->cgroup_inotify_wd < 0) {

                if (errno == ENOENT) /* If the directory is already
                                      * gone we don't need to track
                                      * it, so this is not an error */
                        return 0;

                return log_unit_error_errno(u, errno, "Failed to add inotify watch descriptor for control group %s: %m", u->cgroup_path);
        }

        r = hashmap_put(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd), u);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to add inotify watch descriptor to hash map: %m");

        return 0;
}

int unit_pick_cgroup_path(Unit *u) {
        _cleanup_free_ char *path = NULL;
        int r;

        assert(u);

        if (u->cgroup_path)
                return 0;

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EINVAL;

        path = unit_default_cgroup_path(u);
        if (!path)
                return log_oom();

        r = unit_set_cgroup_path(u, path);
        if (r == -EEXIST)
                return log_unit_error_errno(u, r, "Control group %s exists already.", path);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to set unit's control group path to %s: %m", path);

        return 0;
}

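/* Editorial note: realizes the unit's cgroup. This creates the group in all supported hierarchies, enables the
 * requested controllers for its subtree (leaving delegated cgroups alone once created), and migrates stray
 * processes back in where that is safe to do. */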
static int unit_create_cgroup(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask,
                bool needs_bpf) {

        CGroupContext *c;
        int r;
        bool created;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        /* Figure out our cgroup path */
        r = unit_pick_cgroup_path(u);
        if (r < 0)
                return r;

        /* First, create our own group */
        r = cg_create_everywhere(u->manager->cgroup_supported, target_mask, u->cgroup_path);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to create cgroup %s: %m", u->cgroup_path);
        created = !!r;

        /* Start watching it */
        (void) unit_watch_cgroup(u);

        /* Preserve enabled controllers in delegated units, adjust others. */
        if (created || !unit_cgroup_delegate(u)) {

                /* Enable all controllers we need */
                r = cg_enable_everywhere(u->manager->cgroup_supported, enable_mask, u->cgroup_path);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to enable controllers on cgroup %s, ignoring: %m",
                                               u->cgroup_path);
        }

        /* Keep track that this is now realized */
        u->cgroup_realized = true;
        u->cgroup_realized_mask = target_mask;
        u->cgroup_enabled_mask = enable_mask;
        u->cgroup_bpf_state = needs_bpf ? UNIT_CGROUP_BPF_ON : UNIT_CGROUP_BPF_OFF;

        if (u->type != UNIT_SLICE && !unit_cgroup_delegate(u)) {

                /* Then, possibly move things over, but not if
                 * subgroups may contain processes, which is the case
                 * for slice and delegation units. */
                r = cg_migrate_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->cgroup_path, migrate_callback, u);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to migrate cgroup to %s, ignoring: %m", u->cgroup_path);
        }

        return 0;
}

static int unit_attach_pid_to_cgroup_via_bus(Unit *u, pid_t pid, const char *suffix_path) {
        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
        char *pp;
        int r;

        assert(u);

        if (MANAGER_IS_SYSTEM(u->manager))
                return -EINVAL;

        if (!u->manager->system_bus)
                return -EIO;

        if (!u->cgroup_path)
                return -EINVAL;

        /* Determine this unit's cgroup path relative to our cgroup root */
        pp = path_startswith(u->cgroup_path, u->manager->cgroup_root);
        if (!pp)
                return -EINVAL;

        pp = strjoina("/", pp, suffix_path);
        path_simplify(pp, false);

        r = sd_bus_call_method(u->manager->system_bus,
                               "org.freedesktop.systemd1",
                               "/org/freedesktop/systemd1",
                               "org.freedesktop.systemd1.Manager",
                               "AttachProcessesToUnit",
                               &error, NULL,
                               "ssau",
                               NULL /* empty unit name means client's unit, i.e. us */, pp, 1, (uint32_t) pid);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to attach unit process " PID_FMT " via the bus: %s", pid, bus_error_message(&error, r));

        return 0;
}

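/* Editorial note: moves the given PIDs into the unit's cgroup (or a sub-path of it, for delegation). On the
 * legacy setup the attachment is replicated into each controller hierarchy; in a user manager, permission
 * failures are retried through the privileged system instance via the D-Bus call prepared above. */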
1534int unit_attach_pids_to_cgroup(Unit *u, Set *pids, const char *suffix_path) {
1535 CGroupMask delegated_mask;
1536 const char *p;
1537 Iterator i;
1538 void *pidp;
1539 int r, q;
1540
1541 assert(u);
1542
1543 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1544 return -EINVAL;
1545
1546 if (set_isempty(pids))
1547 return 0;
7b3fd631 1548
6592b975 1549 r = unit_realize_cgroup(u);
7b3fd631
LP
1550 if (r < 0)
1551 return r;
1552
6592b975
LP
1553 if (isempty(suffix_path))
1554 p = u->cgroup_path;
1555 else
1556 p = strjoina(u->cgroup_path, "/", suffix_path);
1557
1558 delegated_mask = unit_get_delegate_mask(u);
1559
1560 r = 0;
1561 SET_FOREACH(pidp, pids, i) {
1562 pid_t pid = PTR_TO_PID(pidp);
1563 CGroupController c;
1564
1565 /* First, attach the PID to the main cgroup hierarchy */
1566 q = cg_attach(SYSTEMD_CGROUP_CONTROLLER, p, pid);
1567 if (q < 0) {
1568 log_unit_debug_errno(u, q, "Couldn't move process " PID_FMT " to requested cgroup '%s': %m", pid, p);
1569
1570 if (MANAGER_IS_USER(u->manager) && IN_SET(q, -EPERM, -EACCES)) {
1571 int z;
1572
1573 /* If we are in a user instance, and we can't move the process ourselves due to
1574 * permission problems, let's ask the system instance about it instead. Since it's more
1575 * privileged it might be able to move the process across the leaves of a subtree who's
1576 * top node is not owned by us. */
1577
1578 z = unit_attach_pid_to_cgroup_via_bus(u, pid, suffix_path);
1579 if (z < 0)
1580 log_unit_debug_errno(u, z, "Couldn't move process " PID_FMT " to requested cgroup '%s' via the system bus either: %m", pid, p);
1581 else
1582 continue; /* When the bus thing worked via the bus we are fully done for this PID. */
1583 }
1584
1585 if (r >= 0)
1586 r = q; /* Remember first error */
1587
1588 continue;
1589 }
1590
1591 q = cg_all_unified();
1592 if (q < 0)
1593 return q;
1594 if (q > 0)
1595 continue;
1596
1597 /* In the legacy hierarchy, attach the process to the request cgroup if possible, and if not to the
1598 * innermost realized one */
1599
1600 for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++) {
1601 CGroupMask bit = CGROUP_CONTROLLER_TO_MASK(c);
1602 const char *realized;
1603
1604 if (!(u->manager->cgroup_supported & bit))
1605 continue;
1606
1607 /* If this controller is delegated and realized, honour the caller's request for the cgroup suffix. */
1608 if (delegated_mask & u->cgroup_realized_mask & bit) {
1609 q = cg_attach(cgroup_controller_to_string(c), p, pid);
1610 if (q >= 0)
1611 continue; /* Success! */
1612
1613 log_unit_debug_errno(u, q, "Failed to attach PID " PID_FMT " to requested cgroup %s in controller %s, falling back to unit's cgroup: %m",
1614 pid, p, cgroup_controller_to_string(c));
1615 }
1616
1617 /* So this controller is either not delegate or realized, or something else weird happened. In
1618 * that case let's attach the PID at least to the closest cgroup up the tree that is
1619 * realized. */
1620 realized = unit_get_realized_cgroup_path(u, bit);
1621 if (!realized)
1622 continue; /* Not even realized in the root slice? Then let's not bother */
1623
1624 q = cg_attach(cgroup_controller_to_string(c), realized, pid);
1625 if (q < 0)
1626 log_unit_debug_errno(u, q, "Failed to attach PID " PID_FMT " to realized cgroup %s in controller %s, ignoring: %m",
1627 pid, realized, cgroup_controller_to_string(c));
1628 }
1629 }
1630
1631 return r;
7b3fd631
LP
1632}
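
/* Illustrative sketch (not part of the original source): how a caller might hand a
 * single PID to unit_attach_pids_to_cgroup(). "u" and "pid" are assumed to be a valid
 * Unit pointer and process ID; error handling is abbreviated.
 *
 *     _cleanup_set_free_ Set *pids = set_new(NULL);
 *     if (!pids)
 *             return -ENOMEM;
 *
 *     r = set_put(pids, PID_TO_PTR(pid));
 *     if (r < 0)
 *             return r;
 *
 *     r = unit_attach_pids_to_cgroup(u, pids, NULL);  // NULL suffix: attach to the unit's own cgroup
 */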

static void cgroup_xattr_apply(Unit *u) {
        char ids[SD_ID128_STRING_MAX];
        int r;

        assert(u);

        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        if (sd_id128_is_null(u->invocation_id))
                return;

        r = cg_set_xattr(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                         "trusted.invocation_id",
                         sd_id128_to_string(u->invocation_id, ids), 32,
                         0);
        if (r < 0)
                log_unit_debug_errno(u, r, "Failed to set invocation ID on control group %s, ignoring: %m", u->cgroup_path);
}
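
/* Illustrative sketch (an assumption, not upstream code): the invocation ID written
 * above can be read back from cgroupfs with the plain POSIX xattr API. The cgroup path
 * below is a hypothetical example.
 *
 *     #include <sys/xattr.h>
 *
 *     char buf[SD_ID128_STRING_MAX];
 *     ssize_t n = getxattr("/sys/fs/cgroup/system.slice/foo.service",
 *                          "trusted.invocation_id", buf, sizeof(buf) - 1);
 *     if (n >= 0)
 *             buf[n] = 0;  // stored as a 32-character ID string, not NUL-terminated
 */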

static bool unit_has_mask_realized(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask,
                bool needs_bpf) {

        assert(u);

        return u->cgroup_realized &&
                u->cgroup_realized_mask == target_mask &&
                u->cgroup_enabled_mask == enable_mask &&
                ((needs_bpf && u->cgroup_bpf_state == UNIT_CGROUP_BPF_ON) ||
                 (!needs_bpf && u->cgroup_bpf_state == UNIT_CGROUP_BPF_OFF));
}

static void unit_add_to_cgroup_realize_queue(Unit *u) {
        assert(u);

        if (u->in_cgroup_realize_queue)
                return;

        LIST_PREPEND(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
        u->in_cgroup_realize_queue = true;
}

static void unit_remove_from_cgroup_realize_queue(Unit *u) {
        assert(u);

        if (!u->in_cgroup_realize_queue)
                return;

        LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
        u->in_cgroup_realize_queue = false;
}

/* Check if necessary controllers and attributes for a unit are in place.
 *
 * If so, do nothing.
 * If not, create paths, move processes over, and set attributes.
 *
 * Returns 0 on success and < 0 on failure. */
static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
        CGroupMask target_mask, enable_mask;
        bool needs_bpf, apply_bpf;
        int r;

        assert(u);

        unit_remove_from_cgroup_realize_queue(u);

        target_mask = unit_get_target_mask(u);
        enable_mask = unit_get_enable_mask(u);
        needs_bpf = unit_get_needs_bpf(u);

        if (unit_has_mask_realized(u, target_mask, enable_mask, needs_bpf))
                return 0;

        /* Make sure we apply the BPF filters either when one is configured, or if none is configured but previously
         * the state was anything but off. This way, if a unit with a BPF filter applied is reconfigured to lose it
         * this will trickle down properly to cgroupfs. */
        apply_bpf = needs_bpf || u->cgroup_bpf_state != UNIT_CGROUP_BPF_OFF;

        /* First, realize parents */
        if (UNIT_ISSET(u->slice)) {
                r = unit_realize_cgroup_now(UNIT_DEREF(u->slice), state);
                if (r < 0)
                        return r;
        }

        /* And then do the real work */
        r = unit_create_cgroup(u, target_mask, enable_mask, needs_bpf);
        if (r < 0)
                return r;

        /* Finally, apply the necessary attributes. */
        cgroup_context_apply(u, target_mask, apply_bpf, state);
        cgroup_xattr_apply(u);

        return 0;
}

unsigned manager_dispatch_cgroup_realize_queue(Manager *m) {
        ManagerState state;
        unsigned n = 0;
        Unit *i;
        int r;

        assert(m);

        state = manager_state(m);

        while ((i = m->cgroup_realize_queue)) {
                assert(i->in_cgroup_realize_queue);

                if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(i))) {
                        /* Maybe things changed, and the unit is not actually active anymore? */
                        unit_remove_from_cgroup_realize_queue(i);
                        continue;
                }

                r = unit_realize_cgroup_now(i, state);
                if (r < 0)
                        log_warning_errno(r, "Failed to realize cgroups for queued unit %s, ignoring: %m", i->id);

                n++;
        }

        return n;
}

static void unit_add_siblings_to_cgroup_realize_queue(Unit *u) {
        Unit *slice;

        /* This adds the siblings of the specified unit and the
         * siblings of all parent units to the cgroup queue. (But
         * neither the specified unit itself nor the parents.) */

        while ((slice = UNIT_DEREF(u->slice))) {
                Iterator i;
                Unit *m;
                void *v;

                HASHMAP_FOREACH_KEY(v, m, u->dependencies[UNIT_BEFORE], i) {
                        if (m == u)
                                continue;

                        /* Skip units that have a dependency on the slice
                         * but aren't actually in it. */
                        if (UNIT_DEREF(m->slice) != slice)
                                continue;

                        /* No point in doing cgroup application for units
                         * without active processes. */
                        if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(m)))
                                continue;

                        /* If the unit doesn't need any new controllers
                         * and has current ones realized, it doesn't need
                         * any changes. */
                        if (unit_has_mask_realized(m,
                                                   unit_get_target_mask(m),
                                                   unit_get_enable_mask(m),
                                                   unit_get_needs_bpf(m)))
                                continue;

                        unit_add_to_cgroup_realize_queue(m);
                }

                u = slice;
        }
}

int unit_realize_cgroup(Unit *u) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* So, here's the deal: when realizing the cgroups for this
         * unit, we need to first create all parents, but there's more
         * actually: for the weight-based controllers we also need to
         * make sure that all our siblings (i.e. units that are in the
         * same slice as we are) have cgroups, too. Otherwise, things
         * would become very uneven as each of their processes would
         * get as many resources as all of our group together. This
         * call will synchronously create the parent cgroups, but will
         * defer work on the siblings to the next event loop
         * iteration. */

        /* Add all sibling slices to the cgroup queue. */
        unit_add_siblings_to_cgroup_realize_queue(u);

        /* And realize this one now (and apply the values) */
        return unit_realize_cgroup_now(u, manager_state(u->manager));
}
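
/* Worked example of the unevenness mentioned above (illustrative numbers, not upstream
 * text): assume slice S contains units a.service and b.service, each with the default
 * CPU weight of 100. If only a.service's cgroup were realized, b.service's processes
 * would stay in S's own cgroup and compete at S's level, so each of them would weigh as
 * much as all of a.service combined. Realizing all siblings moves every unit's
 * processes one level down, where the weights compare unit against unit, as intended. */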

void unit_release_cgroup(Unit *u) {
        assert(u);

        /* Forgets all cgroup details for this cgroup */

        if (u->cgroup_path) {
                (void) hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
                u->cgroup_path = mfree(u->cgroup_path);
        }

        if (u->cgroup_inotify_wd >= 0) {
                if (inotify_rm_watch(u->manager->cgroup_inotify_fd, u->cgroup_inotify_wd) < 0)
                        log_unit_debug_errno(u, errno, "Failed to remove cgroup inotify watch %i for %s, ignoring: %m", u->cgroup_inotify_wd, u->id);

                (void) hashmap_remove(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd));
                u->cgroup_inotify_wd = -1;
        }
}

void unit_prune_cgroup(Unit *u) {
        int r;
        bool is_root_slice;

        assert(u);

        /* Removes the cgroup, if empty and possible, and stops watching it. */

        if (!u->cgroup_path)
                return;

        (void) unit_get_cpu_usage(u, NULL); /* Cache the last CPU usage value before we destroy the cgroup */

        is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);

        r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !is_root_slice);
        if (r < 0) {
                log_unit_debug_errno(u, r, "Failed to destroy cgroup %s, ignoring: %m", u->cgroup_path);
                return;
        }

        if (is_root_slice)
                return;

        unit_release_cgroup(u);

        u->cgroup_realized = false;
        u->cgroup_realized_mask = 0;
        u->cgroup_enabled_mask = 0;
}

int unit_search_main_pid(Unit *u, pid_t *ret) {
        _cleanup_fclose_ FILE *f = NULL;
        pid_t pid = 0, npid, mypid;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENXIO;

        r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, &f);
        if (r < 0)
                return r;

        mypid = getpid_cached();
        while (cg_read_pid(f, &npid) > 0) {
                pid_t ppid;

                if (npid == pid)
                        continue;

                /* Ignore processes that aren't our kids */
                if (get_process_ppid(npid, &ppid) >= 0 && ppid != mypid)
                        continue;

                if (pid != 0)
                        /* Dang, there's more than one daemonized PID
                           in this group, so we don't know what process
                           is the main process. */

                        return -ENODATA;

                pid = npid;
        }

        *ret = pid;
        return 0;
}
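
/* Illustrative sketch (not upstream code): a caller probing for a unit's main PID and
 * distinguishing "no data" from hard errors. "u" is an assumed Unit pointer.
 *
 *     pid_t main_pid;
 *     r = unit_search_main_pid(u, &main_pid);
 *     if (r == -ENODATA)
 *             ...  // more than one daemonized child, the main process can't be guessed
 *     else if (r < 0)
 *             ...  // no cgroup (-ENXIO) or enumeration failed
 *     else
 *             ...  // note main_pid may be 0 if no child of ours was found at all
 */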

static int unit_watch_pids_in_path(Unit *u, const char *path) {
        _cleanup_closedir_ DIR *d = NULL;
        _cleanup_fclose_ FILE *f = NULL;
        int ret = 0, r;

        assert(u);
        assert(path);

        r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f);
        if (r < 0)
                ret = r;
        else {
                pid_t pid;

                while ((r = cg_read_pid(f, &pid)) > 0) {
                        r = unit_watch_pid(u, pid);
                        if (r < 0 && ret >= 0)
                                ret = r;
                }

                if (r < 0 && ret >= 0)
                        ret = r;
        }

        r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d);
        if (r < 0) {
                if (ret >= 0)
                        ret = r;
        } else {
                char *fn;

                while ((r = cg_read_subgroup(d, &fn)) > 0) {
                        _cleanup_free_ char *p = NULL;

                        p = strjoin(path, "/", fn);
                        free(fn);

                        if (!p)
                                return -ENOMEM;

                        r = unit_watch_pids_in_path(u, p);
                        if (r < 0 && ret >= 0)
                                ret = r;
                }

                if (r < 0 && ret >= 0)
                        ret = r;
        }

        return ret;
}

int unit_synthesize_cgroup_empty_event(Unit *u) {
        int r;

        assert(u);

        /* Enqueue a synthetic cgroup empty event if this unit doesn't watch any PIDs anymore. This is compatibility
         * support for non-unified systems where notifications aren't reliable, and hence need to take whatever we can
         * get as notification source as soon as we stopped having any useful PIDs to watch for. */

        if (!u->cgroup_path)
                return -ENOENT;

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return r;
        if (r > 0) /* On unified we have reliable notifications, and don't need this */
                return 0;

        if (!set_isempty(u->pids))
                return 0;

        unit_add_to_cgroup_empty_queue(u);
        return 0;
}

int unit_watch_all_pids(Unit *u) {
        int r;

        assert(u);

        /* Adds all PIDs from our cgroup to the set of PIDs we
         * watch. This is a fallback logic for cases where we do not
         * get reliable cgroup empty notifications: we try to use
         * SIGCHLD as replacement. */

        if (!u->cgroup_path)
                return -ENOENT;

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return r;
        if (r > 0) /* On unified we can use proper notifications */
                return 0;

        return unit_watch_pids_in_path(u, u->cgroup_path);
}
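
/* Illustrative sketch (an assumption about how unit types combine these helpers, not a
 * verbatim quote of such code): on the legacy hierarchy a unit implementation reacting
 * to SIGCHLD might chain the two fallbacks above.
 *
 *     (void) unit_watch_all_pids(u);                  // watch remaining cgroup members via SIGCHLD
 *     (void) unit_synthesize_cgroup_empty_event(u);   // enqueue an empty event if nothing is left to watch
 */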

static int on_cgroup_empty_event(sd_event_source *s, void *userdata) {
        Manager *m = userdata;
        Unit *u;
        int r;

        assert(s);
        assert(m);

        u = m->cgroup_empty_queue;
        if (!u)
                return 0;

        assert(u->in_cgroup_empty_queue);
        u->in_cgroup_empty_queue = false;
        LIST_REMOVE(cgroup_empty_queue, m->cgroup_empty_queue, u);

        if (m->cgroup_empty_queue) {
                /* More stuff queued, let's make sure we remain enabled */
                r = sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);
                if (r < 0)
                        log_debug_errno(r, "Failed to reenable cgroup empty event source, ignoring: %m");
        }

        unit_add_to_gc_queue(u);

        if (UNIT_VTABLE(u)->notify_cgroup_empty)
                UNIT_VTABLE(u)->notify_cgroup_empty(u);

        return 0;
}

void unit_add_to_cgroup_empty_queue(Unit *u) {
        int r;

        assert(u);

        /* Note that there are four different ways cgroup empty events reach us:
         *
         * 1. On the unified hierarchy we get an inotify event on the cgroup
         *
         * 2. On the legacy hierarchy, when running in system mode, we get a datagram on the cgroup agent socket
         *
         * 3. On the legacy hierarchy, when running in user mode, we get a D-Bus signal on the system bus
         *
         * 4. On the legacy hierarchy, in service units we start watching all processes of the cgroup for SIGCHLD as
         *    soon as we get one SIGCHLD, to deal with unreliable cgroup notifications.
         *
         * Regardless which way we got the notification, we'll verify it here, and then add it to a separate
         * queue. This queue will be dispatched at a lower priority than the SIGCHLD handler, so that we always use
         * SIGCHLD if we can get it first, and only use the cgroup empty notifications if there's no SIGCHLD pending
         * (which might happen if the cgroup doesn't contain processes that are our own child, which is typically the
         * case for scope units). */

        if (u->in_cgroup_empty_queue)
                return;

        /* Let's verify that the cgroup is really empty */
        if (!u->cgroup_path)
                return;
        r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
        if (r < 0) {
                log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
                return;
        }
        if (r == 0)
                return;

        LIST_PREPEND(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
        u->in_cgroup_empty_queue = true;

        /* Trigger the defer event */
        r = sd_event_source_set_enabled(u->manager->cgroup_empty_event_source, SD_EVENT_ONESHOT);
        if (r < 0)
                log_debug_errno(r, "Failed to enable cgroup empty event source: %m");
}

static int on_cgroup_inotify_event(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *m = userdata;

        assert(s);
        assert(fd >= 0);
        assert(m);

        for (;;) {
                union inotify_event_buffer buffer;
                struct inotify_event *e;
                ssize_t l;

                l = read(fd, &buffer, sizeof(buffer));
                if (l < 0) {
                        if (IN_SET(errno, EINTR, EAGAIN))
                                return 0;

                        return log_error_errno(errno, "Failed to read control group inotify events: %m");
                }

                FOREACH_INOTIFY_EVENT(e, buffer, l) {
                        Unit *u;

                        if (e->wd < 0)
                                /* Queue overflow has no watch descriptor */
                                continue;

                        if (e->mask & IN_IGNORED)
                                /* The watch was just removed */
                                continue;

                        u = hashmap_get(m->cgroup_inotify_wd_unit, INT_TO_PTR(e->wd));
                        if (!u) /* Note that inotify might deliver
                                 * events for a watch even after it
                                 * was removed, because it was queued
                                 * before the removal. Let's ignore
                                 * this here safely. */
                                continue;

                        unit_add_to_cgroup_empty_queue(u);
                }
        }
}

int manager_setup_cgroup(Manager *m) {
        _cleanup_free_ char *path = NULL;
        const char *scope_path;
        CGroupController c;
        int r, all_unified;
        char *e;

        assert(m);

        /* 1. Determine hierarchy */
        m->cgroup_root = mfree(m->cgroup_root);
        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root);
        if (r < 0)
                return log_error_errno(r, "Cannot determine cgroup we are running in: %m");

        /* Chop off the init scope, if we are already located in it */
        e = endswith(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);

        /* LEGACY: Also chop off the system slice if we are in
         * it. This is to support live upgrades from older systemd
         * versions where PID 1 was moved there. Also see
         * cg_get_root_path(). */
        if (!e && MANAGER_IS_SYSTEM(m)) {
                e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE);
                if (!e)
                        e = endswith(m->cgroup_root, "/system"); /* even more legacy */
        }
        if (e)
                *e = 0;

        /* And make sure to store away the root value without trailing slash, even for the root dir, so that we can
         * easily prepend it everywhere. */
        delete_trailing_chars(m->cgroup_root, "/");

        /* 2. Show data */
        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
        if (r < 0)
                return log_error_errno(r, "Cannot find cgroup mount point: %m");

        r = cg_unified_flush();
        if (r < 0)
                return log_error_errno(r, "Couldn't determine if we are running in the unified hierarchy: %m");

        all_unified = cg_all_unified();
        if (all_unified < 0)
                return log_error_errno(all_unified, "Couldn't determine whether we are in all unified mode: %m");
        if (all_unified > 0)
                log_debug("Unified cgroup hierarchy is located at %s.", path);
        else {
                r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
                if (r < 0)
                        return log_error_errno(r, "Failed to determine whether systemd's own controller is in unified mode: %m");
                if (r > 0)
                        log_debug("Unified cgroup hierarchy is located at %s. Controllers are on legacy hierarchies.", path);
                else
                        log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER_LEGACY ". File system hierarchy is at %s.", path);
        }

        /* 3. Allocate cgroup empty defer event source */
        m->cgroup_empty_event_source = sd_event_source_unref(m->cgroup_empty_event_source);
        r = sd_event_add_defer(m->event, &m->cgroup_empty_event_source, on_cgroup_empty_event, m);
        if (r < 0)
                return log_error_errno(r, "Failed to create cgroup empty event source: %m");

        r = sd_event_source_set_priority(m->cgroup_empty_event_source, SD_EVENT_PRIORITY_NORMAL-5);
        if (r < 0)
                return log_error_errno(r, "Failed to set priority of cgroup empty event source: %m");

        r = sd_event_source_set_enabled(m->cgroup_empty_event_source, SD_EVENT_OFF);
        if (r < 0)
                return log_error_errno(r, "Failed to disable cgroup empty event source: %m");

        (void) sd_event_source_set_description(m->cgroup_empty_event_source, "cgroup-empty");

        /* 4. Install notifier inotify object, or agent */
        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0) {

                /* In the unified hierarchy we can get cgroup empty notifications via inotify. */

                m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
                safe_close(m->cgroup_inotify_fd);

                m->cgroup_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
                if (m->cgroup_inotify_fd < 0)
                        return log_error_errno(errno, "Failed to create control group inotify object: %m");

                r = sd_event_add_io(m->event, &m->cgroup_inotify_event_source, m->cgroup_inotify_fd, EPOLLIN, on_cgroup_inotify_event, m);
                if (r < 0)
                        return log_error_errno(r, "Failed to watch control group inotify object: %m");

                /* Process cgroup empty notifications early, but after service notifications and SIGCHLD. Also
                 * see handling of cgroup agent notifications, for the classic cgroup hierarchy support. */
                r = sd_event_source_set_priority(m->cgroup_inotify_event_source, SD_EVENT_PRIORITY_NORMAL-4);
                if (r < 0)
                        return log_error_errno(r, "Failed to set priority of inotify event source: %m");

                (void) sd_event_source_set_description(m->cgroup_inotify_event_source, "cgroup-inotify");

        } else if (MANAGER_IS_SYSTEM(m) && m->test_run_flags == 0) {

                /* On the legacy hierarchy we only get notifications via cgroup agents. (Which isn't really reliable,
                 * since it does not generate events when control groups with children run empty.) */

                r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, SYSTEMD_CGROUP_AGENT_PATH);
                if (r < 0)
                        log_warning_errno(r, "Failed to install release agent, ignoring: %m");
                else if (r > 0)
                        log_debug("Installed release agent.");
                else if (r == 0)
                        log_debug("Release agent already installed.");
        }

        /* 5. Make sure we are in the special "init.scope" unit in the root slice. */
        scope_path = strjoina(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
        r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
        if (r >= 0) {
                /* Also, move all other userspace processes remaining in the root cgroup into that scope. */
                r = cg_migrate(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
                if (r < 0)
                        log_warning_errno(r, "Couldn't move remaining userspace processes, ignoring: %m");

                /* 6. And pin it, so that it cannot be unmounted */
                safe_close(m->pin_cgroupfs_fd);
                m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
                if (m->pin_cgroupfs_fd < 0)
                        return log_error_errno(errno, "Failed to open pin file: %m");

        } else if (!m->test_run_flags)
                return log_error_errno(r, "Failed to create %s control group: %m", scope_path);

        /* 7. Always enable hierarchical support if it exists... */
        if (!all_unified && m->test_run_flags == 0)
                (void) cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");

        /* 8. Figure out which controllers are supported, and log about it */
        r = cg_mask_supported(&m->cgroup_supported);
        if (r < 0)
                return log_error_errno(r, "Failed to determine supported controllers: %m");
        for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++)
                log_debug("Controller '%s' supported: %s", cgroup_controller_to_string(c), yes_no(m->cgroup_supported & CGROUP_CONTROLLER_TO_MASK(c)));

        return 0;
}

void manager_shutdown_cgroup(Manager *m, bool delete) {
        assert(m);

        /* We can't really delete the group, since we are in it. But
         * let's trim it. */
        if (delete && m->cgroup_root && m->test_run_flags != MANAGER_TEST_RUN_MINIMAL)
                (void) cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);

        m->cgroup_empty_event_source = sd_event_source_unref(m->cgroup_empty_event_source);

        m->cgroup_inotify_wd_unit = hashmap_free(m->cgroup_inotify_wd_unit);

        m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
        m->cgroup_inotify_fd = safe_close(m->cgroup_inotify_fd);

        m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd);

        m->cgroup_root = mfree(m->cgroup_root);
}

Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) {
        char *p;
        Unit *u;

        assert(m);
        assert(cgroup);

        u = hashmap_get(m->cgroup_unit, cgroup);
        if (u)
                return u;

        p = strdupa(cgroup);
        for (;;) {
                char *e;

                e = strrchr(p, '/');
                if (!e || e == p)
                        return hashmap_get(m->cgroup_unit, SPECIAL_ROOT_SLICE);

                *e = 0;

                u = hashmap_get(m->cgroup_unit, p);
                if (u)
                        return u;
        }
}
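
/* Worked example of the prefix walk above (hypothetical paths): for the cgroup
 * "/system.slice/foo.service/sub", the lookup tries, in order:
 *
 *     "/system.slice/foo.service/sub"   -> direct hashmap hit?
 *     "/system.slice/foo.service"       -> chop at the last '/'
 *     "/system.slice"
 *
 * and finally falls back to the root slice once only the leading '/' remains. The
 * first unit found wins. */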

Unit *manager_get_unit_by_pid_cgroup(Manager *m, pid_t pid) {
        _cleanup_free_ char *cgroup = NULL;

        assert(m);

        if (!pid_is_valid(pid))
                return NULL;

        if (cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup) < 0)
                return NULL;

        return manager_get_unit_by_cgroup(m, cgroup);
}

Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {
        Unit *u, **array;

        assert(m);

        /* Note that a process might be owned by multiple units; we return only one here, which is good enough for
         * most cases, though not strictly correct. We prefer the one reported by cgroup membership, as that's the
         * most relevant one, as children of the process will be assigned to that one, too, before all else. */

        if (!pid_is_valid(pid))
                return NULL;

        if (pid == getpid_cached())
                return hashmap_get(m->units, SPECIAL_INIT_SCOPE);

        u = manager_get_unit_by_pid_cgroup(m, pid);
        if (u)
                return u;

        u = hashmap_get(m->watch_pids, PID_TO_PTR(pid));
        if (u)
                return u;

        array = hashmap_get(m->watch_pids, PID_TO_PTR(-pid));
        if (array)
                return array[0];

        return NULL;
}

int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
        Unit *u;

        assert(m);
        assert(cgroup);

        /* Called on the legacy hierarchy whenever we get an explicit cgroup notification from the cgroup agent process
         * or from the --system instance */

        log_debug("Got cgroup empty notification for: %s", cgroup);

        u = manager_get_unit_by_cgroup(m, cgroup);
        if (!u)
                return 0;

        unit_add_to_cgroup_empty_queue(u);
        return 1;
}

int unit_get_memory_current(Unit *u, uint64_t *ret) {
        _cleanup_free_ char *v = NULL;
        int r;

        assert(u);
        assert(ret);

        if (!UNIT_CGROUP_BOOL(u, memory_accounting))
                return -ENODATA;

        if (!u->cgroup_path)
                return -ENODATA;

        /* The root cgroup doesn't expose this information, let's get it from /proc instead */
        if (unit_has_root_cgroup(u))
                return procfs_memory_get_current(ret);

        if ((u->cgroup_realized_mask & CGROUP_MASK_MEMORY) == 0)
                return -ENODATA;

        r = cg_all_unified();
        if (r < 0)
                return r;
        if (r > 0)
                r = cg_get_attribute("memory", u->cgroup_path, "memory.current", &v);
        else
                r = cg_get_attribute("memory", u->cgroup_path, "memory.usage_in_bytes", &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        return safe_atou64(v, ret);
}
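
/* Illustrative sketch (not upstream code): querying the counter while tolerating the
 * common -ENODATA case, which covers disabled accounting, an unrealized cgroup, and a
 * missing attribute alike. "u" is an assumed Unit pointer.
 *
 *     uint64_t bytes;
 *     r = unit_get_memory_current(u, &bytes);
 *     if (r >= 0)
 *             log_unit_debug(u, "Current memory usage: %" PRIu64 " bytes", bytes);
 *     else if (r != -ENODATA)
 *             log_unit_debug_errno(u, r, "Failed to read memory usage: %m");
 */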

int unit_get_tasks_current(Unit *u, uint64_t *ret) {
        _cleanup_free_ char *v = NULL;
        int r;

        assert(u);
        assert(ret);

        if (!UNIT_CGROUP_BOOL(u, tasks_accounting))
                return -ENODATA;

        if (!u->cgroup_path)
                return -ENODATA;

        /* The root cgroup doesn't expose this information, let's get it from /proc instead */
        if (unit_has_root_cgroup(u))
                return procfs_tasks_get_current(ret);

        if ((u->cgroup_realized_mask & CGROUP_MASK_PIDS) == 0)
                return -ENODATA;

        r = cg_get_attribute("pids", u->cgroup_path, "pids.current", &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        return safe_atou64(v, ret);
}

static int unit_get_cpu_usage_raw(Unit *u, nsec_t *ret) {
        _cleanup_free_ char *v = NULL;
        uint64_t ns;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENODATA;

        /* The root cgroup doesn't expose this information, let's get it from /proc instead */
        if (unit_has_root_cgroup(u))
                return procfs_cpu_get_usage(ret);

        r = cg_all_unified();
        if (r < 0)
                return r;
        if (r > 0) {
                _cleanup_free_ char *val = NULL;
                uint64_t us;

                if ((u->cgroup_realized_mask & CGROUP_MASK_CPU) == 0)
                        return -ENODATA;

                r = cg_get_keyed_attribute("cpu", u->cgroup_path, "cpu.stat", STRV_MAKE("usage_usec"), &val);
                if (IN_SET(r, -ENOENT, -ENXIO))
                        return -ENODATA;
                if (r < 0)
                        return r;

                r = safe_atou64(val, &us);
                if (r < 0)
                        return r;

                ns = us * NSEC_PER_USEC;
        } else {
                if ((u->cgroup_realized_mask & CGROUP_MASK_CPUACCT) == 0)
                        return -ENODATA;

                r = cg_get_attribute("cpuacct", u->cgroup_path, "cpuacct.usage", &v);
                if (r == -ENOENT)
                        return -ENODATA;
                if (r < 0)
                        return r;

                r = safe_atou64(v, &ns);
                if (r < 0)
                        return r;
        }

        *ret = ns;
        return 0;
}

int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
        nsec_t ns;
        int r;

        assert(u);

        /* Retrieve the current CPU usage counter. This will subtract the CPU counter taken when the unit was
         * started. If the cgroup has been removed already, returns the last cached value. To cache the value, simply
         * call this function with a NULL return value. */

        if (!UNIT_CGROUP_BOOL(u, cpu_accounting))
                return -ENODATA;

        r = unit_get_cpu_usage_raw(u, &ns);
        if (r == -ENODATA && u->cpu_usage_last != NSEC_INFINITY) {
                /* If we can't get the CPU usage anymore (because the cgroup was already removed, for example), use our
                 * cached value. */

                if (ret)
                        *ret = u->cpu_usage_last;
                return 0;
        }
        if (r < 0)
                return r;

        if (ns > u->cpu_usage_base)
                ns -= u->cpu_usage_base;
        else
                ns = 0;

        u->cpu_usage_last = ns;
        if (ret)
                *ret = ns;

        return 0;
}
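
/* Illustrative sketch (not upstream code) of the caching contract described above: a
 * caller can force the value into the cache right before the cgroup goes away, then
 * read the cached result afterwards, exactly as unit_prune_cgroup() does. "u" is an
 * assumed Unit pointer.
 *
 *     (void) unit_get_cpu_usage(u, NULL);   // NULL return value: just refresh u->cpu_usage_last
 *     ...                                   // cgroup is destroyed
 *     nsec_t ns;
 *     r = unit_get_cpu_usage(u, &ns);       // now served from the cached value
 */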

int unit_get_ip_accounting(
                Unit *u,
                CGroupIPAccountingMetric metric,
                uint64_t *ret) {

        uint64_t value;
        int fd, r;

        assert(u);
        assert(metric >= 0);
        assert(metric < _CGROUP_IP_ACCOUNTING_METRIC_MAX);
        assert(ret);

        if (!UNIT_CGROUP_BOOL(u, ip_accounting))
                return -ENODATA;

        fd = IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_INGRESS_PACKETS) ?
                u->ip_accounting_ingress_map_fd :
                u->ip_accounting_egress_map_fd;
        if (fd < 0)
                return -ENODATA;

        if (IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
                r = bpf_firewall_read_accounting(fd, &value, NULL);
        else
                r = bpf_firewall_read_accounting(fd, NULL, &value);
        if (r < 0)
                return r;

        /* Add in additional metrics from a previous runtime. Note that when reexecing/reloading the daemon we compile
         * all BPF programs and maps anew, but serialize the old counters. When deserializing we store them in the
         * ip_accounting_extra[] field, and add them in here transparently. */

        *ret = value + u->ip_accounting_extra[metric];

        return r;
}
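
/* Illustrative sketch (not upstream code): dumping all IP accounting metrics for a
 * unit. It relies only on the 0.._MAX enum layout that the assertions above imply;
 * "u" is an assumed Unit pointer.
 *
 *     CGroupIPAccountingMetric metric;
 *     for (metric = 0; metric < _CGROUP_IP_ACCOUNTING_METRIC_MAX; metric++) {
 *             uint64_t v;
 *
 *             r = unit_get_ip_accounting(u, metric, &v);
 *             if (r == -ENODATA)
 *                     continue;  // accounting off, or no BPF map for this direction
 *             if (r < 0)
 *                     return r;
 *
 *             ...  // use v
 *     }
 */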

int unit_reset_cpu_accounting(Unit *u) {
        nsec_t ns;
        int r;

        assert(u);

        u->cpu_usage_last = NSEC_INFINITY;

        r = unit_get_cpu_usage_raw(u, &ns);
        if (r < 0) {
                u->cpu_usage_base = 0;
                return r;
        }

        u->cpu_usage_base = ns;
        return 0;
}

int unit_reset_ip_accounting(Unit *u) {
        int r = 0, q = 0;

        assert(u);

        if (u->ip_accounting_ingress_map_fd >= 0)
                r = bpf_firewall_reset_accounting(u->ip_accounting_ingress_map_fd);

        if (u->ip_accounting_egress_map_fd >= 0)
                q = bpf_firewall_reset_accounting(u->ip_accounting_egress_map_fd);

        zero(u->ip_accounting_extra);

        return r < 0 ? r : q;
}

void unit_invalidate_cgroup(Unit *u, CGroupMask m) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return;

        if (m == 0)
                return;

        /* always invalidate compat pairs together */
        if (m & (CGROUP_MASK_IO | CGROUP_MASK_BLKIO))
                m |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;

        if (m & (CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT))
                m |= CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT;

        if ((u->cgroup_realized_mask & m) == 0) /* NOP? */
                return;

        u->cgroup_realized_mask &= ~m;
        unit_add_to_cgroup_realize_queue(u);
}
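
/* Illustrative sketch (not upstream code): how invalidation feeds back into
 * realization. Marking a controller dirty only queues the unit; the attributes are
 * rewritten when the manager drains the queue from its event loop. "u" and "m" are
 * assumed pointers.
 *
 *     unit_invalidate_cgroup(u, CGROUP_MASK_MEMORY);     // drop MEMORY from the realized mask
 *     (void) manager_dispatch_cgroup_realize_queue(m);   // re-applies attributes via unit_realize_cgroup_now()
 */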

void unit_invalidate_cgroup_bpf(Unit *u) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return;

        if (u->cgroup_bpf_state == UNIT_CGROUP_BPF_INVALIDATED) /* NOP? */
                return;

        u->cgroup_bpf_state = UNIT_CGROUP_BPF_INVALIDATED;
        unit_add_to_cgroup_realize_queue(u);

        /* If we are a slice unit, we also need to compile a new BPF program for all our children, as the IP access
         * list of our children includes our own. */
        if (u->type == UNIT_SLICE) {
                Unit *member;
                Iterator i;
                void *v;

                HASHMAP_FOREACH_KEY(v, member, u->dependencies[UNIT_BEFORE], i) {
                        if (member == u)
                                continue;

                        if (UNIT_DEREF(member->slice) != u)
                                continue;

                        unit_invalidate_cgroup_bpf(member);
                }
        }
}

bool unit_cgroup_delegate(Unit *u) {
        CGroupContext *c;

        assert(u);

        if (!UNIT_VTABLE(u)->can_delegate)
                return false;

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        return c->delegate;
}

void manager_invalidate_startup_units(Manager *m) {
        Iterator i;
        Unit *u;

        assert(m);

        SET_FOREACH(u, m->startup_units, i)
                unit_invalidate_cgroup(u, CGROUP_MASK_CPU|CGROUP_MASK_IO|CGROUP_MASK_BLKIO);
}

static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
        [CGROUP_AUTO] = "auto",
        [CGROUP_CLOSED] = "closed",
        [CGROUP_STRICT] = "strict",
};

DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);