/***
  This file is part of systemd.

  Copyright 2013 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/

#include <fcntl.h>
#include <fnmatch.h>

#include "alloc-util.h"
#include "bpf-firewall.h"
#include "cgroup-util.h"
#include "cgroup.h"
#include "fd-util.h"
#include "fileio.h"
#include "fs-util.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
#include "special.h"
#include "stdio-util.h"
#include "string-table.h"
#include "string-util.h"

#define CGROUP_CPU_QUOTA_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)

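/* Emit a one-time warning when settings are translated between the legacy
 * and the unified cgroup hierarchy; the per-setting details are logged at
 * debug level via log_cgroup_compat() below. */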
static void cgroup_compat_warn(void) {
        static bool cgroup_compat_warned = false;

        if (cgroup_compat_warned)
                return;

        log_warning("cgroup compatibility translation between legacy and unified hierarchy settings activated. See cgroup-compat debug messages for details.");
        cgroup_compat_warned = true;
}

#define log_cgroup_compat(unit, fmt, ...) do {                                  \
                cgroup_compat_warn();                                           \
                log_unit_debug(unit, "cgroup-compat: " fmt, ##__VA_ARGS__);     \
        } while (false)

void cgroup_context_init(CGroupContext *c) {
        assert(c);

        /* Initialize everything to the kernel defaults, assuming the
         * structure is preinitialized to 0 */

        c->cpu_weight = CGROUP_WEIGHT_INVALID;
        c->startup_cpu_weight = CGROUP_WEIGHT_INVALID;
        c->cpu_quota_per_sec_usec = USEC_INFINITY;

        c->cpu_shares = CGROUP_CPU_SHARES_INVALID;
        c->startup_cpu_shares = CGROUP_CPU_SHARES_INVALID;

        c->memory_high = CGROUP_LIMIT_MAX;
        c->memory_max = CGROUP_LIMIT_MAX;
        c->memory_swap_max = CGROUP_LIMIT_MAX;

        c->memory_limit = CGROUP_LIMIT_MAX;

        c->io_weight = CGROUP_WEIGHT_INVALID;
        c->startup_io_weight = CGROUP_WEIGHT_INVALID;

        c->blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;
        c->startup_blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;

        c->tasks_max = (uint64_t) -1;
}

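/* The cgroup_context_free_*() helpers below unlink one entry from its
 * per-context list and release its memory; cgroup_context_done() uses them
 * to drain the lists completely. */
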
void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
        assert(c);
        assert(a);

        LIST_REMOVE(device_allow, c->device_allow, a);
        free(a->path);
        free(a);
}

void cgroup_context_free_io_device_weight(CGroupContext *c, CGroupIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->io_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_io_device_limit(CGroupContext *c, CGroupIODeviceLimit *l) {
        assert(c);
        assert(l);

        LIST_REMOVE(device_limits, c->io_device_limits, l);
        free(l->path);
        free(l);
}

void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->blockio_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b) {
        assert(c);
        assert(b);

        LIST_REMOVE(device_bandwidths, c->blockio_device_bandwidths, b);
        free(b->path);
        free(b);
}

void cgroup_context_done(CGroupContext *c) {
        assert(c);

        while (c->io_device_weights)
                cgroup_context_free_io_device_weight(c, c->io_device_weights);

        while (c->io_device_limits)
                cgroup_context_free_io_device_limit(c, c->io_device_limits);

        while (c->blockio_device_weights)
                cgroup_context_free_blockio_device_weight(c, c->blockio_device_weights);

        while (c->blockio_device_bandwidths)
                cgroup_context_free_blockio_device_bandwidth(c, c->blockio_device_bandwidths);

        while (c->device_allow)
                cgroup_context_free_device_allow(c, c->device_allow);

        c->ip_address_allow = ip_address_access_free_all(c->ip_address_allow);
        c->ip_address_deny = ip_address_access_free_all(c->ip_address_deny);
}

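/* Dump the context in unit-file-like "Key=value" syntax, one setting per
 * line, each line prefixed with the given string. */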
void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) {
        CGroupIODeviceLimit *il;
        CGroupIODeviceWeight *iw;
        CGroupBlockIODeviceBandwidth *b;
        CGroupBlockIODeviceWeight *w;
        CGroupDeviceAllow *a;
        char u[FORMAT_TIMESPAN_MAX];

        assert(c);
        assert(f);

        prefix = strempty(prefix);

        fprintf(f,
                "%sCPUAccounting=%s\n"
                "%sIOAccounting=%s\n"
                "%sBlockIOAccounting=%s\n"
                "%sMemoryAccounting=%s\n"
                "%sTasksAccounting=%s\n"
                "%sCPUWeight=%" PRIu64 "\n"
                "%sStartupCPUWeight=%" PRIu64 "\n"
                "%sCPUShares=%" PRIu64 "\n"
                "%sStartupCPUShares=%" PRIu64 "\n"
                "%sCPUQuotaPerSecSec=%s\n"
                "%sIOWeight=%" PRIu64 "\n"
                "%sStartupIOWeight=%" PRIu64 "\n"
                "%sBlockIOWeight=%" PRIu64 "\n"
                "%sStartupBlockIOWeight=%" PRIu64 "\n"
                "%sMemoryLow=%" PRIu64 "\n"
                "%sMemoryHigh=%" PRIu64 "\n"
                "%sMemoryMax=%" PRIu64 "\n"
                "%sMemorySwapMax=%" PRIu64 "\n"
                "%sMemoryLimit=%" PRIu64 "\n"
                "%sTasksMax=%" PRIu64 "\n"
                "%sDevicePolicy=%s\n"
                "%sDelegate=%s\n",
                prefix, yes_no(c->cpu_accounting),
                prefix, yes_no(c->io_accounting),
                prefix, yes_no(c->blockio_accounting),
                prefix, yes_no(c->memory_accounting),
                prefix, yes_no(c->tasks_accounting),
                prefix, c->cpu_weight,
                prefix, c->startup_cpu_weight,
                prefix, c->cpu_shares,
                prefix, c->startup_cpu_shares,
                prefix, format_timespan(u, sizeof(u), c->cpu_quota_per_sec_usec, 1),
                prefix, c->io_weight,
                prefix, c->startup_io_weight,
                prefix, c->blockio_weight,
                prefix, c->startup_blockio_weight,
                prefix, c->memory_low,
                prefix, c->memory_high,
                prefix, c->memory_max,
                prefix, c->memory_swap_max,
                prefix, c->memory_limit,
                prefix, c->tasks_max,
                prefix, cgroup_device_policy_to_string(c->device_policy),
                prefix, yes_no(c->delegate));

        LIST_FOREACH(device_allow, a, c->device_allow)
                fprintf(f,
                        "%sDeviceAllow=%s %s%s%s\n",
                        prefix,
                        a->path,
                        a->r ? "r" : "", a->w ? "w" : "", a->m ? "m" : "");

        LIST_FOREACH(device_weights, iw, c->io_device_weights)
                fprintf(f,
                        "%sIODeviceWeight=%s %" PRIu64 "\n",
                        prefix,
                        iw->path,
                        iw->weight);

        LIST_FOREACH(device_limits, il, c->io_device_limits) {
                char buf[FORMAT_BYTES_MAX];
                CGroupIOLimitType type;

                for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                        if (il->limits[type] != cgroup_io_limit_defaults[type])
                                fprintf(f,
                                        "%s%s=%s %s\n",
                                        prefix,
                                        cgroup_io_limit_type_to_string(type),
                                        il->path,
                                        format_bytes(buf, sizeof(buf), il->limits[type]));
        }

        LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                fprintf(f,
                        "%sBlockIODeviceWeight=%s %" PRIu64 "\n",
                        prefix,
                        w->path,
                        w->weight);

        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                char buf[FORMAT_BYTES_MAX];

                if (b->rbps != CGROUP_LIMIT_MAX)
                        fprintf(f,
                                "%sBlockIOReadBandwidth=%s %s\n",
                                prefix,
                                b->path,
                                format_bytes(buf, sizeof(buf), b->rbps));
                if (b->wbps != CGROUP_LIMIT_MAX)
                        fprintf(f,
                                "%sBlockIOWriteBandwidth=%s %s\n",
                                prefix,
                                b->path,
                                format_bytes(buf, sizeof(buf), b->wbps));
        }
}

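/* Resolve a path to the block device backing it: for a block device node
 * the device itself, otherwise the (whole-disk) device of the file system
 * the path resides on. */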
static int lookup_block_device(const char *p, dev_t *dev) {
        struct stat st;
        int r;

        assert(p);
        assert(dev);

        r = stat(p, &st);
        if (r < 0)
                return log_warning_errno(errno, "Couldn't stat device %s: %m", p);

        if (S_ISBLK(st.st_mode))
                *dev = st.st_rdev;
        else if (major(st.st_dev) != 0) {
                /* If this is not a device node then find the block
                 * device this file is stored on */
                *dev = st.st_dev;

                /* If this is a partition, try to get the originating
                 * block device */
                block_get_whole_disk(*dev, dev);
        } else {
                log_warning("%s is not a block device and file system block device cannot be determined or is not local.", p);
                return -ENODEV;
        }

        return 0;
}

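/* Add a single device node to the devices.allow list of the given cgroup;
 * "acc" is the access string, e.g. "rwm". */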
static int whitelist_device(const char *path, const char *node, const char *acc) {
        char buf[2+DECIMAL_STR_MAX(dev_t)*2+2+4];
        struct stat st;
        bool ignore_notfound;
        int r;

        assert(path);
        assert(acc);

        if (node[0] == '-') {
                /* Non-existent paths starting with "-" must be silently ignored */
                node++;
                ignore_notfound = true;
        } else
                ignore_notfound = false;

        if (stat(node, &st) < 0) {
                if (errno == ENOENT && ignore_notfound)
                        return 0;

                return log_warning_errno(errno, "Couldn't stat device %s: %m", node);
        }

        if (!S_ISCHR(st.st_mode) && !S_ISBLK(st.st_mode)) {
                log_warning("%s is not a device.", node);
                return -ENODEV;
        }

        sprintf(buf,
                "%c %u:%u %s",
                S_ISCHR(st.st_mode) ? 'c' : 'b',
                major(st.st_rdev), minor(st.st_rdev),
                acc);

        r = cg_set_attribute("devices", path, "devices.allow", buf);
        if (r < 0)
                log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                               "Failed to set devices.allow on %s: %m", path);

        return r;
}

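/* Like whitelist_device(), but matches a device group by name (with
 * fnmatch() globbing) against /proc/devices and whitelists every major
 * number that belongs to it, e.g. "pts" for 'c' devices. */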
static int whitelist_major(const char *path, const char *name, char type, const char *acc) {
        _cleanup_fclose_ FILE *f = NULL;
        char line[LINE_MAX];
        bool good = false;
        int r;

        assert(path);
        assert(acc);
        assert(type == 'b' || type == 'c');

        f = fopen("/proc/devices", "re");
        if (!f)
                return log_warning_errno(errno, "Cannot open /proc/devices to resolve %s (%c): %m", name, type);

        FOREACH_LINE(line, f, goto fail) {
                char buf[2+DECIMAL_STR_MAX(unsigned)+3+4], *p, *w;
                unsigned maj;

                truncate_nl(line);

                if (type == 'c' && streq(line, "Character devices:")) {
                        good = true;
                        continue;
                }

                if (type == 'b' && streq(line, "Block devices:")) {
                        good = true;
                        continue;
                }

                if (isempty(line)) {
                        good = false;
                        continue;
                }

                if (!good)
                        continue;

                p = strstrip(line);

                w = strpbrk(p, WHITESPACE);
                if (!w)
                        continue;
                *w = 0;

                r = safe_atou(p, &maj);
                if (r < 0)
                        continue;
                if (maj <= 0)
                        continue;

                w++;
                w += strspn(w, WHITESPACE);

                if (fnmatch(name, w, 0) != 0)
                        continue;

                sprintf(buf,
                        "%c %u:* %s",
                        type,
                        maj,
                        acc);

                r = cg_set_attribute("devices", path, "devices.allow", buf);
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set devices.allow on %s: %m", path);
        }

        return 0;

fail:
        return log_warning_errno(errno, "Failed to read /proc/devices: %m");
}

static bool cgroup_context_has_cpu_weight(CGroupContext *c) {
        return c->cpu_weight != CGROUP_WEIGHT_INVALID ||
                c->startup_cpu_weight != CGROUP_WEIGHT_INVALID;
}

static bool cgroup_context_has_cpu_shares(CGroupContext *c) {
        return c->cpu_shares != CGROUP_CPU_SHARES_INVALID ||
                c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID;
}

static uint64_t cgroup_context_cpu_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->startup_cpu_weight;
        else if (c->cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->cpu_weight;
        else
                return CGROUP_WEIGHT_DEFAULT;
}

static uint64_t cgroup_context_cpu_shares(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->startup_cpu_shares;
        else if (c->cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->cpu_shares;
        else
                return CGROUP_CPU_SHARES_DEFAULT;
}

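/* The quota is configured as CPU time per second (in µs) but is written out
 * relative to the 100ms period defined above. As a sketch of the arithmetic
 * (the numbers are illustrative, not taken from a real unit): CPUQuota=20%,
 * i.e. quota = 200000µs per second, yields 200000 * 100000 / 1000000 = 20000,
 * so "20000 100000" is written to cpu.max here, and 20000 to
 * cpu.cfs_quota_us by the legacy variant below. */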
static void cgroup_apply_unified_cpu_config(Unit *u, uint64_t weight, uint64_t quota) {
        char buf[MAX(DECIMAL_STR_MAX(uint64_t) + 1, (DECIMAL_STR_MAX(usec_t) + 1) * 2)];
        int r;

        xsprintf(buf, "%" PRIu64 "\n", weight);
        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.weight", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.weight: %m");

        if (quota != USEC_INFINITY)
                xsprintf(buf, USEC_FMT " " USEC_FMT "\n",
                         quota * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC, CGROUP_CPU_QUOTA_PERIOD_USEC);
        else
                xsprintf(buf, "max " USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);

        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.max", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.max: %m");
}

static void cgroup_apply_legacy_cpu_config(Unit *u, uint64_t shares, uint64_t quota) {
        char buf[MAX(DECIMAL_STR_MAX(uint64_t), DECIMAL_STR_MAX(usec_t)) + 1];
        int r;

        xsprintf(buf, "%" PRIu64 "\n", shares);
        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.shares", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.shares: %m");

        xsprintf(buf, USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);
        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_period_us", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.cfs_period_us: %m");

        if (quota != USEC_INFINITY) {
                xsprintf(buf, USEC_FMT "\n", quota * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC);
                r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_quota_us", buf);
        } else
                r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_quota_us", "-1");
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.cfs_quota_us: %m");
}

static uint64_t cgroup_cpu_shares_to_weight(uint64_t shares) {
        return CLAMP(shares * CGROUP_WEIGHT_DEFAULT / CGROUP_CPU_SHARES_DEFAULT,
                     CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}

static uint64_t cgroup_cpu_weight_to_shares(uint64_t weight) {
        return CLAMP(weight * CGROUP_CPU_SHARES_DEFAULT / CGROUP_WEIGHT_DEFAULT,
                     CGROUP_CPU_SHARES_MIN, CGROUP_CPU_SHARES_MAX);
}

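/* A sketch of the conversion above, assuming systemd's usual constants
 * (CGROUP_WEIGHT_DEFAULT of 100, CGROUP_CPU_SHARES_DEFAULT of 1024):
 * shares of 1024 map to weight 100, shares of 512 to weight 50, and the
 * result is clamped to the valid range of the target attribute. The mapping
 * is linear, so round-tripping may lose precision to integer division. */
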
static bool cgroup_context_has_io_config(CGroupContext *c) {
        return c->io_accounting ||
                c->io_weight != CGROUP_WEIGHT_INVALID ||
                c->startup_io_weight != CGROUP_WEIGHT_INVALID ||
                c->io_device_weights ||
                c->io_device_limits;
}

static bool cgroup_context_has_blockio_config(CGroupContext *c) {
        return c->blockio_accounting ||
                c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
                c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
                c->blockio_device_weights ||
                c->blockio_device_bandwidths;
}

static uint64_t cgroup_context_io_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_io_weight != CGROUP_WEIGHT_INVALID)
                return c->startup_io_weight;
        else if (c->io_weight != CGROUP_WEIGHT_INVALID)
                return c->io_weight;
        else
                return CGROUP_WEIGHT_DEFAULT;
}

static uint64_t cgroup_context_blkio_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
                return c->startup_blockio_weight;
        else if (c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
                return c->blockio_weight;
        else
                return CGROUP_BLKIO_WEIGHT_DEFAULT;
}

static uint64_t cgroup_weight_blkio_to_io(uint64_t blkio_weight) {
        return CLAMP(blkio_weight * CGROUP_WEIGHT_DEFAULT / CGROUP_BLKIO_WEIGHT_DEFAULT,
                     CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}

static uint64_t cgroup_weight_io_to_blkio(uint64_t io_weight) {
        return CLAMP(io_weight * CGROUP_BLKIO_WEIGHT_DEFAULT / CGROUP_WEIGHT_DEFAULT,
                     CGROUP_BLKIO_WEIGHT_MIN, CGROUP_BLKIO_WEIGHT_MAX);
}

static void cgroup_apply_io_device_weight(Unit *u, const char *dev_path, uint64_t io_weight) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), io_weight);
        r = cg_set_attribute("io", u->cgroup_path, "io.weight", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set io.weight: %m");
}

static void cgroup_apply_blkio_device_weight(Unit *u, const char *dev_path, uint64_t blkio_weight) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), blkio_weight);
        r = cg_set_attribute("blkio", u->cgroup_path, "blkio.weight_device", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set blkio.weight_device: %m");
}

static unsigned cgroup_apply_io_device_limit(Unit *u, const char *dev_path, uint64_t *limits) {
        char limit_bufs[_CGROUP_IO_LIMIT_TYPE_MAX][DECIMAL_STR_MAX(uint64_t)];
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+(6+DECIMAL_STR_MAX(uint64_t)+1)*4];
        CGroupIOLimitType type;
        dev_t dev;
        unsigned n = 0;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return 0;

        for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++) {
                if (limits[type] != cgroup_io_limit_defaults[type]) {
                        xsprintf(limit_bufs[type], "%" PRIu64, limits[type]);
                        n++;
                } else {
                        xsprintf(limit_bufs[type], "%s", limits[type] == CGROUP_LIMIT_MAX ? "max" : "0");
                }
        }

        xsprintf(buf, "%u:%u rbps=%s wbps=%s riops=%s wiops=%s\n", major(dev), minor(dev),
                 limit_bufs[CGROUP_IO_RBPS_MAX], limit_bufs[CGROUP_IO_WBPS_MAX],
                 limit_bufs[CGROUP_IO_RIOPS_MAX], limit_bufs[CGROUP_IO_WIOPS_MAX]);
        r = cg_set_attribute("io", u->cgroup_path, "io.max", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set io.max: %m");
        return n;
}

static unsigned cgroup_apply_blkio_device_limit(Unit *u, const char *dev_path, uint64_t rbps, uint64_t wbps) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        unsigned n = 0;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return 0;

        if (rbps != CGROUP_LIMIT_MAX)
                n++;
        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), rbps);
        r = cg_set_attribute("blkio", u->cgroup_path, "blkio.throttle.read_bps_device", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set blkio.throttle.read_bps_device: %m");

        if (wbps != CGROUP_LIMIT_MAX)
                n++;
        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), wbps);
        r = cg_set_attribute("blkio", u->cgroup_path, "blkio.throttle.write_bps_device", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set blkio.throttle.write_bps_device: %m");

        return n;
}

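/* On the unified hierarchy memory limits are expressed through the
 * memory.low/memory.high/memory.max and memory.swap.max attributes; the
 * legacy hierarchy only offers memory.limit_in_bytes. The helper below
 * detects whether any unified-only setting is in use, and
 * cgroup_apply_unified_memory_limit() writes one such attribute. */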
static bool cgroup_context_has_unified_memory_config(CGroupContext *c) {
        return c->memory_low > 0 || c->memory_high != CGROUP_LIMIT_MAX || c->memory_max != CGROUP_LIMIT_MAX || c->memory_swap_max != CGROUP_LIMIT_MAX;
}

static void cgroup_apply_unified_memory_limit(Unit *u, const char *file, uint64_t v) {
        char buf[DECIMAL_STR_MAX(uint64_t) + 1] = "max";
        int r;

        if (v != CGROUP_LIMIT_MAX)
                xsprintf(buf, "%" PRIu64 "\n", v);

        r = cg_set_attribute("memory", u->cgroup_path, file, buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set %s: %m", file);
}

static void cgroup_apply_firewall(Unit *u, CGroupContext *c) {
        int r;

        if (u->type == UNIT_SLICE) /* Skip this for slice units, they are inner cgroup nodes, and since bpf/cgroup is
                                    * not recursive we don't ever touch the bpf on them */
                return;

        r = bpf_firewall_compile(u);
        if (r < 0)
                return;

        (void) bpf_firewall_install(u);
        return;
}

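/* Write all configured attributes of one unit to its realized cgroup,
 * restricted to the controllers in apply_mask. Errors caused by read-only
 * or missing cgroups are deliberately downgraded to debug level. */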
static void cgroup_context_apply(
                Unit *u,
                CGroupMask apply_mask,
                bool apply_bpf,
                ManagerState state) {

        const char *path;
        CGroupContext *c;
        bool is_root;
        int r;

        assert(u);

        c = unit_get_cgroup_context(u);
        path = u->cgroup_path;

        assert(c);
        assert(path);

        /* Nothing to do? Exit early! */
        if (apply_mask == 0 && !apply_bpf)
                return;

        /* Some cgroup attributes are not supported on the root cgroup,
         * hence silently ignore */
        is_root = isempty(path) || path_equal(path, "/");
        if (is_root)
                /* Make sure we don't try to display messages with an empty path. */
                path = "/";

        /* We generally ignore errors caused by read-only mounted
         * cgroup trees (assuming we are running in a container then),
         * and missing cgroups, i.e. EROFS and ENOENT. */

        if ((apply_mask & CGROUP_MASK_CPU) && !is_root) {
                bool has_weight, has_shares;

                has_weight = cgroup_context_has_cpu_weight(c);
                has_shares = cgroup_context_has_cpu_shares(c);

                if (cg_all_unified() > 0) {
                        uint64_t weight;

                        if (has_weight)
                                weight = cgroup_context_cpu_weight(c, state);
                        else if (has_shares) {
                                uint64_t shares = cgroup_context_cpu_shares(c, state);

                                weight = cgroup_cpu_shares_to_weight(shares);

                                log_cgroup_compat(u, "Applying [Startup]CPUShares %" PRIu64 " as [Startup]CPUWeight %" PRIu64 " on %s",
                                                  shares, weight, path);
                        } else
                                weight = CGROUP_WEIGHT_DEFAULT;

                        cgroup_apply_unified_cpu_config(u, weight, c->cpu_quota_per_sec_usec);
                } else {
                        uint64_t shares;

                        if (has_weight) {
                                uint64_t weight = cgroup_context_cpu_weight(c, state);

                                shares = cgroup_cpu_weight_to_shares(weight);

                                log_cgroup_compat(u, "Applying [Startup]CPUWeight %" PRIu64 " as [Startup]CPUShares %" PRIu64 " on %s",
                                                  weight, shares, path);
                        } else if (has_shares)
                                shares = cgroup_context_cpu_shares(c, state);
                        else
                                shares = CGROUP_CPU_SHARES_DEFAULT;

                        cgroup_apply_legacy_cpu_config(u, shares, c->cpu_quota_per_sec_usec);
                }
        }

        if (apply_mask & CGROUP_MASK_IO) {
                bool has_io = cgroup_context_has_io_config(c);
                bool has_blockio = cgroup_context_has_blockio_config(c);

                if (!is_root) {
                        char buf[8+DECIMAL_STR_MAX(uint64_t)+1];
                        uint64_t weight;

                        if (has_io)
                                weight = cgroup_context_io_weight(c, state);
                        else if (has_blockio) {
                                uint64_t blkio_weight = cgroup_context_blkio_weight(c, state);

                                weight = cgroup_weight_blkio_to_io(blkio_weight);

                                log_cgroup_compat(u, "Applying [Startup]BlockIOWeight %" PRIu64 " as [Startup]IOWeight %" PRIu64,
                                                  blkio_weight, weight);
                        } else
                                weight = CGROUP_WEIGHT_DEFAULT;

                        xsprintf(buf, "default %" PRIu64 "\n", weight);
                        r = cg_set_attribute("io", path, "io.weight", buf);
                        if (r < 0)
                                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                              "Failed to set io.weight: %m");

                        if (has_io) {
                                CGroupIODeviceWeight *w;

                                /* FIXME: no way to reset this list */
                                LIST_FOREACH(device_weights, w, c->io_device_weights)
                                        cgroup_apply_io_device_weight(u, w->path, w->weight);
                        } else if (has_blockio) {
                                CGroupBlockIODeviceWeight *w;

                                /* FIXME: no way to reset this list */
                                LIST_FOREACH(device_weights, w, c->blockio_device_weights) {
                                        weight = cgroup_weight_blkio_to_io(w->weight);

                                        log_cgroup_compat(u, "Applying BlockIODeviceWeight %" PRIu64 " as IODeviceWeight %" PRIu64 " for %s",
                                                          w->weight, weight, w->path);

                                        cgroup_apply_io_device_weight(u, w->path, weight);
                                }
                        }
                }

                /* Apply limits and free ones without config. */
                if (has_io) {
                        CGroupIODeviceLimit *l, *next;

                        LIST_FOREACH_SAFE(device_limits, l, next, c->io_device_limits) {
                                if (!cgroup_apply_io_device_limit(u, l->path, l->limits))
                                        cgroup_context_free_io_device_limit(c, l);
                        }
                } else if (has_blockio) {
                        CGroupBlockIODeviceBandwidth *b, *next;

                        LIST_FOREACH_SAFE(device_bandwidths, b, next, c->blockio_device_bandwidths) {
                                uint64_t limits[_CGROUP_IO_LIMIT_TYPE_MAX];
                                CGroupIOLimitType type;

                                for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                                        limits[type] = cgroup_io_limit_defaults[type];

                                limits[CGROUP_IO_RBPS_MAX] = b->rbps;
                                limits[CGROUP_IO_WBPS_MAX] = b->wbps;

                                log_cgroup_compat(u, "Applying BlockIO{Read|Write}Bandwidth %" PRIu64 " %" PRIu64 " as IO{Read|Write}BandwidthMax for %s",
                                                  b->rbps, b->wbps, b->path);

                                if (!cgroup_apply_io_device_limit(u, b->path, limits))
                                        cgroup_context_free_blockio_device_bandwidth(c, b);
                        }
                }
        }

        if (apply_mask & CGROUP_MASK_BLKIO) {
                bool has_io = cgroup_context_has_io_config(c);
                bool has_blockio = cgroup_context_has_blockio_config(c);

                if (!is_root) {
                        char buf[DECIMAL_STR_MAX(uint64_t)+1];
                        uint64_t weight;

                        if (has_io) {
                                uint64_t io_weight = cgroup_context_io_weight(c, state);

                                weight = cgroup_weight_io_to_blkio(cgroup_context_io_weight(c, state));

                                log_cgroup_compat(u, "Applying [Startup]IOWeight %" PRIu64 " as [Startup]BlockIOWeight %" PRIu64,
                                                  io_weight, weight);
                        } else if (has_blockio)
                                weight = cgroup_context_blkio_weight(c, state);
                        else
                                weight = CGROUP_BLKIO_WEIGHT_DEFAULT;

                        xsprintf(buf, "%" PRIu64 "\n", weight);
                        r = cg_set_attribute("blkio", path, "blkio.weight", buf);
                        if (r < 0)
                                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                              "Failed to set blkio.weight: %m");

                        if (has_io) {
                                CGroupIODeviceWeight *w;

                                /* FIXME: no way to reset this list */
                                LIST_FOREACH(device_weights, w, c->io_device_weights) {
                                        weight = cgroup_weight_io_to_blkio(w->weight);

                                        log_cgroup_compat(u, "Applying IODeviceWeight %" PRIu64 " as BlockIODeviceWeight %" PRIu64 " for %s",
                                                          w->weight, weight, w->path);

                                        cgroup_apply_blkio_device_weight(u, w->path, weight);
                                }
                        } else if (has_blockio) {
                                CGroupBlockIODeviceWeight *w;

                                /* FIXME: no way to reset this list */
                                LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                                        cgroup_apply_blkio_device_weight(u, w->path, w->weight);
                        }
                }

                /* Apply limits and free ones without config. */
                if (has_io) {
                        CGroupIODeviceLimit *l, *next;

                        LIST_FOREACH_SAFE(device_limits, l, next, c->io_device_limits) {
                                log_cgroup_compat(u, "Applying IO{Read|Write}Bandwidth %" PRIu64 " %" PRIu64 " as BlockIO{Read|Write}BandwidthMax for %s",
                                                  l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX], l->path);

                                if (!cgroup_apply_blkio_device_limit(u, l->path, l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX]))
                                        cgroup_context_free_io_device_limit(c, l);
                        }
                } else if (has_blockio) {
                        CGroupBlockIODeviceBandwidth *b, *next;

                        LIST_FOREACH_SAFE(device_bandwidths, b, next, c->blockio_device_bandwidths)
                                if (!cgroup_apply_blkio_device_limit(u, b->path, b->rbps, b->wbps))
                                        cgroup_context_free_blockio_device_bandwidth(c, b);
                }
        }

        if ((apply_mask & CGROUP_MASK_MEMORY) && !is_root) {
                if (cg_all_unified() > 0) {
                        uint64_t max, swap_max = CGROUP_LIMIT_MAX;

                        if (cgroup_context_has_unified_memory_config(c)) {
                                max = c->memory_max;
                                swap_max = c->memory_swap_max;
                        } else {
                                max = c->memory_limit;

                                if (max != CGROUP_LIMIT_MAX)
                                        log_cgroup_compat(u, "Applying MemoryLimit %" PRIu64 " as MemoryMax", max);
                        }

                        cgroup_apply_unified_memory_limit(u, "memory.low", c->memory_low);
                        cgroup_apply_unified_memory_limit(u, "memory.high", c->memory_high);
                        cgroup_apply_unified_memory_limit(u, "memory.max", max);
                        cgroup_apply_unified_memory_limit(u, "memory.swap.max", swap_max);
                } else {
                        char buf[DECIMAL_STR_MAX(uint64_t) + 1];
                        uint64_t val;

                        if (cgroup_context_has_unified_memory_config(c)) {
                                val = c->memory_max;
                                log_cgroup_compat(u, "Applying MemoryMax %" PRIu64 " as MemoryLimit", val);
                        } else
                                val = c->memory_limit;

                        if (val == CGROUP_LIMIT_MAX)
                                strncpy(buf, "-1\n", sizeof(buf));
                        else
                                xsprintf(buf, "%" PRIu64 "\n", val);

                        r = cg_set_attribute("memory", path, "memory.limit_in_bytes", buf);
                        if (r < 0)
                                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                              "Failed to set memory.limit_in_bytes: %m");
                }
        }

        if ((apply_mask & CGROUP_MASK_DEVICES) && !is_root) {
                CGroupDeviceAllow *a;

                /* Changing the devices list of a populated cgroup
                 * might result in EINVAL, hence ignore EINVAL
                 * here. */

                if (c->device_allow || c->device_policy != CGROUP_AUTO)
                        r = cg_set_attribute("devices", path, "devices.deny", "a");
                else
                        r = cg_set_attribute("devices", path, "devices.allow", "a");
                if (r < 0)
                        log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                      "Failed to reset devices.list: %m");

                if (c->device_policy == CGROUP_CLOSED ||
                    (c->device_policy == CGROUP_AUTO && c->device_allow)) {
                        static const char auto_devices[] =
                                "/dev/null\0" "rwm\0"
                                "/dev/zero\0" "rwm\0"
                                "/dev/full\0" "rwm\0"
                                "/dev/random\0" "rwm\0"
                                "/dev/urandom\0" "rwm\0"
                                "/dev/tty\0" "rwm\0"
                                "/dev/pts/ptmx\0" "rw\0" /* /dev/pts/ptmx may not be duplicated, but accessed */
                                /* Allow /run/systemd/inaccessible/{chr,blk} devices for mapping InaccessiblePaths */
                                "-/run/systemd/inaccessible/chr\0" "rwm\0"
                                "-/run/systemd/inaccessible/blk\0" "rwm\0";

                        const char *x, *y;

                        NULSTR_FOREACH_PAIR(x, y, auto_devices)
                                whitelist_device(path, x, y);

                        whitelist_major(path, "pts", 'c', "rw");
                }

                LIST_FOREACH(device_allow, a, c->device_allow) {
                        char acc[4], *val;
                        unsigned k = 0;

                        if (a->r)
                                acc[k++] = 'r';
                        if (a->w)
                                acc[k++] = 'w';
                        if (a->m)
                                acc[k++] = 'm';

                        if (k == 0)
                                continue;

                        acc[k++] = 0;

                        if (path_startswith(a->path, "/dev/"))
                                whitelist_device(path, a->path, acc);
                        else if ((val = startswith(a->path, "block-")))
                                whitelist_major(path, val, 'b', acc);
                        else if ((val = startswith(a->path, "char-")))
                                whitelist_major(path, val, 'c', acc);
                        else
                                log_unit_debug(u, "Ignoring device %s while writing cgroup attribute.", a->path);
                }
        }

        if ((apply_mask & CGROUP_MASK_PIDS) && !is_root) {

                if (c->tasks_max != CGROUP_LIMIT_MAX) {
                        char buf[DECIMAL_STR_MAX(uint64_t) + 2];

                        sprintf(buf, "%" PRIu64 "\n", c->tasks_max);
                        r = cg_set_attribute("pids", path, "pids.max", buf);
                } else
                        r = cg_set_attribute("pids", path, "pids.max", "max");

                if (r < 0)
                        log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                      "Failed to set pids.max: %m");
        }

        if (apply_bpf)
                cgroup_apply_firewall(u, c);
}

CGroupMask cgroup_context_get_mask(CGroupContext *c) {
        CGroupMask mask = 0;

        /* Figure out which controllers we need */

        if (c->cpu_accounting ||
            cgroup_context_has_cpu_weight(c) ||
            cgroup_context_has_cpu_shares(c) ||
            c->cpu_quota_per_sec_usec != USEC_INFINITY)
                mask |= CGROUP_MASK_CPUACCT | CGROUP_MASK_CPU;

        if (cgroup_context_has_io_config(c) || cgroup_context_has_blockio_config(c))
                mask |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;

        if (c->memory_accounting ||
            c->memory_limit != CGROUP_LIMIT_MAX ||
            cgroup_context_has_unified_memory_config(c))
                mask |= CGROUP_MASK_MEMORY;

        if (c->device_allow ||
            c->device_policy != CGROUP_AUTO)
                mask |= CGROUP_MASK_DEVICES;

        if (c->tasks_accounting ||
            c->tasks_max != (uint64_t) -1)
                mask |= CGROUP_MASK_PIDS;

        return mask;
}

CGroupMask unit_get_own_mask(Unit *u) {
        CGroupContext *c;

        /* Returns the mask of controllers the unit needs for itself */

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        /* If delegation is turned on, then turn on all cgroups,
         * unless we are on the legacy hierarchy and the process we
         * fork into it is known to drop privileges, and hence
         * shouldn't get access to the controllers.
         *
         * Note that on the unified hierarchy it is safe to delegate
         * controllers to unprivileged services. */

        if (c->delegate) {
                ExecContext *e;

                e = unit_get_exec_context(u);
                if (!e ||
                    exec_context_maintains_privileges(e) ||
                    cg_all_unified() > 0)
                        return _CGROUP_MASK_ALL;
        }

        return cgroup_context_get_mask(c);
}

CGroupMask unit_get_members_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's children
         * require, merged */

        if (u->cgroup_members_mask_valid)
                return u->cgroup_members_mask;

        u->cgroup_members_mask = 0;

        if (u->type == UNIT_SLICE) {
                Unit *member;
                Iterator i;

                SET_FOREACH(member, u->dependencies[UNIT_BEFORE], i) {

                        if (member == u)
                                continue;

                        if (UNIT_DEREF(member->slice) != u)
                                continue;

                        u->cgroup_members_mask |=
                                unit_get_own_mask(member) |
                                unit_get_members_mask(member);
                }
        }

        u->cgroup_members_mask_valid = true;
        return u->cgroup_members_mask;
}

CGroupMask unit_get_siblings_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's siblings
         * require, i.e. the members mask of the unit's parent slice
         * if there is one. */

        if (UNIT_ISSET(u->slice))
                return unit_get_members_mask(UNIT_DEREF(u->slice));

        return unit_get_own_mask(u) | unit_get_members_mask(u);
}

CGroupMask unit_get_subtree_mask(Unit *u) {

        /* Returns the mask of this subtree, meaning of the group
         * itself and its children. */

        return unit_get_own_mask(u) | unit_get_members_mask(u);
}

CGroupMask unit_get_target_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable
         * for a specific cgroup, i.e. everything it needs itself,
         * plus all that its children need, plus all that its siblings
         * need. This is primarily useful on the legacy cgroup
         * hierarchy, where we need to duplicate each cgroup in each
         * hierarchy that shall be enabled for it. */

        mask = unit_get_own_mask(u) | unit_get_members_mask(u) | unit_get_siblings_mask(u);
        mask &= u->manager->cgroup_supported;

        return mask;
}

CGroupMask unit_get_enable_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable
         * for the children of a specific cgroup. This is primarily
         * useful for the unified cgroup hierarchy, where each cgroup
         * controls which controllers are enabled for its children. */

        mask = unit_get_members_mask(u);
        mask &= u->manager->cgroup_supported;

        return mask;
}

bool unit_get_needs_bpf(Unit *u) {
        CGroupContext *c;
        Unit *p;
        assert(u);

        /* We never attach BPF to slice units, as they are inner cgroup nodes and cgroup/BPF is not recursive at the
         * moment. */
        if (u->type == UNIT_SLICE)
                return false;

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        if (c->ip_accounting ||
            c->ip_address_allow ||
            c->ip_address_deny)
                return true;

        /* If any parent slice has an IP access list defined, it applies too */
        for (p = UNIT_DEREF(u->slice); p; p = UNIT_DEREF(p->slice)) {
                c = unit_get_cgroup_context(p);
                if (!c)
                        return false;

                if (c->ip_address_allow ||
                    c->ip_address_deny)
                        return true;
        }

        return false;
}

/* Recurse from a unit up through its containing slices, propagating
 * mask bits upward. A unit is also member of itself. */
void unit_update_cgroup_members_masks(Unit *u) {
        CGroupMask m;
        bool more;

        assert(u);

        /* Calculate subtree mask */
        m = unit_get_subtree_mask(u);

        /* See if anything changed from the previous invocation. If
         * not, we're done. */
        if (u->cgroup_subtree_mask_valid && m == u->cgroup_subtree_mask)
                return;

        more =
                u->cgroup_subtree_mask_valid &&
                ((m & ~u->cgroup_subtree_mask) != 0) &&
                ((~m & u->cgroup_subtree_mask) == 0);

        u->cgroup_subtree_mask = m;
        u->cgroup_subtree_mask_valid = true;

        if (UNIT_ISSET(u->slice)) {
                Unit *s = UNIT_DEREF(u->slice);

                if (more)
                        /* There's more set now than before. We
                         * propagate the new mask to the parent's mask
                         * (not caring if it actually was valid or
                         * not). */

                        s->cgroup_members_mask |= m;

                else
                        /* There's less set now than before (or we
                         * don't know), we need to recalculate
                         * everything, so let's invalidate the
                         * parent's members mask */

                        s->cgroup_members_mask_valid = false;

                /* And now make sure that this change also hits our
                 * grandparents */
                unit_update_cgroup_members_masks(s);
        }
}

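/* Callback for cg_migrate_everywhere(): walks from the unit up through its
 * slices and returns the cgroup path of the first one that is realized with
 * all controllers in "mask". */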
static const char *migrate_callback(CGroupMask mask, void *userdata) {
        Unit *u = userdata;

        assert(mask != 0);
        assert(u);

        while (u) {
                if (u->cgroup_path &&
                    u->cgroup_realized &&
                    (u->cgroup_realized_mask & mask) == mask)
                        return u->cgroup_path;

                u = UNIT_DEREF(u->slice);
        }

        return NULL;
}

char *unit_default_cgroup_path(Unit *u) {
        _cleanup_free_ char *escaped = NULL, *slice = NULL;
        int r;

        assert(u);

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return strdup(u->manager->cgroup_root);

        if (UNIT_ISSET(u->slice) && !unit_has_name(UNIT_DEREF(u->slice), SPECIAL_ROOT_SLICE)) {
                r = cg_slice_to_path(UNIT_DEREF(u->slice)->id, &slice);
                if (r < 0)
                        return NULL;
        }

        escaped = cg_escape(u->id);
        if (!escaped)
                return NULL;

        if (slice)
                return strjoin(u->manager->cgroup_root, "/", slice, "/",
                               escaped);
        else
                return strjoin(u->manager->cgroup_root, "/", escaped);
}

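/* Set (or clear, if path is NULL) the unit's cgroup path, keeping the
 * manager's path-to-unit hashmap in sync. */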
int unit_set_cgroup_path(Unit *u, const char *path) {
        _cleanup_free_ char *p = NULL;
        int r;

        assert(u);

        if (path) {
                p = strdup(path);
                if (!p)
                        return -ENOMEM;
        } else
                p = NULL;

        if (streq_ptr(u->cgroup_path, p))
                return 0;

        if (p) {
                r = hashmap_put(u->manager->cgroup_unit, p, u);
                if (r < 0)
                        return r;
        }

        unit_release_cgroup(u);

        u->cgroup_path = p;
        p = NULL;

        return 1;
}

int unit_watch_cgroup(Unit *u) {
        _cleanup_free_ char *events = NULL;
        int r;

        assert(u);

        if (!u->cgroup_path)
                return 0;

        if (u->cgroup_inotify_wd >= 0)
                return 0;

        /* Only applies to the unified hierarchy */
        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return log_error_errno(r, "Failed to determine whether the name=systemd hierarchy is unified: %m");
        if (r == 0)
                return 0;

        /* Don't watch the root slice, it's pointless. */
        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        r = hashmap_ensure_allocated(&u->manager->cgroup_inotify_wd_unit, &trivial_hash_ops);
        if (r < 0)
                return log_oom();

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events", &events);
        if (r < 0)
                return log_oom();

        u->cgroup_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, events, IN_MODIFY);
        if (u->cgroup_inotify_wd < 0) {

                if (errno == ENOENT) /* If the directory is already
                                      * gone we don't need to track
                                      * it, so this is not an error */
                        return 0;

                return log_unit_error_errno(u, errno, "Failed to add inotify watch descriptor for control group %s: %m", u->cgroup_path);
        }

        r = hashmap_put(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd), u);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to add inotify watch descriptor to hash map: %m");

        return 0;
}

static int unit_create_cgroup(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask,
                bool needs_bpf) {

        CGroupContext *c;
        int r;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        if (!u->cgroup_path) {
                _cleanup_free_ char *path = NULL;

                path = unit_default_cgroup_path(u);
                if (!path)
                        return log_oom();

                r = unit_set_cgroup_path(u, path);
                if (r == -EEXIST)
                        return log_unit_error_errno(u, r, "Control group %s exists already.", path);
                if (r < 0)
                        return log_unit_error_errno(u, r, "Failed to set unit's control group path to %s: %m", path);
        }

        /* First, create our own group */
        r = cg_create_everywhere(u->manager->cgroup_supported, target_mask, u->cgroup_path);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to create cgroup %s: %m", u->cgroup_path);

        /* Start watching it */
        (void) unit_watch_cgroup(u);

        /* Enable all controllers we need */
        r = cg_enable_everywhere(u->manager->cgroup_supported, enable_mask, u->cgroup_path);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enable controllers on cgroup %s, ignoring: %m", u->cgroup_path);

        /* Keep track that this is now realized */
        u->cgroup_realized = true;
        u->cgroup_realized_mask = target_mask;
        u->cgroup_enabled_mask = enable_mask;
        u->cgroup_bpf_state = needs_bpf ? UNIT_CGROUP_BPF_ON : UNIT_CGROUP_BPF_OFF;

        if (u->type != UNIT_SLICE && !c->delegate) {

                /* Then, possibly move things over, but not if
                 * subgroups may contain processes, which is the case
                 * for slice and delegation units. */
                r = cg_migrate_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->cgroup_path, migrate_callback, u);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to migrate cgroup to %s, ignoring: %m", u->cgroup_path);
        }

        return 0;
}

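/* Realize the unit's cgroup (creating it if necessary) and migrate all of
 * the unit's known PIDs into it. */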
int unit_attach_pids_to_cgroup(Unit *u) {
        int r;
        assert(u);

        r = unit_realize_cgroup(u);
        if (r < 0)
                return r;

        r = cg_attach_many_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->pids, migrate_callback, u);
        if (r < 0)
                return r;

        return 0;
}

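/* Record the unit's invocation ID as the "trusted.invocation_id" extended
 * attribute on its cgroup, so that other programs (such as journald) can
 * map the cgroup back to the invocation. */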
static void cgroup_xattr_apply(Unit *u) {
        char ids[SD_ID128_STRING_MAX];
        int r;

        assert(u);

        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        if (sd_id128_is_null(u->invocation_id))
                return;

        r = cg_set_xattr(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                         "trusted.invocation_id",
                         sd_id128_to_string(u->invocation_id, ids), 32,
                         0);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to set invocation ID on control group %s, ignoring: %m", u->cgroup_path);
}

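/* Returns true if the unit's cgroup is already realized in exactly its
 * current configuration: same target and enable masks, and a BPF state
 * matching needs_bpf. */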
static bool unit_has_mask_realized(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask,
                bool needs_bpf) {

        assert(u);

        return u->cgroup_realized &&
                u->cgroup_realized_mask == target_mask &&
                u->cgroup_enabled_mask == enable_mask &&
                ((needs_bpf && u->cgroup_bpf_state == UNIT_CGROUP_BPF_ON) ||
                 (!needs_bpf && u->cgroup_bpf_state == UNIT_CGROUP_BPF_OFF));
}

/* Check if necessary controllers and attributes for a unit are in place.
 *
 * If so, do nothing.
 * If not, create paths, move processes over, and set attributes.
 *
 * Returns 0 on success and < 0 on failure. */
static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
        CGroupMask target_mask, enable_mask;
        bool needs_bpf, apply_bpf;
        int r;

        assert(u);

        if (u->in_cgroup_queue) {
                LIST_REMOVE(cgroup_queue, u->manager->cgroup_queue, u);
                u->in_cgroup_queue = false;
        }

        target_mask = unit_get_target_mask(u);
        enable_mask = unit_get_enable_mask(u);
        needs_bpf = unit_get_needs_bpf(u);

        if (unit_has_mask_realized(u, target_mask, enable_mask, needs_bpf))
                return 0;

        /* Make sure we apply the BPF filters either when one is configured, or if none is configured but previously
         * the state was anything but off. This way, if a unit with a BPF filter applied is reconfigured to lose it
         * this will trickle down properly to cgroupfs. */
        apply_bpf = needs_bpf || u->cgroup_bpf_state != UNIT_CGROUP_BPF_OFF;

        /* First, realize parents */
        if (UNIT_ISSET(u->slice)) {
                r = unit_realize_cgroup_now(UNIT_DEREF(u->slice), state);
                if (r < 0)
                        return r;
        }

        /* And then do the real work */
        r = unit_create_cgroup(u, target_mask, enable_mask, needs_bpf);
        if (r < 0)
                return r;

        /* Finally, apply the necessary attributes. */
        cgroup_context_apply(u, target_mask, apply_bpf, state);
        cgroup_xattr_apply(u);

        return 0;
}

static void unit_add_to_cgroup_queue(Unit *u) {

        if (u->in_cgroup_queue)
                return;

        LIST_PREPEND(cgroup_queue, u->manager->cgroup_queue, u);
        u->in_cgroup_queue = true;
}

unsigned manager_dispatch_cgroup_queue(Manager *m) {
        ManagerState state;
        unsigned n = 0;
        Unit *i;
        int r;

        state = manager_state(m);

        while ((i = m->cgroup_queue)) {
                assert(i->in_cgroup_queue);

                r = unit_realize_cgroup_now(i, state);
                if (r < 0)
                        log_warning_errno(r, "Failed to realize cgroups for queued unit %s, ignoring: %m", i->id);

                n++;
        }

        return n;
}

static void unit_queue_siblings(Unit *u) {
        Unit *slice;

        /* This adds the siblings of the specified unit and the
         * siblings of all parent units to the cgroup queue. (But
         * neither the specified unit itself nor the parents.) */

        while ((slice = UNIT_DEREF(u->slice))) {
                Iterator i;
                Unit *m;

                SET_FOREACH(m, slice->dependencies[UNIT_BEFORE], i) {
                        if (m == u)
                                continue;

                        /* Skip units that have a dependency on the slice
                         * but aren't actually in it. */
                        if (UNIT_DEREF(m->slice) != slice)
                                continue;

                        /* No point in doing cgroup application for units
                         * without active processes. */
                        if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(m)))
                                continue;

                        /* If the unit doesn't need any new controllers
                         * and has current ones realized, it doesn't need
                         * any changes. */
                        if (unit_has_mask_realized(m,
                                                   unit_get_target_mask(m),
                                                   unit_get_enable_mask(m),
                                                   unit_get_needs_bpf(m)))
                                continue;

                        unit_add_to_cgroup_queue(m);
                }

                u = slice;
        }
}

int unit_realize_cgroup(Unit *u) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* So, here's the deal: when realizing the cgroups for this
         * unit, we need to first create all parents, but there's more
         * actually: for the weight-based controllers we also need to
         * make sure that all our siblings (i.e. units that are in the
         * same slice as we are) have cgroups, too. Otherwise, things
         * would become very uneven as each of their processes would
         * get as much resources as all our group together. This call
         * will synchronously create the parent cgroups, but will
         * defer work on the siblings to the next event loop
         * iteration. */

        /* Add all sibling slices to the cgroup queue. */
        unit_queue_siblings(u);

        /* And realize this one now (and apply the values) */
        return unit_realize_cgroup_now(u, manager_state(u->manager));
}

void unit_release_cgroup(Unit *u) {
        assert(u);

        /* Forgets all cgroup details for this cgroup */

        if (u->cgroup_path) {
                (void) hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
                u->cgroup_path = mfree(u->cgroup_path);
        }

        if (u->cgroup_inotify_wd >= 0) {
                if (inotify_rm_watch(u->manager->cgroup_inotify_fd, u->cgroup_inotify_wd) < 0)
                        log_unit_debug_errno(u, errno, "Failed to remove cgroup inotify watch %i for %s, ignoring", u->cgroup_inotify_wd, u->id);

                (void) hashmap_remove(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd));
                u->cgroup_inotify_wd = -1;
        }
}

void unit_prune_cgroup(Unit *u) {
        int r;
        bool is_root_slice;

        assert(u);

        /* Removes the cgroup, if empty and possible, and stops watching it. */

        if (!u->cgroup_path)
                return;

        (void) unit_get_cpu_usage(u, NULL); /* Cache the last CPU usage value before we destroy the cgroup */

        is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);

        r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !is_root_slice);
        if (r < 0) {
                log_unit_debug_errno(u, r, "Failed to destroy cgroup %s, ignoring: %m", u->cgroup_path);
                return;
        }

        if (is_root_slice)
                return;

        unit_release_cgroup(u);

        u->cgroup_realized = false;
        u->cgroup_realized_mask = 0;
        u->cgroup_enabled_mask = 0;
}

efdb0237 1663int unit_search_main_pid(Unit *u, pid_t *ret) {
4ad49000
LP
1664 _cleanup_fclose_ FILE *f = NULL;
1665 pid_t pid = 0, npid, mypid;
efdb0237 1666 int r;
4ad49000
LP
1667
1668 assert(u);
efdb0237 1669 assert(ret);
4ad49000
LP
1670
1671 if (!u->cgroup_path)
efdb0237 1672 return -ENXIO;
4ad49000 1673
efdb0237
LP
1674 r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, &f);
1675 if (r < 0)
1676 return r;
4ad49000 1677
df0ff127 1678 mypid = getpid_cached();
4ad49000
LP
1679 while (cg_read_pid(f, &npid) > 0) {
1680 pid_t ppid;
1681
1682 if (npid == pid)
1683 continue;
8e274523 1684
4ad49000 1685 /* Ignore processes that aren't our kids */
6bc73acb 1686 if (get_process_ppid(npid, &ppid) >= 0 && ppid != mypid)
4ad49000 1687 continue;
8e274523 1688
efdb0237 1689 if (pid != 0)
4ad49000
LP
1690 /* Dang, there's more than one daemonized PID
1691 * in this group, so we don't know what process
1692 * is the main process. */
efdb0237
LP
1693
1694 return -ENODATA;
8e274523 1695
4ad49000 1696 pid = npid;
8e274523
LP
1697 }
1698
efdb0237
LP
1699 *ret = pid;
1700 return 0;
1701}
1702
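/* [Illustrative sketch added by the editor; hypothetical caller, not part of
 * the upstream file. Assumes this file's usual includes; PID_FMT comes from
 * systemd's format helpers.] How a caller might consume the contract above:
 * -ENXIO means no cgroup is realized yet, -ENODATA means the main process is
 * ambiguous, and a zero PID means no daemonized child was found. */
static void example_guess_main_pid(Unit *u) {
        pid_t pid;
        int r;

        r = unit_search_main_pid(u, &pid);
        if (r == -ENODATA)
                log_unit_debug(u, "More than one daemonized PID in cgroup, not guessing main PID.");
        else if (r < 0)
                log_unit_debug_errno(u, r, "Failed to search for main PID: %m");
        else if (pid > 0)
                log_unit_debug(u, "Guessed main PID "PID_FMT".", pid);
}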
1703static int unit_watch_pids_in_path(Unit *u, const char *path) {
b3c5bad3 1704 _cleanup_closedir_ DIR *d = NULL;
efdb0237
LP
1705 _cleanup_fclose_ FILE *f = NULL;
1706 int ret = 0, r;
1707
1708 assert(u);
1709 assert(path);
1710
1711 r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f);
1712 if (r < 0)
1713 ret = r;
1714 else {
1715 pid_t pid;
1716
1717 while ((r = cg_read_pid(f, &pid)) > 0) {
1718 r = unit_watch_pid(u, pid);
1719 if (r < 0 && ret >= 0)
1720 ret = r;
1721 }
1722
1723 if (r < 0 && ret >= 0)
1724 ret = r;
1725 }
1726
1727 r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d);
1728 if (r < 0) {
1729 if (ret >= 0)
1730 ret = r;
1731 } else {
1732 char *fn;
1733
1734 while ((r = cg_read_subgroup(d, &fn)) > 0) {
1735 _cleanup_free_ char *p = NULL;
1736
605405c6 1737 p = strjoin(path, "/", fn);
efdb0237
LP
1738 free(fn);
1739
1740 if (!p)
1741 return -ENOMEM;
1742
1743 r = unit_watch_pids_in_path(u, p);
1744 if (r < 0 && ret >= 0)
1745 ret = r;
1746 }
1747
1748 if (r < 0 && ret >= 0)
1749 ret = r;
1750 }
1751
1752 return ret;
1753}
1754
1755int unit_watch_all_pids(Unit *u) {
b4cccbc1
LP
1756 int r;
1757
efdb0237
LP
1758 assert(u);
1759
1760 /* Adds all PIDs from our cgroup to the set of PIDs we
1761 * watch. This is fallback logic for cases where we do not
1762 * get reliable cgroup empty notifications: we try to use
1763 * SIGCHLD as a replacement. */
1764
1765 if (!u->cgroup_path)
1766 return -ENOENT;
1767
c22800e4 1768 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
b4cccbc1
LP
1769 if (r < 0)
1770 return r;
1771 if (r > 0) /* On unified we can use proper notifications */
efdb0237
LP
1772 return 0;
1773
1774 return unit_watch_pids_in_path(u, u->cgroup_path);
1775}
1776
1777int unit_notify_cgroup_empty(Unit *u) {
1778 int r;
1779
1780 assert(u);
1781
1782 if (!u->cgroup_path)
1783 return 0;
1784
1785 r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
1786 if (r <= 0)
1787 return r;
1788
1789 unit_add_to_gc_queue(u);
1790
1791 if (UNIT_VTABLE(u)->notify_cgroup_empty)
1792 UNIT_VTABLE(u)->notify_cgroup_empty(u);
1793
1794 return 0;
1795}
1796
1797static int on_cgroup_inotify_event(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
1798 Manager *m = userdata;
1799
1800 assert(s);
1801 assert(fd >= 0);
1802 assert(m);
1803
1804 for (;;) {
1805 union inotify_event_buffer buffer;
1806 struct inotify_event *e;
1807 ssize_t l;
1808
1809 l = read(fd, &buffer, sizeof(buffer));
1810 if (l < 0) {
1811 if (errno == EINTR || errno == EAGAIN)
1812 return 0;
1813
1814 return log_error_errno(errno, "Failed to read control group inotify events: %m");
1815 }
1816
1817 FOREACH_INOTIFY_EVENT(e, buffer, l) {
1818 Unit *u;
1819
1820 if (e->wd < 0)
1821 /* Queue overflow has no watch descriptor */
1822 continue;
1823
1824 if (e->mask & IN_IGNORED)
1825 /* The watch was just removed */
1826 continue;
1827
1828 u = hashmap_get(m->cgroup_inotify_wd_unit, INT_TO_PTR(e->wd));
1829 if (!u) /* Note that inotify may deliver
1830 * events for a watch even after it
1831 * was removed, because the event was
1832 * queued before the removal. Let's
1833 * safely ignore this here. */
1834 continue;
1835
1836 (void) unit_notify_cgroup_empty(u);
1837 }
1838 }
8e274523
LP
1839}
1840
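/* [Stand-alone sketch added by the editor, not part of the upstream file.]
 * The same event-draining pattern as above, written against the raw
 * <sys/inotify.h> API instead of systemd's inotify helpers: read() returns a
 * batch of variable-length records, which must be stepped through using each
 * record's len field. */
#include <errno.h>
#include <stdint.h>
#include <sys/inotify.h>
#include <unistd.h>

static int drain_inotify_fd(int fd) {
        for (;;) {
                uint8_t buffer[4096]
                        __attribute__((aligned(__alignof__(struct inotify_event))));
                ssize_t l;

                l = read(fd, buffer, sizeof(buffer));
                if (l < 0)
                        /* EAGAIN: queue drained for now; EINTR: retry later */
                        return (errno == EAGAIN || errno == EINTR) ? 0 : -errno;

                for (uint8_t *p = buffer; p < buffer + l; ) {
                        const struct inotify_event *e = (const struct inotify_event*) p;

                        /* e->wd < 0 marks a queue overflow event (it carries no
                         * watch descriptor); IN_IGNORED marks a watch that was
                         * just removed. A real handler dispatches on e->wd here. */

                        p += sizeof(struct inotify_event) + e->len;
                }
        }
}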
8e274523 1841int manager_setup_cgroup(Manager *m) {
9444b1f2 1842 _cleanup_free_ char *path = NULL;
10bd3e2e 1843 const char *scope_path;
efdb0237 1844 CGroupController c;
b4cccbc1 1845 int r, all_unified;
efdb0237 1846 char *e;
8e274523
LP
1847
1848 assert(m);
1849
35d2e7ec 1850 /* 1. Determine hierarchy */
efdb0237 1851 m->cgroup_root = mfree(m->cgroup_root);
9444b1f2 1852 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root);
23bbb0de
MS
1853 if (r < 0)
1854 return log_error_errno(r, "Cannot determine cgroup we are running in: %m");
8e274523 1855
efdb0237
LP
1856 /* Chop off the init scope, if we are already located in it */
1857 e = endswith(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
0d8c31ff 1858
efdb0237
LP
1859 /* LEGACY: Also chop off the system slice if we are in
1860 * it. This is to support live upgrades from older systemd
1861 * versions where PID 1 was moved there. Also see
1862 * cg_get_root_path(). */
463d0d15 1863 if (!e && MANAGER_IS_SYSTEM(m)) {
9444b1f2 1864 e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE);
15c60e99 1865 if (!e)
efdb0237 1866 e = endswith(m->cgroup_root, "/system"); /* even more legacy */
0baf24dd 1867 }
efdb0237
LP
1868 if (e)
1869 *e = 0;
7ccfb64a 1870
9444b1f2
LP
1871 /* And make sure to store away the root value without trailing
1872 * slash, even for the root dir, so that we can easily prepend
1873 * it everywhere. */
efdb0237
LP
1874 while ((e = endswith(m->cgroup_root, "/")))
1875 *e = 0;
8e274523 1876
35d2e7ec 1877 /* 2. Show data */
9444b1f2 1878 r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
23bbb0de
MS
1879 if (r < 0)
1880 return log_error_errno(r, "Cannot find cgroup mount point: %m");
8e274523 1881
415fc41c
TH
1882 r = cg_unified_flush();
1883 if (r < 0)
1884 return log_error_errno(r, "Couldn't determine if we are running in the unified hierarchy: %m");
5da38d07 1885
b4cccbc1
LP
1886 all_unified = cg_all_unified();
1887 if (all_unified < 0)
1888 return log_error_errno(all_unified, "Couldn't determine whether we are in all unified mode: %m");
1889 if (all_unified > 0)
efdb0237 1890 log_debug("Unified cgroup hierarchy is located at %s.", path);
b4cccbc1 1891 else {
c22800e4 1892 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
b4cccbc1
LP
1893 if (r < 0)
1894 return log_error_errno(r, "Failed to determine whether systemd's own controller is in unified mode: %m");
1895 if (r > 0)
1896 log_debug("Unified cgroup hierarchy is located at %s. Controllers are on legacy hierarchies.", path);
1897 else
1898 log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER_LEGACY ". File system hierarchy is at %s.", path);
1899 }
efdb0237 1900
10bd3e2e
LP
1901 /* 3. Install agent */
1902 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0) {
c6c18be3 1903
10bd3e2e
LP
1904 /* In the unified hierarchy we can get
1905 * cgroup empty notifications via inotify. */
efdb0237 1906
10bd3e2e
LP
1907 m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
1908 safe_close(m->cgroup_inotify_fd);
efdb0237 1909
10bd3e2e
LP
1910 m->cgroup_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
1911 if (m->cgroup_inotify_fd < 0)
1912 return log_error_errno(errno, "Failed to create control group inotify object: %m");
efdb0237 1913
10bd3e2e
LP
1914 r = sd_event_add_io(m->event, &m->cgroup_inotify_event_source, m->cgroup_inotify_fd, EPOLLIN, on_cgroup_inotify_event, m);
1915 if (r < 0)
1916 return log_error_errno(r, "Failed to watch control group inotify object: %m");
efdb0237 1917
10bd3e2e
LP
1918 /* Process cgroup empty notifications early, but after service notifications and SIGCHLD. Also
1919 * see the handling of cgroup agent notifications for the classic cgroup hierarchy support. */
1920 r = sd_event_source_set_priority(m->cgroup_inotify_event_source, SD_EVENT_PRIORITY_NORMAL-5);
1921 if (r < 0)
1922 return log_error_errno(r, "Failed to set priority of inotify event source: %m");
efdb0237 1923
10bd3e2e 1924 (void) sd_event_source_set_description(m->cgroup_inotify_event_source, "cgroup-inotify");
efdb0237 1925
10bd3e2e 1926 } else if (MANAGER_IS_SYSTEM(m) && m->test_run_flags == 0) {
efdb0237 1927
10bd3e2e
LP
1928 /* On the legacy hierarchy we only get notifications via cgroup agents. (Which isn't really reliable,
1929 * since it does not generate events when control groups with children run empty.) */
8e274523 1930
10bd3e2e 1931 r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, SYSTEMD_CGROUP_AGENT_PATH);
23bbb0de 1932 if (r < 0)
10bd3e2e
LP
1933 log_warning_errno(r, "Failed to install release agent, ignoring: %m");
1934 else if (r > 0)
1935 log_debug("Installed release agent.");
1936 else if (r == 0)
1937 log_debug("Release agent already installed.");
1938 }
efdb0237 1939
10bd3e2e
LP
1940 /* 4. Make sure we are in the special "init.scope" unit in the root slice. */
1941 scope_path = strjoina(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
1942 r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
1943 if (r < 0)
1944 return log_error_errno(r, "Failed to create %s control group: %m", scope_path);
c6c18be3 1945
10bd3e2e
LP
1946 /* also, move all other userspace processes remaining
1947 * in the root cgroup into that scope. */
1948 r = cg_migrate(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
1949 if (r < 0)
1950 log_warning_errno(r, "Couldn't move remaining userspace processes, ignoring: %m");
0d8c31ff 1951
10bd3e2e
LP
1952 /* 5. And pin it, so that it cannot be unmounted */
1953 safe_close(m->pin_cgroupfs_fd);
1954 m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
1955 if (m->pin_cgroupfs_fd < 0)
1956 return log_error_errno(errno, "Failed to open pin file: %m");
1957
1958 /* 6. Always enable hierarchical support if it exists... */
1959 if (!all_unified && m->test_run_flags == 0)
1960 (void) cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");
c6c18be3 1961
0d8c31ff 1962 /* 7. Figure out which controllers are supported */
efdb0237
LP
1963 r = cg_mask_supported(&m->cgroup_supported);
1964 if (r < 0)
1965 return log_error_errno(r, "Failed to determine supported controllers: %m");
1966
1967 for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++)
eee0a1e4 1968 log_debug("Controller '%s' supported: %s", cgroup_controller_to_string(c), yes_no(m->cgroup_supported & CGROUP_CONTROLLER_TO_MASK(c)));
9156e799 1969
a32360f1 1970 return 0;
8e274523
LP
1971}
1972
c6c18be3 1973void manager_shutdown_cgroup(Manager *m, bool delete) {
8e274523
LP
1974 assert(m);
1975
9444b1f2
LP
1976 /* We can't really delete the group, since we are in it. But
1977 * let's trim it. */
1978 if (delete && m->cgroup_root)
efdb0237
LP
1979 (void) cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);
1980
1981 m->cgroup_inotify_wd_unit = hashmap_free(m->cgroup_inotify_wd_unit);
1982
1983 m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
1984 m->cgroup_inotify_fd = safe_close(m->cgroup_inotify_fd);
8e274523 1985
03e334a1 1986 m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd);
c6c18be3 1987
efdb0237 1988 m->cgroup_root = mfree(m->cgroup_root);
8e274523
LP
1989}
1990
4ad49000 1991Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) {
acb14d31 1992 char *p;
4ad49000 1993 Unit *u;
acb14d31
LP
1994
1995 assert(m);
1996 assert(cgroup);
acb14d31 1997
4ad49000
LP
1998 u = hashmap_get(m->cgroup_unit, cgroup);
1999 if (u)
2000 return u;
acb14d31 2001
8e70580b 2002 p = strdupa(cgroup);
acb14d31
LP
2003 for (;;) {
2004 char *e;
2005
2006 e = strrchr(p, '/');
efdb0237
LP
2007 if (!e || e == p)
2008 return hashmap_get(m->cgroup_unit, SPECIAL_ROOT_SLICE);
acb14d31
LP
2009
2010 *e = 0;
2011
4ad49000
LP
2012 u = hashmap_get(m->cgroup_unit, p);
2013 if (u)
2014 return u;
acb14d31
LP
2015 }
2016}
2017
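/* [Worked example added by the editor; the paths are hypothetical, not part
 * of the upstream file.] The prefix walk above resolves a process's cgroup to
 * the closest owning unit:
 *
 *   manager_get_unit_by_cgroup(m, "/system.slice/foo.service/sub")
 *     -> hashmap miss for "/system.slice/foo.service/sub"
 *     -> chop at last '/': hashmap hit for "/system.slice/foo.service"
 *
 * If nothing matches all the way up, the root slice is returned. */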
b3ac818b 2018Unit *manager_get_unit_by_pid_cgroup(Manager *m, pid_t pid) {
4ad49000 2019 _cleanup_free_ char *cgroup = NULL;
acb14d31 2020 int r;
8e274523 2021
8c47c732
LP
2022 assert(m);
2023
b3ac818b
LP
2024 if (pid <= 0)
2025 return NULL;
2026
2027 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup);
2028 if (r < 0)
2029 return NULL;
2030
2031 return manager_get_unit_by_cgroup(m, cgroup);
2032}
2033
2034Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {
2035 Unit *u;
2036
2037 assert(m);
2038
efdb0237 2039 if (pid <= 0)
8c47c732
LP
2040 return NULL;
2041
efdb0237
LP
2042 if (pid == 1)
2043 return hashmap_get(m->units, SPECIAL_INIT_SCOPE);
2044
fea72cc0 2045 u = hashmap_get(m->watch_pids1, PID_TO_PTR(pid));
5fe8876b
LP
2046 if (u)
2047 return u;
2048
fea72cc0 2049 u = hashmap_get(m->watch_pids2, PID_TO_PTR(pid));
5fe8876b
LP
2050 if (u)
2051 return u;
2052
b3ac818b 2053 return manager_get_unit_by_pid_cgroup(m, pid);
6dde1f33 2054}
4fbf50b3 2055
4ad49000
LP
2056int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
2057 Unit *u;
4fbf50b3 2058
4ad49000
LP
2059 assert(m);
2060 assert(cgroup);
4fbf50b3 2061
d8fdc620
LP
2062 log_debug("Got cgroup empty notification for: %s", cgroup);
2063
4ad49000 2064 u = manager_get_unit_by_cgroup(m, cgroup);
5ad096b3
LP
2065 if (!u)
2066 return 0;
b56c28c3 2067
efdb0237 2068 return unit_notify_cgroup_empty(u);
5ad096b3
LP
2069}
2070
2071int unit_get_memory_current(Unit *u, uint64_t *ret) {
2072 _cleanup_free_ char *v = NULL;
2073 int r;
2074
2075 assert(u);
2076 assert(ret);
2077
2078 if (!u->cgroup_path)
2079 return -ENODATA;
2080
efdb0237 2081 if ((u->cgroup_realized_mask & CGROUP_MASK_MEMORY) == 0)
5ad096b3
LP
2082 return -ENODATA;
2083
b4cccbc1
LP
2084 r = cg_all_unified();
2085 if (r < 0)
2086 return r;
2087 if (r > 0)
efdb0237 2088 r = cg_get_attribute("memory", u->cgroup_path, "memory.current", &v);
b4cccbc1
LP
2089 else
2090 r = cg_get_attribute("memory", u->cgroup_path, "memory.usage_in_bytes", &v);
5ad096b3
LP
2091 if (r == -ENOENT)
2092 return -ENODATA;
2093 if (r < 0)
2094 return r;
2095
2096 return safe_atou64(v, ret);
2097}
2098
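/* [Stand-alone sketch added by the editor, not part of the upstream file.]
 * On the unified hierarchy the attribute read above boils down to parsing a
 * single integer from a virtual file; this assumes the usual mount point
 * /sys/fs/cgroup and trims the error handling: */
#include <errno.h>
#include <inttypes.h>
#include <stdio.h>

static int example_read_memory_current(const char *cgroup_path, uint64_t *ret) {
        char fn[4096];
        FILE *f;
        int r;

        /* e.g. /sys/fs/cgroup/system.slice/foo.service/memory.current */
        snprintf(fn, sizeof(fn), "/sys/fs/cgroup%s/memory.current", cgroup_path);

        f = fopen(fn, "re");
        if (!f)
                return -errno;

        r = fscanf(f, "%" SCNu64, ret) == 1 ? 0 : -EINVAL;
        fclose(f);
        return r;
}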
03a7b521
LP
2099int unit_get_tasks_current(Unit *u, uint64_t *ret) {
2100 _cleanup_free_ char *v = NULL;
2101 int r;
2102
2103 assert(u);
2104 assert(ret);
2105
2106 if (!u->cgroup_path)
2107 return -ENODATA;
2108
2109 if ((u->cgroup_realized_mask & CGROUP_MASK_PIDS) == 0)
2110 return -ENODATA;
2111
2112 r = cg_get_attribute("pids", u->cgroup_path, "pids.current", &v);
2113 if (r == -ENOENT)
2114 return -ENODATA;
2115 if (r < 0)
2116 return r;
2117
2118 return safe_atou64(v, ret);
2119}
2120
5ad096b3
LP
2121static int unit_get_cpu_usage_raw(Unit *u, nsec_t *ret) {
2122 _cleanup_free_ char *v = NULL;
2123 uint64_t ns;
2124 int r;
2125
2126 assert(u);
2127 assert(ret);
2128
2129 if (!u->cgroup_path)
2130 return -ENODATA;
2131
b4cccbc1
LP
2132 r = cg_all_unified();
2133 if (r < 0)
2134 return r;
2135 if (r > 0) {
66ebf6c0
TH
2136 const char *keys[] = { "usage_usec", NULL };
2137 _cleanup_free_ char *val = NULL;
2138 uint64_t us;
5ad096b3 2139
66ebf6c0
TH
2140 if ((u->cgroup_realized_mask & CGROUP_MASK_CPU) == 0)
2141 return -ENODATA;
5ad096b3 2142
66ebf6c0
TH
2143 r = cg_get_keyed_attribute("cpu", u->cgroup_path, "cpu.stat", keys, &val);
2144 if (r < 0)
2145 return r;
2146
2147 r = safe_atou64(val, &us);
2148 if (r < 0)
2149 return r;
2150
2151 ns = us * NSEC_PER_USEC;
2152 } else {
2153 if ((u->cgroup_realized_mask & CGROUP_MASK_CPUACCT) == 0)
2154 return -ENODATA;
2155
2156 r = cg_get_attribute("cpuacct", u->cgroup_path, "cpuacct.usage", &v);
2157 if (r == -ENOENT)
2158 return -ENODATA;
2159 if (r < 0)
2160 return r;
2161
2162 r = safe_atou64(v, &ns);
2163 if (r < 0)
2164 return r;
2165 }
5ad096b3
LP
2166
2167 *ret = ns;
2168 return 0;
2169}
2170
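/* [Worked example added by the editor; the sample values are made up.] On the
 * unified hierarchy cpu.stat is a keyed file, e.g.:
 *
 *   usage_usec 5130000
 *   user_usec 3120000
 *   system_usec 2010000
 *
 * Only usage_usec is extracted above, and since it is in microseconds it is
 * scaled to nanoseconds: ns = 5130000 * NSEC_PER_USEC = 5130000000. */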
2171int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
2172 nsec_t ns;
2173 int r;
2174
fe700f46
LP
2175 assert(u);
2176
2177 /* Retrieve the current CPU usage counter. This will subtract the CPU counter taken when the unit was
2178 * started. If the cgroup has been removed already, returns the last cached value. To cache the value, simply
2179 * call this function with a NULL return parameter. */
2180
5ad096b3 2181 r = unit_get_cpu_usage_raw(u, &ns);
fe700f46
LP
2182 if (r == -ENODATA && u->cpu_usage_last != NSEC_INFINITY) {
2183 /* If we can't get the CPU usage anymore (because the cgroup was already removed, for example), use our
2184 * cached value. */
2185
2186 if (ret)
2187 *ret = u->cpu_usage_last;
2188 return 0;
2189 }
5ad096b3
LP
2190 if (r < 0)
2191 return r;
2192
66ebf6c0
TH
2193 if (ns > u->cpu_usage_base)
2194 ns -= u->cpu_usage_base;
5ad096b3
LP
2195 else
2196 ns = 0;
2197
fe700f46
LP
2198 u->cpu_usage_last = ns;
2199 if (ret)
2200 *ret = ns;
2201
5ad096b3
LP
2202 return 0;
2203}
2204
906c06f6
DM
2205int unit_get_ip_accounting(
2206 Unit *u,
2207 CGroupIPAccountingMetric metric,
2208 uint64_t *ret) {
2209
2210 int fd, r;
2211
2212 assert(u);
2213 assert(metric >= 0);
2214 assert(metric < _CGROUP_IP_ACCOUNTING_METRIC_MAX);
2215 assert(ret);
2216
2217 fd = IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_INGRESS_PACKETS) ?
2218 u->ip_accounting_ingress_map_fd :
2219 u->ip_accounting_egress_map_fd;
2220
2221 if (fd < 0)
2222 return -ENODATA;
2223
2224 if (IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
2225 r = bpf_firewall_read_accounting(fd, ret, NULL);
2226 else
2227 r = bpf_firewall_read_accounting(fd, NULL, ret);
2228
2229 return r;
2230}
2231
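/* [Illustrative sketch added by the editor; hypothetical caller, not part of
 * the upstream file. Assumes <inttypes.h> for PRIu64.] Fetching one counter
 * per metric; -ENODATA simply means no BPF accounting map is attached, e.g.
 * because IP accounting is not enabled for the unit: */
static void example_log_ip_traffic(Unit *u) {
        uint64_t ingress, egress;

        if (unit_get_ip_accounting(u, CGROUP_IP_INGRESS_BYTES, &ingress) >= 0 &&
            unit_get_ip_accounting(u, CGROUP_IP_EGRESS_BYTES, &egress) >= 0)
                log_unit_debug(u, "IP traffic: %" PRIu64 " bytes in, %" PRIu64 " bytes out.", ingress, egress);
}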
2232int unit_reset_cpu_accounting(Unit *u) {
5ad096b3
LP
2233 nsec_t ns;
2234 int r;
2235
2236 assert(u);
2237
fe700f46
LP
2238 u->cpu_usage_last = NSEC_INFINITY;
2239
5ad096b3
LP
2240 r = unit_get_cpu_usage_raw(u, &ns);
2241 if (r < 0) {
66ebf6c0 2242 u->cpu_usage_base = 0;
5ad096b3 2243 return r;
b56c28c3 2244 }
2633eb83 2245
66ebf6c0 2246 u->cpu_usage_base = ns;
4ad49000 2247 return 0;
4fbf50b3
LP
2248}
2249
906c06f6
DM
2250int unit_reset_ip_accounting(Unit *u) {
2251 int r = 0, q = 0;
2252
2253 assert(u);
2254
2255 if (u->ip_accounting_ingress_map_fd >= 0)
2256 r = bpf_firewall_reset_accounting(u->ip_accounting_ingress_map_fd);
2257
2258 if (u->ip_accounting_egress_map_fd >= 0)
2259 q = bpf_firewall_reset_accounting(u->ip_accounting_egress_map_fd);
2260
2261 return r < 0 ? r : q;
2262}
2263
e9db43d5
LP
2264bool unit_cgroup_delegate(Unit *u) {
2265 CGroupContext *c;
2266
2267 assert(u);
2268
2269 c = unit_get_cgroup_context(u);
2270 if (!c)
2271 return false;
2272
2273 return c->delegate;
2274}
2275
e7ab4d1a
LP
2276void unit_invalidate_cgroup(Unit *u, CGroupMask m) {
2277 assert(u);
2278
2279 if (!UNIT_HAS_CGROUP_CONTEXT(u))
2280 return;
2281
2282 if (m == 0)
2283 return;
2284
538b4852
TH
2285 /* always invalidate compat pairs together */
2286 if (m & (CGROUP_MASK_IO | CGROUP_MASK_BLKIO))
2287 m |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;
2288
7cce4fb7
LP
2289 if (m & (CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT))
2290 m |= CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT;
2291
e7ab4d1a
LP
2292 if ((u->cgroup_realized_mask & m) == 0)
2293 return;
2294
2295 u->cgroup_realized_mask &= ~m;
2296 unit_add_to_cgroup_queue(u);
2297}
2298
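/* [Worked example added by the editor.] The pairing above means a caller
 * invalidating only one half of a compat pair still refreshes both
 * hierarchies:
 *
 *   unit_invalidate_cgroup(u, CGROUP_MASK_IO)
 *     -> m widened to CGROUP_MASK_IO|CGROUP_MASK_BLKIO
 *     -> both bits cleared from cgroup_realized_mask, unit queued */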
906c06f6
DM
2299void unit_invalidate_cgroup_bpf(Unit *u) {
2300 assert(u);
2301
2302 if (!UNIT_HAS_CGROUP_CONTEXT(u))
2303 return;
2304
2305 if (u->cgroup_bpf_state == UNIT_CGROUP_BPF_INVALIDATED)
2306 return;
2307
2308 u->cgroup_bpf_state = UNIT_CGROUP_BPF_INVALIDATED;
2309 unit_add_to_cgroup_queue(u);
2310
2311 /* If we are a slice unit, we also need to compile a new BPF program for all our children, as the IP access
2312 * lists of our children include our own. */
2313 if (u->type == UNIT_SLICE) {
2314 Unit *member;
2315 Iterator i;
2316
2317 SET_FOREACH(member, u->dependencies[UNIT_BEFORE], i) {
2318 if (member == u)
2319 continue;
2320
2321 if (UNIT_DEREF(member->slice) != u)
2322 continue;
2323
2324 unit_invalidate_cgroup_bpf(member);
2325 }
2326 }
2327}
2328
e7ab4d1a
LP
2329void manager_invalidate_startup_units(Manager *m) {
2330 Iterator i;
2331 Unit *u;
2332
2333 assert(m);
2334
2335 SET_FOREACH(u, m->startup_units, i)
13c31542 2336 unit_invalidate_cgroup(u, CGROUP_MASK_CPU|CGROUP_MASK_IO|CGROUP_MASK_BLKIO);
e7ab4d1a
LP
2337}
2338
4ad49000
LP
2339static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
2340 [CGROUP_AUTO] = "auto",
2341 [CGROUP_CLOSED] = "closed",
2342 [CGROUP_STRICT] = "strict",
2343};
4fbf50b3 2344
4ad49000 2345DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);