/* src/core/cgroup.c */

/***
  This file is part of systemd.

  Copyright 2013 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/

#include <fcntl.h>
#include <fnmatch.h>

#include "alloc-util.h"
#include "bpf-firewall.h"
#include "cgroup-util.h"
#include "cgroup.h"
#include "fd-util.h"
#include "fileio.h"
#include "fs-util.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
#include "special.h"
#include "stdio-util.h"
#include "string-table.h"
#include "string-util.h"

#define CGROUP_CPU_QUOTA_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)

static void cgroup_compat_warn(void) {
        static bool cgroup_compat_warned = false;

        if (cgroup_compat_warned)
                return;

        log_warning("cgroup compatibility translation between legacy and unified hierarchy settings activated. See cgroup-compat debug messages for details.");
        cgroup_compat_warned = true;
}

#define log_cgroup_compat(unit, fmt, ...) do {                                  \
                cgroup_compat_warn();                                           \
                log_unit_debug(unit, "cgroup-compat: " fmt, ##__VA_ARGS__);     \
        } while (false)

void cgroup_context_init(CGroupContext *c) {
        assert(c);

        /* Initialize everything to the kernel defaults, assuming the
         * structure is preinitialized to 0 */

        c->cpu_weight = CGROUP_WEIGHT_INVALID;
        c->startup_cpu_weight = CGROUP_WEIGHT_INVALID;
        c->cpu_quota_per_sec_usec = USEC_INFINITY;

        c->cpu_shares = CGROUP_CPU_SHARES_INVALID;
        c->startup_cpu_shares = CGROUP_CPU_SHARES_INVALID;

        c->memory_high = CGROUP_LIMIT_MAX;
        c->memory_max = CGROUP_LIMIT_MAX;
        c->memory_swap_max = CGROUP_LIMIT_MAX;

        c->memory_limit = CGROUP_LIMIT_MAX;

        c->io_weight = CGROUP_WEIGHT_INVALID;
        c->startup_io_weight = CGROUP_WEIGHT_INVALID;

        c->blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;
        c->startup_blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;

        c->tasks_max = (uint64_t) -1;
}

void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
        assert(c);
        assert(a);

        LIST_REMOVE(device_allow, c->device_allow, a);
        free(a->path);
        free(a);
}

void cgroup_context_free_io_device_weight(CGroupContext *c, CGroupIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->io_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_io_device_limit(CGroupContext *c, CGroupIODeviceLimit *l) {
        assert(c);
        assert(l);

        LIST_REMOVE(device_limits, c->io_device_limits, l);
        free(l->path);
        free(l);
}

void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->blockio_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b) {
        assert(c);
        assert(b);

        LIST_REMOVE(device_bandwidths, c->blockio_device_bandwidths, b);
        free(b->path);
        free(b);
}

void cgroup_context_done(CGroupContext *c) {
        assert(c);

        while (c->io_device_weights)
                cgroup_context_free_io_device_weight(c, c->io_device_weights);

        while (c->io_device_limits)
                cgroup_context_free_io_device_limit(c, c->io_device_limits);

        while (c->blockio_device_weights)
                cgroup_context_free_blockio_device_weight(c, c->blockio_device_weights);

        while (c->blockio_device_bandwidths)
                cgroup_context_free_blockio_device_bandwidth(c, c->blockio_device_bandwidths);

        while (c->device_allow)
                cgroup_context_free_device_allow(c, c->device_allow);

        c->ip_address_allow = ip_address_access_free_all(c->ip_address_allow);
        c->ip_address_deny = ip_address_access_free_all(c->ip_address_deny);
}

void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) {
        CGroupIODeviceLimit *il;
        CGroupIODeviceWeight *iw;
        CGroupBlockIODeviceBandwidth *b;
        CGroupBlockIODeviceWeight *w;
        CGroupDeviceAllow *a;
        IPAddressAccessItem *iaai;
        char u[FORMAT_TIMESPAN_MAX];

        assert(c);
        assert(f);

        prefix = strempty(prefix);

        fprintf(f,
                "%sCPUAccounting=%s\n"
                "%sIOAccounting=%s\n"
                "%sBlockIOAccounting=%s\n"
                "%sMemoryAccounting=%s\n"
                "%sTasksAccounting=%s\n"
                "%sIPAccounting=%s\n"
                "%sCPUWeight=%" PRIu64 "\n"
                "%sStartupCPUWeight=%" PRIu64 "\n"
                "%sCPUShares=%" PRIu64 "\n"
                "%sStartupCPUShares=%" PRIu64 "\n"
                "%sCPUQuotaPerSecSec=%s\n"
                "%sIOWeight=%" PRIu64 "\n"
                "%sStartupIOWeight=%" PRIu64 "\n"
                "%sBlockIOWeight=%" PRIu64 "\n"
                "%sStartupBlockIOWeight=%" PRIu64 "\n"
                "%sMemoryLow=%" PRIu64 "\n"
                "%sMemoryHigh=%" PRIu64 "\n"
                "%sMemoryMax=%" PRIu64 "\n"
                "%sMemorySwapMax=%" PRIu64 "\n"
                "%sMemoryLimit=%" PRIu64 "\n"
                "%sTasksMax=%" PRIu64 "\n"
                "%sDevicePolicy=%s\n"
                "%sDelegate=%s\n",
                prefix, yes_no(c->cpu_accounting),
                prefix, yes_no(c->io_accounting),
                prefix, yes_no(c->blockio_accounting),
                prefix, yes_no(c->memory_accounting),
                prefix, yes_no(c->tasks_accounting),
                prefix, yes_no(c->ip_accounting),
                prefix, c->cpu_weight,
                prefix, c->startup_cpu_weight,
                prefix, c->cpu_shares,
                prefix, c->startup_cpu_shares,
                prefix, format_timespan(u, sizeof(u), c->cpu_quota_per_sec_usec, 1),
                prefix, c->io_weight,
                prefix, c->startup_io_weight,
                prefix, c->blockio_weight,
                prefix, c->startup_blockio_weight,
                prefix, c->memory_low,
                prefix, c->memory_high,
                prefix, c->memory_max,
                prefix, c->memory_swap_max,
                prefix, c->memory_limit,
                prefix, c->tasks_max,
                prefix, cgroup_device_policy_to_string(c->device_policy),
                prefix, yes_no(c->delegate));

        LIST_FOREACH(device_allow, a, c->device_allow)
                fprintf(f,
                        "%sDeviceAllow=%s %s%s%s\n",
                        prefix,
                        a->path,
                        a->r ? "r" : "", a->w ? "w" : "", a->m ? "m" : "");

        LIST_FOREACH(device_weights, iw, c->io_device_weights)
                fprintf(f,
                        "%sIODeviceWeight=%s %" PRIu64 "\n",
                        prefix,
                        iw->path,
                        iw->weight);

        LIST_FOREACH(device_limits, il, c->io_device_limits) {
                char buf[FORMAT_BYTES_MAX];
                CGroupIOLimitType type;

                for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                        if (il->limits[type] != cgroup_io_limit_defaults[type])
                                fprintf(f,
                                        "%s%s=%s %s\n",
                                        prefix,
                                        cgroup_io_limit_type_to_string(type),
                                        il->path,
                                        format_bytes(buf, sizeof(buf), il->limits[type]));
        }

        LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                fprintf(f,
                        "%sBlockIODeviceWeight=%s %" PRIu64 "\n",
                        prefix,
                        w->path,
                        w->weight);

        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                char buf[FORMAT_BYTES_MAX];

                if (b->rbps != CGROUP_LIMIT_MAX)
                        fprintf(f,
                                "%sBlockIOReadBandwidth=%s %s\n",
                                prefix,
                                b->path,
                                format_bytes(buf, sizeof(buf), b->rbps));
                if (b->wbps != CGROUP_LIMIT_MAX)
                        fprintf(f,
                                "%sBlockIOWriteBandwidth=%s %s\n",
                                prefix,
                                b->path,
                                format_bytes(buf, sizeof(buf), b->wbps));
        }

        LIST_FOREACH(items, iaai, c->ip_address_allow) {
                _cleanup_free_ char *k = NULL;

                (void) in_addr_to_string(iaai->family, &iaai->address, &k);
                fprintf(f, "%sIPAddressAllow=%s/%u\n", prefix, strnull(k), iaai->prefixlen);
        }

        LIST_FOREACH(items, iaai, c->ip_address_deny) {
                _cleanup_free_ char *k = NULL;

                (void) in_addr_to_string(iaai->family, &iaai->address, &k);
                fprintf(f, "%sIPAddressDeny=%s/%u\n", prefix, strnull(k), iaai->prefixlen);
        }
}

static int lookup_block_device(const char *p, dev_t *dev) {
        struct stat st;
        int r;

        assert(p);
        assert(dev);

        r = stat(p, &st);
        if (r < 0)
                return log_warning_errno(errno, "Couldn't stat device %s: %m", p);

        if (S_ISBLK(st.st_mode))
                *dev = st.st_rdev;
        else if (major(st.st_dev) != 0) {
                /* If this is not a device node then find the block
                 * device this file is stored on */
                *dev = st.st_dev;

                /* If this is a partition, try to get the originating
                 * block device */
                block_get_whole_disk(*dev, dev);
        } else {
                log_warning("%s is not a block device and file system block device cannot be determined or is not local.", p);
                return -ENODEV;
        }

        return 0;
}

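/* The legacy "devices" controller takes one rule per write, in the form
 * "<type> <major>:<minor> <access>"; e.g. "c 1:3 rwm" grants read, write and
 * mknod access to /dev/null. whitelist_device() below formats and writes
 * exactly one such rule for a single device node. */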
static int whitelist_device(const char *path, const char *node, const char *acc) {
        char buf[2+DECIMAL_STR_MAX(dev_t)*2+2+4];
        struct stat st;
        bool ignore_notfound;
        int r;

        assert(path);
        assert(acc);

        if (node[0] == '-') {
                /* Non-existent paths starting with "-" must be silently ignored */
                node++;
                ignore_notfound = true;
        } else
                ignore_notfound = false;

        if (stat(node, &st) < 0) {
                if (errno == ENOENT && ignore_notfound)
                        return 0;

                return log_warning_errno(errno, "Couldn't stat device %s: %m", node);
        }

        if (!S_ISCHR(st.st_mode) && !S_ISBLK(st.st_mode)) {
                log_warning("%s is not a device.", node);
                return -ENODEV;
        }

        sprintf(buf,
                "%c %u:%u %s",
                S_ISCHR(st.st_mode) ? 'c' : 'b',
                major(st.st_rdev), minor(st.st_rdev),
                acc);

        r = cg_set_attribute("devices", path, "devices.allow", buf);
        if (r < 0)
                log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                               "Failed to set devices.allow on %s: %m", path);

        return r;
}

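/* /proc/devices lists registered majors in two sections ("Character devices:"
 * and "Block devices:"), one "<major> <name>" entry per line. whitelist_major()
 * matches the driver name against the requested glob and whitelists the whole
 * major via a "<type> <major>:* <access>" rule. */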
static int whitelist_major(const char *path, const char *name, char type, const char *acc) {
        _cleanup_fclose_ FILE *f = NULL;
        char line[LINE_MAX];
        bool good = false;
        int r;

        assert(path);
        assert(acc);
        assert(IN_SET(type, 'b', 'c'));

        f = fopen("/proc/devices", "re");
        if (!f)
                return log_warning_errno(errno, "Cannot open /proc/devices to resolve %s (%c): %m", name, type);

        FOREACH_LINE(line, f, goto fail) {
                char buf[2+DECIMAL_STR_MAX(unsigned)+3+4], *p, *w;
                unsigned maj;

                truncate_nl(line);

                if (type == 'c' && streq(line, "Character devices:")) {
                        good = true;
                        continue;
                }

                if (type == 'b' && streq(line, "Block devices:")) {
                        good = true;
                        continue;
                }

                if (isempty(line)) {
                        good = false;
                        continue;
                }

                if (!good)
                        continue;

                p = strstrip(line);

                w = strpbrk(p, WHITESPACE);
                if (!w)
                        continue;
                *w = 0;

                r = safe_atou(p, &maj);
                if (r < 0)
                        continue;
                if (maj <= 0)
                        continue;

                w++;
                w += strspn(w, WHITESPACE);

                if (fnmatch(name, w, 0) != 0)
                        continue;

                sprintf(buf,
                        "%c %u:* %s",
                        type,
                        maj,
                        acc);

                r = cg_set_attribute("devices", path, "devices.allow", buf);
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set devices.allow on %s: %m", path);
        }

        return 0;

fail:
        return log_warning_errno(errno, "Failed to read /proc/devices: %m");
}

static bool cgroup_context_has_cpu_weight(CGroupContext *c) {
        return c->cpu_weight != CGROUP_WEIGHT_INVALID ||
                c->startup_cpu_weight != CGROUP_WEIGHT_INVALID;
}

static bool cgroup_context_has_cpu_shares(CGroupContext *c) {
        return c->cpu_shares != CGROUP_CPU_SHARES_INVALID ||
                c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID;
}

static uint64_t cgroup_context_cpu_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->startup_cpu_weight;
        else if (c->cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->cpu_weight;
        else
                return CGROUP_WEIGHT_DEFAULT;
}

static uint64_t cgroup_context_cpu_shares(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->startup_cpu_shares;
        else if (c->cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->cpu_shares;
        else
                return CGROUP_CPU_SHARES_DEFAULT;
}

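/* On the unified hierarchy the CPU controller is configured via cpu.weight
 * and cpu.max. cpu.max takes "<quota> <period>" in microseconds, or "max" for
 * no limit; with the fixed 100ms period used here, CPUQuota=20% (i.e. 200ms
 * of CPU time per second) is written as "20000 100000". */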
static void cgroup_apply_unified_cpu_config(Unit *u, uint64_t weight, uint64_t quota) {
        char buf[MAX(DECIMAL_STR_MAX(uint64_t) + 1, (DECIMAL_STR_MAX(usec_t) + 1) * 2)];
        int r;

        xsprintf(buf, "%" PRIu64 "\n", weight);
        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.weight", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.weight: %m");

        if (quota != USEC_INFINITY)
                xsprintf(buf, USEC_FMT " " USEC_FMT "\n",
                         quota * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC, CGROUP_CPU_QUOTA_PERIOD_USEC);
        else
                xsprintf(buf, "max " USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);

        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.max", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.max: %m");
}

static void cgroup_apply_legacy_cpu_config(Unit *u, uint64_t shares, uint64_t quota) {
        char buf[MAX(DECIMAL_STR_MAX(uint64_t), DECIMAL_STR_MAX(usec_t)) + 1];
        int r;

        xsprintf(buf, "%" PRIu64 "\n", shares);
        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.shares", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.shares: %m");

        xsprintf(buf, USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);
        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_period_us", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.cfs_period_us: %m");

        if (quota != USEC_INFINITY) {
                xsprintf(buf, USEC_FMT "\n", quota * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC);
                r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_quota_us", buf);
        } else
                r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_quota_us", "-1");
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.cfs_quota_us: %m");
}

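/* Legacy cpu.shares (range 2..262144, default 1024) and unified cpu.weight
 * (range 1..10000, default 100) are mapped linearly around their defaults and
 * then clamped to the valid range: e.g. CPUShares=2048 becomes a cpu.weight of
 * 200, and CPUWeight=50 becomes cpu.shares of 512. */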
static uint64_t cgroup_cpu_shares_to_weight(uint64_t shares) {
        return CLAMP(shares * CGROUP_WEIGHT_DEFAULT / CGROUP_CPU_SHARES_DEFAULT,
                     CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}

static uint64_t cgroup_cpu_weight_to_shares(uint64_t weight) {
        return CLAMP(weight * CGROUP_CPU_SHARES_DEFAULT / CGROUP_WEIGHT_DEFAULT,
                     CGROUP_CPU_SHARES_MIN, CGROUP_CPU_SHARES_MAX);
}

static bool cgroup_context_has_io_config(CGroupContext *c) {
        return c->io_accounting ||
                c->io_weight != CGROUP_WEIGHT_INVALID ||
                c->startup_io_weight != CGROUP_WEIGHT_INVALID ||
                c->io_device_weights ||
                c->io_device_limits;
}

static bool cgroup_context_has_blockio_config(CGroupContext *c) {
        return c->blockio_accounting ||
                c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
                c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
                c->blockio_device_weights ||
                c->blockio_device_bandwidths;
}

static uint64_t cgroup_context_io_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_io_weight != CGROUP_WEIGHT_INVALID)
                return c->startup_io_weight;
        else if (c->io_weight != CGROUP_WEIGHT_INVALID)
                return c->io_weight;
        else
                return CGROUP_WEIGHT_DEFAULT;
}

static uint64_t cgroup_context_blkio_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
                return c->startup_blockio_weight;
        else if (c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
                return c->blockio_weight;
        else
                return CGROUP_BLKIO_WEIGHT_DEFAULT;
}

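/* The same kind of linear mapping is used between legacy blkio.weight (range
 * 10..1000, default 500) and unified io.weight (range 1..10000, default 100):
 * e.g. BlockIOWeight=1000 is translated into an io.weight of 200. */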
static uint64_t cgroup_weight_blkio_to_io(uint64_t blkio_weight) {
        return CLAMP(blkio_weight * CGROUP_WEIGHT_DEFAULT / CGROUP_BLKIO_WEIGHT_DEFAULT,
                     CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}

static uint64_t cgroup_weight_io_to_blkio(uint64_t io_weight) {
        return CLAMP(io_weight * CGROUP_BLKIO_WEIGHT_DEFAULT / CGROUP_WEIGHT_DEFAULT,
                     CGROUP_BLKIO_WEIGHT_MIN, CGROUP_BLKIO_WEIGHT_MAX);
}

static void cgroup_apply_io_device_weight(Unit *u, const char *dev_path, uint64_t io_weight) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), io_weight);
        r = cg_set_attribute("io", u->cgroup_path, "io.weight", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set io.weight: %m");
}

static void cgroup_apply_blkio_device_weight(Unit *u, const char *dev_path, uint64_t blkio_weight) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), blkio_weight);
        r = cg_set_attribute("blkio", u->cgroup_path, "blkio.weight_device", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set blkio.weight_device: %m");
}

static unsigned cgroup_apply_io_device_limit(Unit *u, const char *dev_path, uint64_t *limits) {
        char limit_bufs[_CGROUP_IO_LIMIT_TYPE_MAX][DECIMAL_STR_MAX(uint64_t)];
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+(6+DECIMAL_STR_MAX(uint64_t)+1)*4];
        CGroupIOLimitType type;
        dev_t dev;
        unsigned n = 0;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return 0;

        for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++) {
                if (limits[type] != cgroup_io_limit_defaults[type]) {
                        xsprintf(limit_bufs[type], "%" PRIu64, limits[type]);
                        n++;
                } else {
                        xsprintf(limit_bufs[type], "%s", limits[type] == CGROUP_LIMIT_MAX ? "max" : "0");
                }
        }

        xsprintf(buf, "%u:%u rbps=%s wbps=%s riops=%s wiops=%s\n", major(dev), minor(dev),
                 limit_bufs[CGROUP_IO_RBPS_MAX], limit_bufs[CGROUP_IO_WBPS_MAX],
                 limit_bufs[CGROUP_IO_RIOPS_MAX], limit_bufs[CGROUP_IO_WIOPS_MAX]);
        r = cg_set_attribute("io", u->cgroup_path, "io.max", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set io.max: %m");
        return n;
}

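/* On the legacy hierarchy, per-device bandwidth throttling is split across
 * blkio.throttle.read_bps_device and blkio.throttle.write_bps_device, each
 * taking "<major>:<minor> <bytes-per-second>", whereas the unified io.max line
 * written above carries all four limits at once. */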
static unsigned cgroup_apply_blkio_device_limit(Unit *u, const char *dev_path, uint64_t rbps, uint64_t wbps) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        unsigned n = 0;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return 0;

        if (rbps != CGROUP_LIMIT_MAX)
                n++;
        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), rbps);
        r = cg_set_attribute("blkio", u->cgroup_path, "blkio.throttle.read_bps_device", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set blkio.throttle.read_bps_device: %m");

        if (wbps != CGROUP_LIMIT_MAX)
                n++;
        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), wbps);
        r = cg_set_attribute("blkio", u->cgroup_path, "blkio.throttle.write_bps_device", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set blkio.throttle.write_bps_device: %m");

        return n;
}

static bool cgroup_context_has_unified_memory_config(CGroupContext *c) {
        return c->memory_low > 0 || c->memory_high != CGROUP_LIMIT_MAX || c->memory_max != CGROUP_LIMIT_MAX || c->memory_swap_max != CGROUP_LIMIT_MAX;
}

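/* memory.low, memory.high, memory.max and memory.swap.max on the unified
 * hierarchy all accept either a byte count or the literal "max", which is what
 * the helper below writes when no limit is configured. */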
static void cgroup_apply_unified_memory_limit(Unit *u, const char *file, uint64_t v) {
        char buf[DECIMAL_STR_MAX(uint64_t) + 1] = "max";
        int r;

        if (v != CGROUP_LIMIT_MAX)
                xsprintf(buf, "%" PRIu64 "\n", v);

        r = cg_set_attribute("memory", u->cgroup_path, file, buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set %s: %m", file);
}

static void cgroup_apply_firewall(Unit *u, CGroupContext *c) {
        int r;

        if (u->type == UNIT_SLICE) /* Skip this for slice units, they are inner cgroup nodes, and since bpf/cgroup is
                                    * not recursive we don't ever touch the bpf on them */
                return;

        r = bpf_firewall_compile(u);
        if (r < 0)
                return;

        (void) bpf_firewall_install(u);
        return;
}

static void cgroup_context_apply(
                Unit *u,
                CGroupMask apply_mask,
                bool apply_bpf,
                ManagerState state) {

        const char *path;
        CGroupContext *c;
        bool is_root;
        int r;

        assert(u);

        c = unit_get_cgroup_context(u);
        path = u->cgroup_path;

        assert(c);
        assert(path);

        /* Nothing to do? Exit early! */
        if (apply_mask == 0 && !apply_bpf)
                return;

        /* Some cgroup attributes are not supported on the root cgroup,
         * hence silently ignore */
        is_root = isempty(path) || path_equal(path, "/");
        if (is_root)
                /* Make sure we don't try to display messages with an empty path. */
                path = "/";

        /* We generally ignore errors caused by read-only mounted
         * cgroup trees (assuming we are running in a container then),
         * and missing cgroups, i.e. EROFS and ENOENT. */

        if ((apply_mask & CGROUP_MASK_CPU) && !is_root) {
                bool has_weight, has_shares;

                has_weight = cgroup_context_has_cpu_weight(c);
                has_shares = cgroup_context_has_cpu_shares(c);

                if (cg_all_unified() > 0) {
                        uint64_t weight;

                        if (has_weight)
                                weight = cgroup_context_cpu_weight(c, state);
                        else if (has_shares) {
                                uint64_t shares = cgroup_context_cpu_shares(c, state);

                                weight = cgroup_cpu_shares_to_weight(shares);

                                log_cgroup_compat(u, "Applying [Startup]CpuShares %" PRIu64 " as [Startup]CpuWeight %" PRIu64 " on %s",
                                                  shares, weight, path);
                        } else
                                weight = CGROUP_WEIGHT_DEFAULT;

                        cgroup_apply_unified_cpu_config(u, weight, c->cpu_quota_per_sec_usec);
                } else {
                        uint64_t shares;

                        if (has_weight) {
                                uint64_t weight = cgroup_context_cpu_weight(c, state);

                                shares = cgroup_cpu_weight_to_shares(weight);

                                log_cgroup_compat(u, "Applying [Startup]CpuWeight %" PRIu64 " as [Startup]CpuShares %" PRIu64 " on %s",
                                                  weight, shares, path);
                        } else if (has_shares)
                                shares = cgroup_context_cpu_shares(c, state);
                        else
                                shares = CGROUP_CPU_SHARES_DEFAULT;

                        cgroup_apply_legacy_cpu_config(u, shares, c->cpu_quota_per_sec_usec);
                }
        }

        if (apply_mask & CGROUP_MASK_IO) {
                bool has_io = cgroup_context_has_io_config(c);
                bool has_blockio = cgroup_context_has_blockio_config(c);

                if (!is_root) {
                        char buf[8+DECIMAL_STR_MAX(uint64_t)+1];
                        uint64_t weight;

                        if (has_io)
                                weight = cgroup_context_io_weight(c, state);
                        else if (has_blockio) {
                                uint64_t blkio_weight = cgroup_context_blkio_weight(c, state);

                                weight = cgroup_weight_blkio_to_io(blkio_weight);

                                log_cgroup_compat(u, "Applying [Startup]BlockIOWeight %" PRIu64 " as [Startup]IOWeight %" PRIu64,
                                                  blkio_weight, weight);
                        } else
                                weight = CGROUP_WEIGHT_DEFAULT;

                        xsprintf(buf, "default %" PRIu64 "\n", weight);
                        r = cg_set_attribute("io", path, "io.weight", buf);
                        if (r < 0)
                                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                              "Failed to set io.weight: %m");

                        if (has_io) {
                                CGroupIODeviceWeight *w;

                                /* FIXME: no way to reset this list */
                                LIST_FOREACH(device_weights, w, c->io_device_weights)
                                        cgroup_apply_io_device_weight(u, w->path, w->weight);
                        } else if (has_blockio) {
                                CGroupBlockIODeviceWeight *w;

                                /* FIXME: no way to reset this list */
                                LIST_FOREACH(device_weights, w, c->blockio_device_weights) {
                                        weight = cgroup_weight_blkio_to_io(w->weight);

                                        log_cgroup_compat(u, "Applying BlockIODeviceWeight %" PRIu64 " as IODeviceWeight %" PRIu64 " for %s",
                                                          w->weight, weight, w->path);

                                        cgroup_apply_io_device_weight(u, w->path, weight);
                                }
                        }
                }

                /* Apply limits and free ones without config. */
                if (has_io) {
                        CGroupIODeviceLimit *l, *next;

                        LIST_FOREACH_SAFE(device_limits, l, next, c->io_device_limits) {
                                if (!cgroup_apply_io_device_limit(u, l->path, l->limits))
                                        cgroup_context_free_io_device_limit(c, l);
                        }
                } else if (has_blockio) {
                        CGroupBlockIODeviceBandwidth *b, *next;

                        LIST_FOREACH_SAFE(device_bandwidths, b, next, c->blockio_device_bandwidths) {
                                uint64_t limits[_CGROUP_IO_LIMIT_TYPE_MAX];
                                CGroupIOLimitType type;

                                for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                                        limits[type] = cgroup_io_limit_defaults[type];

                                limits[CGROUP_IO_RBPS_MAX] = b->rbps;
                                limits[CGROUP_IO_WBPS_MAX] = b->wbps;

                                log_cgroup_compat(u, "Applying BlockIO{Read|Write}Bandwidth %" PRIu64 " %" PRIu64 " as IO{Read|Write}BandwidthMax for %s",
                                                  b->rbps, b->wbps, b->path);

                                if (!cgroup_apply_io_device_limit(u, b->path, limits))
                                        cgroup_context_free_blockio_device_bandwidth(c, b);
                        }
                }
        }

        if (apply_mask & CGROUP_MASK_BLKIO) {
                bool has_io = cgroup_context_has_io_config(c);
                bool has_blockio = cgroup_context_has_blockio_config(c);

                if (!is_root) {
                        char buf[DECIMAL_STR_MAX(uint64_t)+1];
                        uint64_t weight;

                        if (has_io) {
                                uint64_t io_weight = cgroup_context_io_weight(c, state);

                                weight = cgroup_weight_io_to_blkio(io_weight);

                                log_cgroup_compat(u, "Applying [Startup]IOWeight %" PRIu64 " as [Startup]BlockIOWeight %" PRIu64,
                                                  io_weight, weight);
                        } else if (has_blockio)
                                weight = cgroup_context_blkio_weight(c, state);
                        else
                                weight = CGROUP_BLKIO_WEIGHT_DEFAULT;

                        xsprintf(buf, "%" PRIu64 "\n", weight);
                        r = cg_set_attribute("blkio", path, "blkio.weight", buf);
                        if (r < 0)
                                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                              "Failed to set blkio.weight: %m");

                        if (has_io) {
                                CGroupIODeviceWeight *w;

                                /* FIXME: no way to reset this list */
                                LIST_FOREACH(device_weights, w, c->io_device_weights) {
                                        weight = cgroup_weight_io_to_blkio(w->weight);

                                        log_cgroup_compat(u, "Applying IODeviceWeight %" PRIu64 " as BlockIODeviceWeight %" PRIu64 " for %s",
                                                          w->weight, weight, w->path);

                                        cgroup_apply_blkio_device_weight(u, w->path, weight);
                                }
                        } else if (has_blockio) {
                                CGroupBlockIODeviceWeight *w;

                                /* FIXME: no way to reset this list */
                                LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                                        cgroup_apply_blkio_device_weight(u, w->path, w->weight);
                        }
                }

                /* Apply limits and free ones without config. */
                if (has_io) {
                        CGroupIODeviceLimit *l, *next;

                        LIST_FOREACH_SAFE(device_limits, l, next, c->io_device_limits) {
                                log_cgroup_compat(u, "Applying IO{Read|Write}Bandwidth %" PRIu64 " %" PRIu64 " as BlockIO{Read|Write}BandwidthMax for %s",
                                                  l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX], l->path);

                                if (!cgroup_apply_blkio_device_limit(u, l->path, l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX]))
                                        cgroup_context_free_io_device_limit(c, l);
                        }
                } else if (has_blockio) {
                        CGroupBlockIODeviceBandwidth *b, *next;

                        LIST_FOREACH_SAFE(device_bandwidths, b, next, c->blockio_device_bandwidths)
                                if (!cgroup_apply_blkio_device_limit(u, b->path, b->rbps, b->wbps))
                                        cgroup_context_free_blockio_device_bandwidth(c, b);
                }
        }

        if ((apply_mask & CGROUP_MASK_MEMORY) && !is_root) {
                if (cg_all_unified() > 0) {
                        uint64_t max, swap_max = CGROUP_LIMIT_MAX;

                        if (cgroup_context_has_unified_memory_config(c)) {
                                max = c->memory_max;
                                swap_max = c->memory_swap_max;
                        } else {
                                max = c->memory_limit;

                                if (max != CGROUP_LIMIT_MAX)
                                        log_cgroup_compat(u, "Applying MemoryLimit %" PRIu64 " as MemoryMax", max);
                        }

                        cgroup_apply_unified_memory_limit(u, "memory.low", c->memory_low);
                        cgroup_apply_unified_memory_limit(u, "memory.high", c->memory_high);
                        cgroup_apply_unified_memory_limit(u, "memory.max", max);
                        cgroup_apply_unified_memory_limit(u, "memory.swap.max", swap_max);
                } else {
                        char buf[DECIMAL_STR_MAX(uint64_t) + 1];
                        uint64_t val;

                        if (cgroup_context_has_unified_memory_config(c)) {
                                val = c->memory_max;
                                log_cgroup_compat(u, "Applying MemoryMax %" PRIu64 " as MemoryLimit", val);
                        } else
                                val = c->memory_limit;

                        if (val == CGROUP_LIMIT_MAX)
                                strncpy(buf, "-1\n", sizeof(buf));
                        else
                                xsprintf(buf, "%" PRIu64 "\n", val);

                        r = cg_set_attribute("memory", path, "memory.limit_in_bytes", buf);
                        if (r < 0)
                                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                              "Failed to set memory.limit_in_bytes: %m");
                }
        }

        if ((apply_mask & CGROUP_MASK_DEVICES) && !is_root) {
                CGroupDeviceAllow *a;

                /* Changing the devices list of a populated cgroup
                 * might result in EINVAL, hence ignore EINVAL
                 * here. */

                if (c->device_allow || c->device_policy != CGROUP_AUTO)
                        r = cg_set_attribute("devices", path, "devices.deny", "a");
                else
                        r = cg_set_attribute("devices", path, "devices.allow", "a");
                if (r < 0)
                        log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                      "Failed to reset devices.list: %m");

                if (c->device_policy == CGROUP_CLOSED ||
                    (c->device_policy == CGROUP_AUTO && c->device_allow)) {
                        static const char auto_devices[] =
                                "/dev/null\0" "rwm\0"
                                "/dev/zero\0" "rwm\0"
                                "/dev/full\0" "rwm\0"
                                "/dev/random\0" "rwm\0"
                                "/dev/urandom\0" "rwm\0"
                                "/dev/tty\0" "rwm\0"
                                "/dev/pts/ptmx\0" "rw\0" /* /dev/pts/ptmx may not be duplicated, but accessed */
                                /* Allow /run/systemd/inaccessible/{chr,blk} devices for mapping InaccessiblePaths */
                                "-/run/systemd/inaccessible/chr\0" "rwm\0"
                                "-/run/systemd/inaccessible/blk\0" "rwm\0";

                        const char *x, *y;

                        NULSTR_FOREACH_PAIR(x, y, auto_devices)
                                whitelist_device(path, x, y);

                        whitelist_major(path, "pts", 'c', "rw");
                }

                LIST_FOREACH(device_allow, a, c->device_allow) {
                        char acc[4], *val;
                        unsigned k = 0;

                        if (a->r)
                                acc[k++] = 'r';
                        if (a->w)
                                acc[k++] = 'w';
                        if (a->m)
                                acc[k++] = 'm';

                        if (k == 0)
                                continue;

                        acc[k++] = 0;

                        if (path_startswith(a->path, "/dev/"))
                                whitelist_device(path, a->path, acc);
                        else if ((val = startswith(a->path, "block-")))
                                whitelist_major(path, val, 'b', acc);
                        else if ((val = startswith(a->path, "char-")))
                                whitelist_major(path, val, 'c', acc);
                        else
                                log_unit_debug(u, "Ignoring device %s while writing cgroup attribute.", a->path);
                }
        }

        if ((apply_mask & CGROUP_MASK_PIDS) && !is_root) {

                if (c->tasks_max != CGROUP_LIMIT_MAX) {
                        char buf[DECIMAL_STR_MAX(uint64_t) + 2];

                        sprintf(buf, "%" PRIu64 "\n", c->tasks_max);
                        r = cg_set_attribute("pids", path, "pids.max", buf);
                } else
                        r = cg_set_attribute("pids", path, "pids.max", "max");

                if (r < 0)
                        log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                      "Failed to set pids.max: %m");
        }

        if (apply_bpf)
                cgroup_apply_firewall(u, c);
}

CGroupMask cgroup_context_get_mask(CGroupContext *c) {
        CGroupMask mask = 0;

        /* Figure out which controllers we need */

        if (c->cpu_accounting ||
            cgroup_context_has_cpu_weight(c) ||
            cgroup_context_has_cpu_shares(c) ||
            c->cpu_quota_per_sec_usec != USEC_INFINITY)
                mask |= CGROUP_MASK_CPUACCT | CGROUP_MASK_CPU;

        if (cgroup_context_has_io_config(c) || cgroup_context_has_blockio_config(c))
                mask |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;

        if (c->memory_accounting ||
            c->memory_limit != CGROUP_LIMIT_MAX ||
            cgroup_context_has_unified_memory_config(c))
                mask |= CGROUP_MASK_MEMORY;

        if (c->device_allow ||
            c->device_policy != CGROUP_AUTO)
                mask |= CGROUP_MASK_DEVICES;

        if (c->tasks_accounting ||
            c->tasks_max != (uint64_t) -1)
                mask |= CGROUP_MASK_PIDS;

        return mask;
}

CGroupMask unit_get_own_mask(Unit *u) {
        CGroupContext *c;

        /* Returns the mask of controllers the unit needs for itself */

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        /* If delegation is turned on, then turn on all cgroups,
         * unless we are on the legacy hierarchy and the process we
         * fork into it is known to drop privileges, and hence
         * shouldn't get access to the controllers.
         *
         * Note that on the unified hierarchy it is safe to delegate
         * controllers to unprivileged services. */

        if (c->delegate) {
                ExecContext *e;

                e = unit_get_exec_context(u);
                if (!e ||
                    exec_context_maintains_privileges(e) ||
                    cg_all_unified() > 0)
                        return _CGROUP_MASK_ALL;
        }

        return cgroup_context_get_mask(c);
}

CGroupMask unit_get_members_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's children
         * require, merged */

        if (u->cgroup_members_mask_valid)
                return u->cgroup_members_mask;

        u->cgroup_members_mask = 0;

        if (u->type == UNIT_SLICE) {
                void *v;
                Unit *member;
                Iterator i;

                HASHMAP_FOREACH_KEY(v, member, u->dependencies[UNIT_BEFORE], i) {

                        if (member == u)
                                continue;

                        if (UNIT_DEREF(member->slice) != u)
                                continue;

                        u->cgroup_members_mask |= unit_get_subtree_mask(member); /* note that this calls ourselves again, for the children */
                }
        }

        u->cgroup_members_mask_valid = true;
        return u->cgroup_members_mask;
}

CGroupMask unit_get_siblings_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's siblings
         * require, i.e. the members mask of the unit's parent slice
         * if there is one. */

        if (UNIT_ISSET(u->slice))
                return unit_get_members_mask(UNIT_DEREF(u->slice));

        return unit_get_subtree_mask(u);
}

CGroupMask unit_get_subtree_mask(Unit *u) {

        /* Returns the mask of this subtree, meaning of the group
         * itself and its children. */

        return unit_get_own_mask(u) | unit_get_members_mask(u);
}

CGroupMask unit_get_target_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable
         * for a specific cgroup, i.e. everything it needs itself,
         * plus all that its children need, plus all that its siblings
         * need. This is primarily useful on the legacy cgroup
         * hierarchy, where we need to duplicate each cgroup in each
         * hierarchy that shall be enabled for it. */

        mask = unit_get_own_mask(u) | unit_get_members_mask(u) | unit_get_siblings_mask(u);
        mask &= u->manager->cgroup_supported;

        return mask;
}

CGroupMask unit_get_enable_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable
         * for the children of a specific cgroup. This is primarily
         * useful for the unified cgroup hierarchy, where each cgroup
         * controls which controllers are enabled for its children. */

        mask = unit_get_members_mask(u);
        mask &= u->manager->cgroup_supported;

        return mask;
}

bool unit_get_needs_bpf(Unit *u) {
        CGroupContext *c;
        Unit *p;
        assert(u);

        /* We never attach BPF to slice units, as they are inner cgroup nodes and cgroup/BPF is not recursive at the
         * moment. */
        if (u->type == UNIT_SLICE)
                return false;

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        if (c->ip_accounting ||
            c->ip_address_allow ||
            c->ip_address_deny)
                return true;

        /* If any parent slice has an IP access list defined, it applies too */
        for (p = UNIT_DEREF(u->slice); p; p = UNIT_DEREF(p->slice)) {
                c = unit_get_cgroup_context(p);
                if (!c)
                        return false;

                if (c->ip_address_allow ||
                    c->ip_address_deny)
                        return true;
        }

        return false;
}

/* Recurse from a unit up through its containing slices, propagating
 * mask bits upward. A unit is also member of itself. */
void unit_update_cgroup_members_masks(Unit *u) {
        CGroupMask m;
        bool more;

        assert(u);

        /* Calculate subtree mask */
        m = unit_get_subtree_mask(u);

        /* See if anything changed from the previous invocation. If
         * not, we're done. */
        if (u->cgroup_subtree_mask_valid && m == u->cgroup_subtree_mask)
                return;

        more =
                u->cgroup_subtree_mask_valid &&
                ((m & ~u->cgroup_subtree_mask) != 0) &&
                ((~m & u->cgroup_subtree_mask) == 0);

        u->cgroup_subtree_mask = m;
        u->cgroup_subtree_mask_valid = true;

        if (UNIT_ISSET(u->slice)) {
                Unit *s = UNIT_DEREF(u->slice);

                if (more)
                        /* There's more set now than before. We
                         * propagate the new mask to the parent's mask
                         * (not caring if it actually was valid or
                         * not). */

                        s->cgroup_members_mask |= m;

                else
                        /* There's less set now than before (or we
                         * don't know), we need to recalculate
                         * everything, so let's invalidate the
                         * parent's members mask */

                        s->cgroup_members_mask_valid = false;

                /* And now make sure that this change also hits our
                 * grandparents */
                unit_update_cgroup_members_masks(s);
        }
}

static const char *migrate_callback(CGroupMask mask, void *userdata) {
        Unit *u = userdata;

        assert(mask != 0);
        assert(u);

        while (u) {
                if (u->cgroup_path &&
                    u->cgroup_realized &&
                    (u->cgroup_realized_mask & mask) == mask)
                        return u->cgroup_path;

                u = UNIT_DEREF(u->slice);
        }

        return NULL;
}

char *unit_default_cgroup_path(Unit *u) {
        _cleanup_free_ char *escaped = NULL, *slice = NULL;
        int r;

        assert(u);

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return strdup(u->manager->cgroup_root);

        if (UNIT_ISSET(u->slice) && !unit_has_name(UNIT_DEREF(u->slice), SPECIAL_ROOT_SLICE)) {
                r = cg_slice_to_path(UNIT_DEREF(u->slice)->id, &slice);
                if (r < 0)
                        return NULL;
        }

        escaped = cg_escape(u->id);
        if (!escaped)
                return NULL;

        if (slice)
                return strjoin(u->manager->cgroup_root, "/", slice, "/",
                               escaped);
        else
                return strjoin(u->manager->cgroup_root, "/", escaped);
}

int unit_set_cgroup_path(Unit *u, const char *path) {
        _cleanup_free_ char *p = NULL;
        int r;

        assert(u);

        if (path) {
                p = strdup(path);
                if (!p)
                        return -ENOMEM;
        } else
                p = NULL;

        if (streq_ptr(u->cgroup_path, p))
                return 0;

        if (p) {
                r = hashmap_put(u->manager->cgroup_unit, p, u);
                if (r < 0)
                        return r;
        }

        unit_release_cgroup(u);

        u->cgroup_path = p;
        p = NULL;

        return 1;
}

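/* On the unified hierarchy each cgroup exposes a cgroup.events file whose
 * "populated" field flips when the last process leaves. Watching that file
 * with inotify replaces the legacy release_agent/notify_on_release mechanism
 * for detecting empty cgroups. */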
int unit_watch_cgroup(Unit *u) {
        _cleanup_free_ char *events = NULL;
        int r;

        assert(u);

        if (!u->cgroup_path)
                return 0;

        if (u->cgroup_inotify_wd >= 0)
                return 0;

        /* Only applies to the unified hierarchy */
        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return log_error_errno(r, "Failed to determine whether the name=systemd hierarchy is unified: %m");
        if (r == 0)
                return 0;

        /* Don't watch the root slice, it's pointless. */
        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        r = hashmap_ensure_allocated(&u->manager->cgroup_inotify_wd_unit, &trivial_hash_ops);
        if (r < 0)
                return log_oom();

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events", &events);
        if (r < 0)
                return log_oom();

        u->cgroup_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, events, IN_MODIFY);
        if (u->cgroup_inotify_wd < 0) {

                if (errno == ENOENT) /* If the directory is already
                                      * gone we don't need to track
                                      * it, so this is not an error */
                        return 0;

                return log_unit_error_errno(u, errno, "Failed to add inotify watch descriptor for control group %s: %m", u->cgroup_path);
        }

        r = hashmap_put(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd), u);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to add inotify watch descriptor to hash map: %m");

        return 0;
}

static int unit_create_cgroup(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask,
                bool needs_bpf) {

        CGroupContext *c;
        int r;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        if (!u->cgroup_path) {
                _cleanup_free_ char *path = NULL;

                path = unit_default_cgroup_path(u);
                if (!path)
                        return log_oom();

                r = unit_set_cgroup_path(u, path);
                if (r == -EEXIST)
                        return log_unit_error_errno(u, r, "Control group %s exists already.", path);
                if (r < 0)
                        return log_unit_error_errno(u, r, "Failed to set unit's control group path to %s: %m", path);
        }

        /* First, create our own group */
        r = cg_create_everywhere(u->manager->cgroup_supported, target_mask, u->cgroup_path);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to create cgroup %s: %m", u->cgroup_path);

        /* Start watching it */
        (void) unit_watch_cgroup(u);

        /* Enable all controllers we need */
        r = cg_enable_everywhere(u->manager->cgroup_supported, enable_mask, u->cgroup_path);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enable controllers on cgroup %s, ignoring: %m", u->cgroup_path);

        /* Keep track that this is now realized */
        u->cgroup_realized = true;
        u->cgroup_realized_mask = target_mask;
        u->cgroup_enabled_mask = enable_mask;
        u->cgroup_bpf_state = needs_bpf ? UNIT_CGROUP_BPF_ON : UNIT_CGROUP_BPF_OFF;

        if (u->type != UNIT_SLICE && !c->delegate) {

                /* Then, possibly move things over, but not if
                 * subgroups may contain processes, which is the case
                 * for slice and delegation units. */
                r = cg_migrate_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->cgroup_path, migrate_callback, u);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to migrate cgroup to %s, ignoring: %m", u->cgroup_path);
        }

        return 0;
}

int unit_attach_pids_to_cgroup(Unit *u) {
        int r;
        assert(u);

        r = unit_realize_cgroup(u);
        if (r < 0)
                return r;

        r = cg_attach_many_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->pids, migrate_callback, u);
        if (r < 0)
                return r;

        return 0;
}

static void cgroup_xattr_apply(Unit *u) {
        char ids[SD_ID128_STRING_MAX];
        int r;

        assert(u);

        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        if (sd_id128_is_null(u->invocation_id))
                return;

        r = cg_set_xattr(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                         "trusted.invocation_id",
                         sd_id128_to_string(u->invocation_id, ids), 32,
                         0);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to set invocation ID on control group %s, ignoring: %m", u->cgroup_path);
}

static bool unit_has_mask_realized(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask,
                bool needs_bpf) {

        assert(u);

        return u->cgroup_realized &&
                u->cgroup_realized_mask == target_mask &&
                u->cgroup_enabled_mask == enable_mask &&
                ((needs_bpf && u->cgroup_bpf_state == UNIT_CGROUP_BPF_ON) ||
                 (!needs_bpf && u->cgroup_bpf_state == UNIT_CGROUP_BPF_OFF));
}

/* Check if necessary controllers and attributes for a unit are in place.
 *
 * If so, do nothing.
 * If not, create paths, move processes over, and set attributes.
 *
 * Returns 0 on success and < 0 on failure. */
static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
        CGroupMask target_mask, enable_mask;
        bool needs_bpf, apply_bpf;
        int r;

        assert(u);

        if (u->in_cgroup_realize_queue) {
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
                u->in_cgroup_realize_queue = false;
        }

        target_mask = unit_get_target_mask(u);
        enable_mask = unit_get_enable_mask(u);
        needs_bpf = unit_get_needs_bpf(u);

        if (unit_has_mask_realized(u, target_mask, enable_mask, needs_bpf))
                return 0;

        /* Make sure we apply the BPF filters either when one is configured, or if none is configured but previously
         * the state was anything but off. This way, if a unit with a BPF filter applied is reconfigured to lose it
         * this will trickle down properly to cgroupfs. */
        apply_bpf = needs_bpf || u->cgroup_bpf_state != UNIT_CGROUP_BPF_OFF;

        /* First, realize parents */
        if (UNIT_ISSET(u->slice)) {
                r = unit_realize_cgroup_now(UNIT_DEREF(u->slice), state);
                if (r < 0)
                        return r;
        }

        /* And then do the real work */
        r = unit_create_cgroup(u, target_mask, enable_mask, needs_bpf);
        if (r < 0)
                return r;

        /* Finally, apply the necessary attributes. */
        cgroup_context_apply(u, target_mask, apply_bpf, state);
        cgroup_xattr_apply(u);

        return 0;
}

static void unit_add_to_cgroup_realize_queue(Unit *u) {
        assert(u);

        if (u->in_cgroup_realize_queue)
                return;

        LIST_PREPEND(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
        u->in_cgroup_realize_queue = true;
}

unsigned manager_dispatch_cgroup_realize_queue(Manager *m) {
        ManagerState state;
        unsigned n = 0;
        Unit *i;
        int r;

        assert(m);

        state = manager_state(m);

        while ((i = m->cgroup_realize_queue)) {
                assert(i->in_cgroup_realize_queue);

                r = unit_realize_cgroup_now(i, state);
                if (r < 0)
                        log_warning_errno(r, "Failed to realize cgroups for queued unit %s, ignoring: %m", i->id);

                n++;
        }

        return n;
}

static void unit_add_siblings_to_cgroup_realize_queue(Unit *u) {
        Unit *slice;

        /* This adds the siblings of the specified unit and the
         * siblings of all parent units to the cgroup queue. (But
         * neither the specified unit itself nor the parents.) */

        while ((slice = UNIT_DEREF(u->slice))) {
                Iterator i;
                Unit *m;
                void *v;

                HASHMAP_FOREACH_KEY(v, m, u->dependencies[UNIT_BEFORE], i) {
                        if (m == u)
                                continue;

                        /* Skip units that have a dependency on the slice
                         * but aren't actually in it. */
                        if (UNIT_DEREF(m->slice) != slice)
                                continue;

                        /* No point in doing cgroup application for units
                         * without active processes. */
                        if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(m)))
                                continue;

                        /* If the unit doesn't need any new controllers
                         * and has current ones realized, it doesn't need
                         * any changes. */
                        if (unit_has_mask_realized(m,
                                                   unit_get_target_mask(m),
                                                   unit_get_enable_mask(m),
                                                   unit_get_needs_bpf(m)))
                                continue;

                        unit_add_to_cgroup_realize_queue(m);
                }

                u = slice;
        }
}

int unit_realize_cgroup(Unit *u) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* So, here's the deal: when realizing the cgroups for this
         * unit, we need to first create all parents, but there's more
         * actually: for the weight-based controllers we also need to
         * make sure that all our siblings (i.e. units that are in the
         * same slice as we are) have cgroups, too. Otherwise, things
         * would become very uneven as each of their processes would
         * get as much resources as all our group together. This call
         * will synchronously create the parent cgroups, but will
         * defer work on the siblings to the next event loop
         * iteration. */

        /* Add all sibling slices to the cgroup queue. */
        unit_add_siblings_to_cgroup_realize_queue(u);

        /* And realize this one now (and apply the values) */
        return unit_realize_cgroup_now(u, manager_state(u->manager));
}

void unit_release_cgroup(Unit *u) {
        assert(u);

        /* Forgets all cgroup details for this cgroup */

        if (u->cgroup_path) {
                (void) hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
                u->cgroup_path = mfree(u->cgroup_path);
        }

        if (u->cgroup_inotify_wd >= 0) {
                if (inotify_rm_watch(u->manager->cgroup_inotify_fd, u->cgroup_inotify_wd) < 0)
                        log_unit_debug_errno(u, errno, "Failed to remove cgroup inotify watch %i for %s, ignoring", u->cgroup_inotify_wd, u->id);

                (void) hashmap_remove(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd));
                u->cgroup_inotify_wd = -1;
        }
}

void unit_prune_cgroup(Unit *u) {
        int r;
        bool is_root_slice;

        assert(u);

        /* Removes the cgroup, if empty and possible, and stops watching it. */

        if (!u->cgroup_path)
                return;

        (void) unit_get_cpu_usage(u, NULL); /* Cache the last CPU usage value before we destroy the cgroup */

        is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);

        r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !is_root_slice);
        if (r < 0) {
                log_unit_debug_errno(u, r, "Failed to destroy cgroup %s, ignoring: %m", u->cgroup_path);
                return;
        }

        if (is_root_slice)
                return;

        unit_release_cgroup(u);

        u->cgroup_realized = false;
        u->cgroup_realized_mask = 0;
        u->cgroup_enabled_mask = 0;
}

efdb0237 1683int unit_search_main_pid(Unit *u, pid_t *ret) {
4ad49000
LP
1684 _cleanup_fclose_ FILE *f = NULL;
1685 pid_t pid = 0, npid, mypid;
efdb0237 1686 int r;
4ad49000
LP
1687
1688 assert(u);
efdb0237 1689 assert(ret);
4ad49000
LP
1690
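 /* Tries to determine the single "main" PID of this unit's cgroup: enumerates the processes in the
  * cgroup, skips those that are not our own children, and gives up with -ENODATA if more than one
  * candidate remains. */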
1691 if (!u->cgroup_path)
efdb0237 1692 return -ENXIO;
4ad49000 1693
efdb0237
LP
1694 r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, &f);
1695 if (r < 0)
1696 return r;
4ad49000 1697
df0ff127 1698 mypid = getpid_cached();
4ad49000
LP
1699 while (cg_read_pid(f, &npid) > 0) {
1700 pid_t ppid;
1701
1702 if (npid == pid)
1703 continue;
8e274523 1704
4ad49000 1705 /* Ignore processes that aren't our kids */
6bc73acb 1706 if (get_process_ppid(npid, &ppid) >= 0 && ppid != mypid)
4ad49000 1707 continue;
8e274523 1708
efdb0237 1709 if (pid != 0)
4ad49000
LP
 1710 /* Dang, there's more than one daemonized PID
 1711 * in this group, so we don't know what process
 1712 * is the main process. */
efdb0237
LP
1713
1714 return -ENODATA;
8e274523 1715
4ad49000 1716 pid = npid;
8e274523
LP
1717 }
1718
efdb0237
LP
1719 *ret = pid;
1720 return 0;
1721}
1722
1723static int unit_watch_pids_in_path(Unit *u, const char *path) {
b3c5bad3 1724 _cleanup_closedir_ DIR *d = NULL;
efdb0237
LP
1725 _cleanup_fclose_ FILE *f = NULL;
1726 int ret = 0, r;
1727
1728 assert(u);
1729 assert(path);
1730
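 /* Recursively adds all PIDs found in the given cgroup path, and in all of its subgroups, to the
  * set of PIDs we watch for this unit. Remembers the first error encountered but keeps going. */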
1731 r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f);
1732 if (r < 0)
1733 ret = r;
1734 else {
1735 pid_t pid;
1736
1737 while ((r = cg_read_pid(f, &pid)) > 0) {
1738 r = unit_watch_pid(u, pid);
1739 if (r < 0 && ret >= 0)
1740 ret = r;
1741 }
1742
1743 if (r < 0 && ret >= 0)
1744 ret = r;
1745 }
1746
1747 r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d);
1748 if (r < 0) {
1749 if (ret >= 0)
1750 ret = r;
1751 } else {
1752 char *fn;
1753
1754 while ((r = cg_read_subgroup(d, &fn)) > 0) {
1755 _cleanup_free_ char *p = NULL;
1756
605405c6 1757 p = strjoin(path, "/", fn);
efdb0237
LP
1758 free(fn);
1759
1760 if (!p)
1761 return -ENOMEM;
1762
1763 r = unit_watch_pids_in_path(u, p);
1764 if (r < 0 && ret >= 0)
1765 ret = r;
1766 }
1767
1768 if (r < 0 && ret >= 0)
1769 ret = r;
1770 }
1771
1772 return ret;
1773}
1774
1775int unit_watch_all_pids(Unit *u) {
b4cccbc1
LP
1776 int r;
1777
efdb0237
LP
1778 assert(u);
1779
 1780 /* Adds all PIDs from our cgroup to the set of PIDs we
 1781 * watch. This is fallback logic for cases where we do not
 1782 * get reliable cgroup empty notifications: we try to use
 1783 * SIGCHLD as a replacement. */
1784
1785 if (!u->cgroup_path)
1786 return -ENOENT;
1787
c22800e4 1788 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
b4cccbc1
LP
1789 if (r < 0)
1790 return r;
1791 if (r > 0) /* On unified we can use proper notifications */
efdb0237
LP
1792 return 0;
1793
1794 return unit_watch_pids_in_path(u, u->cgroup_path);
1795}
1796
09e24654
LP
1797static int on_cgroup_empty_event(sd_event_source *s, void *userdata) {
1798 Manager *m = userdata;
1799 Unit *u;
efdb0237
LP
1800 int r;
1801
09e24654
LP
1802 assert(s);
1803 assert(m);
efdb0237 1804
09e24654
LP
1805 u = m->cgroup_empty_queue;
1806 if (!u)
efdb0237
LP
1807 return 0;
1808
09e24654
LP
1809 assert(u->in_cgroup_empty_queue);
1810 u->in_cgroup_empty_queue = false;
1811 LIST_REMOVE(cgroup_empty_queue, m->cgroup_empty_queue, u);
1812
1813 if (m->cgroup_empty_queue) {
1814 /* More stuff queued, let's make sure we remain enabled */
1815 r = sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);
1816 if (r < 0)
1817 log_debug_errno(r, "Failed to reenable cgroup empty event source: %m");
1818 }
efdb0237
LP
1819
1820 unit_add_to_gc_queue(u);
1821
1822 if (UNIT_VTABLE(u)->notify_cgroup_empty)
1823 UNIT_VTABLE(u)->notify_cgroup_empty(u);
1824
1825 return 0;
1826}
1827
09e24654
LP
1828void unit_add_to_cgroup_empty_queue(Unit *u) {
1829 int r;
1830
1831 assert(u);
1832
1833 /* Note that there are four different ways how cgroup empty events reach us:
1834 *
1835 * 1. On the unified hierarchy we get an inotify event on the cgroup
1836 *
1837 * 2. On the legacy hierarchy, when running in system mode, we get a datagram on the cgroup agent socket
1838 *
1839 * 3. On the legacy hierarchy, when running in user mode, we get a D-Bus signal on the system bus
1840 *
1841 * 4. On the legacy hierarchy, in service units we start watching all processes of the cgroup for SIGCHLD as
1842 * soon as we get one SIGCHLD, to deal with unreliable cgroup notifications.
1843 *
 1844 * Regardless of which way we got the notification, we'll verify it here, and then add it to a separate
 1845 * queue. This queue will be dispatched at a lower priority than the SIGCHLD handler, so that we always use
 1846 * SIGCHLD if we can get it first, and only use the cgroup empty notifications if there's no SIGCHLD pending
 1847 * (which might happen if the cgroup doesn't contain processes that are our own children, which is typically the
 1848 * case for scope units). */
1849
1850 if (u->in_cgroup_empty_queue)
1851 return;
1852
1853 /* Let's verify that the cgroup is really empty */
1854 if (!u->cgroup_path)
1855 return;
1856 r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
1857 if (r < 0) {
1858 log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
1859 return;
1860 }
1861 if (r == 0)
1862 return;
1863
1864 LIST_PREPEND(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
1865 u->in_cgroup_empty_queue = true;
1866
1867 /* Trigger the defer event */
1868 r = sd_event_source_set_enabled(u->manager->cgroup_empty_event_source, SD_EVENT_ONESHOT);
1869 if (r < 0)
1870 log_debug_errno(r, "Failed to enable cgroup empty event source: %m");
1871}
1872
efdb0237
LP
1873static int on_cgroup_inotify_event(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
1874 Manager *m = userdata;
1875
1876 assert(s);
1877 assert(fd >= 0);
1878 assert(m);
1879
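 /* Drain all pending events from the cgroup inotify fd, and queue an empty-check for each unit
  * whose cgroup was touched. */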
1880 for (;;) {
1881 union inotify_event_buffer buffer;
1882 struct inotify_event *e;
1883 ssize_t l;
1884
1885 l = read(fd, &buffer, sizeof(buffer));
1886 if (l < 0) {
47249640 1887 if (IN_SET(errno, EINTR, EAGAIN))
efdb0237
LP
1888 return 0;
1889
1890 return log_error_errno(errno, "Failed to read control group inotify events: %m");
1891 }
1892
1893 FOREACH_INOTIFY_EVENT(e, buffer, l) {
1894 Unit *u;
1895
1896 if (e->wd < 0)
1897 /* Queue overflow has no watch descriptor */
1898 continue;
1899
1900 if (e->mask & IN_IGNORED)
1901 /* The watch was just removed */
1902 continue;
1903
1904 u = hashmap_get(m->cgroup_inotify_wd_unit, INT_TO_PTR(e->wd));
 1905 if (!u) /* Note that inotify might deliver
 1906 * events for a watch even after it
 1907 * was removed, because it was queued
 1908 * before the removal. Let's ignore
 1909 * this here safely. */
1910 continue;
1911
09e24654 1912 unit_add_to_cgroup_empty_queue(u);
efdb0237
LP
1913 }
1914 }
8e274523
LP
1915}
1916
8e274523 1917int manager_setup_cgroup(Manager *m) {
9444b1f2 1918 _cleanup_free_ char *path = NULL;
10bd3e2e 1919 const char *scope_path;
efdb0237 1920 CGroupController c;
b4cccbc1 1921 int r, all_unified;
efdb0237 1922 char *e;
8e274523
LP
1923
1924 assert(m);
1925
35d2e7ec 1926 /* 1. Determine hierarchy */
efdb0237 1927 m->cgroup_root = mfree(m->cgroup_root);
9444b1f2 1928 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root);
23bbb0de
MS
1929 if (r < 0)
1930 return log_error_errno(r, "Cannot determine cgroup we are running in: %m");
8e274523 1931
efdb0237
LP
1932 /* Chop off the init scope, if we are already located in it */
1933 e = endswith(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
0d8c31ff 1934
efdb0237
LP
1935 /* LEGACY: Also chop off the system slice if we are in
1936 * it. This is to support live upgrades from older systemd
1937 * versions where PID 1 was moved there. Also see
1938 * cg_get_root_path(). */
463d0d15 1939 if (!e && MANAGER_IS_SYSTEM(m)) {
9444b1f2 1940 e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE);
15c60e99 1941 if (!e)
efdb0237 1942 e = endswith(m->cgroup_root, "/system"); /* even more legacy */
0baf24dd 1943 }
efdb0237
LP
1944 if (e)
1945 *e = 0;
7ccfb64a 1946
7546145e
LP
 1947 /* And make sure to store away the root value without a trailing slash, even for the root dir, so that we can
 1948 * easily prepend it everywhere. */
1949 delete_trailing_chars(m->cgroup_root, "/");
8e274523 1950
35d2e7ec 1951 /* 2. Show data */
9444b1f2 1952 r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
23bbb0de
MS
1953 if (r < 0)
1954 return log_error_errno(r, "Cannot find cgroup mount point: %m");
8e274523 1955
415fc41c
TH
1956 r = cg_unified_flush();
1957 if (r < 0)
1958 return log_error_errno(r, "Couldn't determine if we are running in the unified hierarchy: %m");
5da38d07 1959
b4cccbc1
LP
 1960 all_unified = cg_all_unified();
 1961 if (all_unified < 0)
 1962 return log_error_errno(all_unified, "Couldn't determine whether we are in all unified mode: %m");
 1963 if (all_unified > 0)
efdb0237 1964 log_debug("Unified cgroup hierarchy is located at %s.", path);
b4cccbc1 1965 else {
c22800e4 1966 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
b4cccbc1
LP
1967 if (r < 0)
1968 return log_error_errno(r, "Failed to determine whether systemd's own controller is in unified mode: %m");
1969 if (r > 0)
1970 log_debug("Unified cgroup hierarchy is located at %s. Controllers are on legacy hierarchies.", path);
1971 else
1972 log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER_LEGACY ". File system hierarchy is at %s.", path);
1973 }
efdb0237 1974
09e24654
LP
1975 /* 3. Allocate cgroup empty defer event source */
1976 m->cgroup_empty_event_source = sd_event_source_unref(m->cgroup_empty_event_source);
1977 r = sd_event_add_defer(m->event, &m->cgroup_empty_event_source, on_cgroup_empty_event, m);
1978 if (r < 0)
1979 return log_error_errno(r, "Failed to create cgroup empty event source: %m");
1980
1981 r = sd_event_source_set_priority(m->cgroup_empty_event_source, SD_EVENT_PRIORITY_NORMAL-5);
1982 if (r < 0)
1983 return log_error_errno(r, "Failed to set priority of cgroup empty event source: %m");
1984
1985 r = sd_event_source_set_enabled(m->cgroup_empty_event_source, SD_EVENT_OFF);
1986 if (r < 0)
1987 return log_error_errno(r, "Failed to disable cgroup empty event source: %m");
1988
1989 (void) sd_event_source_set_description(m->cgroup_empty_event_source, "cgroup-empty");
1990
1991 /* 4. Install notifier inotify object, or agent */
10bd3e2e 1992 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0) {
c6c18be3 1993
09e24654 1994 /* In the unified hierarchy we can get cgroup empty notifications via inotify. */
efdb0237 1995
10bd3e2e
LP
1996 m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
1997 safe_close(m->cgroup_inotify_fd);
efdb0237 1998
10bd3e2e
LP
1999 m->cgroup_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
2000 if (m->cgroup_inotify_fd < 0)
2001 return log_error_errno(errno, "Failed to create control group inotify object: %m");
efdb0237 2002
10bd3e2e
LP
2003 r = sd_event_add_io(m->event, &m->cgroup_inotify_event_source, m->cgroup_inotify_fd, EPOLLIN, on_cgroup_inotify_event, m);
2004 if (r < 0)
2005 return log_error_errno(r, "Failed to watch control group inotify object: %m");
efdb0237 2006
10bd3e2e
LP
2007 /* Process cgroup empty notifications early, but after service notifications and SIGCHLD. Also
2008 * see handling of cgroup agent notifications, for the classic cgroup hierarchy support. */
09e24654 2009 r = sd_event_source_set_priority(m->cgroup_inotify_event_source, SD_EVENT_PRIORITY_NORMAL-4);
10bd3e2e
LP
2010 if (r < 0)
2011 return log_error_errno(r, "Failed to set priority of inotify event source: %m");
efdb0237 2012
10bd3e2e 2013 (void) sd_event_source_set_description(m->cgroup_inotify_event_source, "cgroup-inotify");
efdb0237 2014
10bd3e2e 2015 } else if (MANAGER_IS_SYSTEM(m) && m->test_run_flags == 0) {
efdb0237 2016
10bd3e2e
LP
 2017 /* On the legacy hierarchy we only get notifications via cgroup agents. (Which isn't really reliable,
 2018 * since it does not generate events when control groups with children run empty.) */
8e274523 2019
10bd3e2e 2020 r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, SYSTEMD_CGROUP_AGENT_PATH);
23bbb0de 2021 if (r < 0)
10bd3e2e
LP
2022 log_warning_errno(r, "Failed to install release agent, ignoring: %m");
2023 else if (r > 0)
2024 log_debug("Installed release agent.");
2025 else if (r == 0)
2026 log_debug("Release agent already installed.");
2027 }
efdb0237 2028
09e24654 2029 /* 5. Make sure we are in the special "init.scope" unit in the root slice. */
10bd3e2e
LP
2030 scope_path = strjoina(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
2031 r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
2032 if (r < 0)
2033 return log_error_errno(r, "Failed to create %s control group: %m", scope_path);
c6c18be3 2034
09e24654 2035 /* Also, move all other userspace processes remaining in the root cgroup into that scope. */
10bd3e2e
LP
2036 r = cg_migrate(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
2037 if (r < 0)
2038 log_warning_errno(r, "Couldn't move remaining userspace processes, ignoring: %m");
0d8c31ff 2039
09e24654 2040 /* 6. And pin it, so that it cannot be unmounted */
10bd3e2e
LP
2041 safe_close(m->pin_cgroupfs_fd);
2042 m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
2043 if (m->pin_cgroupfs_fd < 0)
2044 return log_error_errno(errno, "Failed to open pin file: %m");
2045
09e24654 2046 /* 7. Always enable hierarchical support if it exists... */
10bd3e2e
LP
2047 if (!all_unified && m->test_run_flags == 0)
2048 (void) cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");
c6c18be3 2049
09e24654 2050 /* 8. Figure out which controllers are supported, and log about it */
efdb0237
LP
2051 r = cg_mask_supported(&m->cgroup_supported);
2052 if (r < 0)
2053 return log_error_errno(r, "Failed to determine supported controllers: %m");
efdb0237 2054 for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++)
eee0a1e4 2055 log_debug("Controller '%s' supported: %s", cgroup_controller_to_string(c), yes_no(m->cgroup_supported & CGROUP_CONTROLLER_TO_MASK(c)));
9156e799 2056
a32360f1 2057 return 0;
8e274523
LP
2058}
2059
c6c18be3 2060void manager_shutdown_cgroup(Manager *m, bool delete) {
8e274523
LP
2061 assert(m);
2062
9444b1f2
LP
2063 /* We can't really delete the group, since we are in it. But
2064 * let's trim it. */
2065 if (delete && m->cgroup_root)
efdb0237
LP
2066 (void) cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);
2067
09e24654
LP
2068 m->cgroup_empty_event_source = sd_event_source_unref(m->cgroup_empty_event_source);
2069
efdb0237
LP
2070 m->cgroup_inotify_wd_unit = hashmap_free(m->cgroup_inotify_wd_unit);
2071
2072 m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
2073 m->cgroup_inotify_fd = safe_close(m->cgroup_inotify_fd);
8e274523 2074
03e334a1 2075 m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd);
c6c18be3 2076
efdb0237 2077 m->cgroup_root = mfree(m->cgroup_root);
8e274523
LP
2078}
2079
4ad49000 2080Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) {
acb14d31 2081 char *p;
4ad49000 2082 Unit *u;
acb14d31
LP
2083
2084 assert(m);
2085 assert(cgroup);
acb14d31 2086
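 /* Maps a cgroup path to the unit that owns it: first try an exact match, then walk the path
  * upwards one component at a time, and finally fall back to the root slice. */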
4ad49000
LP
2087 u = hashmap_get(m->cgroup_unit, cgroup);
2088 if (u)
2089 return u;
acb14d31 2090
8e70580b 2091 p = strdupa(cgroup);
acb14d31
LP
2092 for (;;) {
2093 char *e;
2094
2095 e = strrchr(p, '/');
efdb0237
LP
2096 if (!e || e == p)
2097 return hashmap_get(m->cgroup_unit, SPECIAL_ROOT_SLICE);
acb14d31
LP
2098
2099 *e = 0;
2100
4ad49000
LP
2101 u = hashmap_get(m->cgroup_unit, p);
2102 if (u)
2103 return u;
acb14d31
LP
2104 }
2105}
2106
b3ac818b 2107Unit *manager_get_unit_by_pid_cgroup(Manager *m, pid_t pid) {
4ad49000 2108 _cleanup_free_ char *cgroup = NULL;
acb14d31 2109 int r;
8e274523 2110
8c47c732
LP
2111 assert(m);
2112
b3ac818b
LP
2113 if (pid <= 0)
2114 return NULL;
2115
2116 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup);
2117 if (r < 0)
2118 return NULL;
2119
2120 return manager_get_unit_by_cgroup(m, cgroup);
2121}
2122
2123Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {
2124 Unit *u;
2125
2126 assert(m);
2127
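 /* PID 1 always maps to the init scope; otherwise try the PID watch maps first, and fall back to a
  * cgroup-based lookup. */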
efdb0237 2128 if (pid <= 0)
8c47c732
LP
2129 return NULL;
2130
efdb0237
LP
2131 if (pid == 1)
2132 return hashmap_get(m->units, SPECIAL_INIT_SCOPE);
2133
fea72cc0 2134 u = hashmap_get(m->watch_pids1, PID_TO_PTR(pid));
5fe8876b
LP
2135 if (u)
2136 return u;
2137
fea72cc0 2138 u = hashmap_get(m->watch_pids2, PID_TO_PTR(pid));
5fe8876b
LP
2139 if (u)
2140 return u;
2141
b3ac818b 2142 return manager_get_unit_by_pid_cgroup(m, pid);
6dde1f33 2143}
4fbf50b3 2144
4ad49000
LP
2145int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
2146 Unit *u;
4fbf50b3 2147
4ad49000
LP
2148 assert(m);
2149 assert(cgroup);
4fbf50b3 2150
09e24654
LP
2151 /* Called on the legacy hierarchy whenever we get an explicit cgroup notification from the cgroup agent process
2152 * or from the --system instance */
2153
d8fdc620
LP
2154 log_debug("Got cgroup empty notification for: %s", cgroup);
2155
4ad49000 2156 u = manager_get_unit_by_cgroup(m, cgroup);
5ad096b3
LP
2157 if (!u)
2158 return 0;
b56c28c3 2159
09e24654
LP
2160 unit_add_to_cgroup_empty_queue(u);
2161 return 1;
5ad096b3
LP
2162}
2163
2164int unit_get_memory_current(Unit *u, uint64_t *ret) {
2165 _cleanup_free_ char *v = NULL;
2166 int r;
2167
2168 assert(u);
2169 assert(ret);
2170
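 /* Returns the unit's current memory usage in bytes: memory.current on the unified hierarchy,
  * memory.usage_in_bytes on the legacy one. */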
2e4025c0 2171 if (!UNIT_CGROUP_BOOL(u, memory_accounting))
cf3b4be1
LP
2172 return -ENODATA;
2173
5ad096b3
LP
2174 if (!u->cgroup_path)
2175 return -ENODATA;
2176
efdb0237 2177 if ((u->cgroup_realized_mask & CGROUP_MASK_MEMORY) == 0)
5ad096b3
LP
2178 return -ENODATA;
2179
b4cccbc1
LP
2180 r = cg_all_unified();
2181 if (r < 0)
2182 return r;
2183 if (r > 0)
efdb0237 2184 r = cg_get_attribute("memory", u->cgroup_path, "memory.current", &v);
b4cccbc1
LP
2185 else
2186 r = cg_get_attribute("memory", u->cgroup_path, "memory.usage_in_bytes", &v);
5ad096b3
LP
2187 if (r == -ENOENT)
2188 return -ENODATA;
2189 if (r < 0)
2190 return r;
2191
2192 return safe_atou64(v, ret);
2193}
2194
03a7b521
LP
2195int unit_get_tasks_current(Unit *u, uint64_t *ret) {
2196 _cleanup_free_ char *v = NULL;
2197 int r;
2198
2199 assert(u);
2200 assert(ret);
2201
2e4025c0 2202 if (!UNIT_CGROUP_BOOL(u, tasks_accounting))
cf3b4be1
LP
2203 return -ENODATA;
2204
03a7b521
LP
2205 if (!u->cgroup_path)
2206 return -ENODATA;
2207
2208 if ((u->cgroup_realized_mask & CGROUP_MASK_PIDS) == 0)
2209 return -ENODATA;
2210
2211 r = cg_get_attribute("pids", u->cgroup_path, "pids.current", &v);
2212 if (r == -ENOENT)
2213 return -ENODATA;
2214 if (r < 0)
2215 return r;
2216
2217 return safe_atou64(v, ret);
2218}
2219
5ad096b3
LP
2220static int unit_get_cpu_usage_raw(Unit *u, nsec_t *ret) {
2221 _cleanup_free_ char *v = NULL;
2222 uint64_t ns;
2223 int r;
2224
2225 assert(u);
2226 assert(ret);
2227
2228 if (!u->cgroup_path)
2229 return -ENODATA;
2230
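 /* On the unified hierarchy the CPU time is reported in microseconds via the "usage_usec" field of
  * cpu.stat; on the legacy hierarchy cpuacct.usage already reports nanoseconds. */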
b4cccbc1
LP
2231 r = cg_all_unified();
2232 if (r < 0)
2233 return r;
2234 if (r > 0) {
66ebf6c0
TH
2235 const char *keys[] = { "usage_usec", NULL };
2236 _cleanup_free_ char *val = NULL;
2237 uint64_t us;
5ad096b3 2238
66ebf6c0
TH
2239 if ((u->cgroup_realized_mask & CGROUP_MASK_CPU) == 0)
2240 return -ENODATA;
5ad096b3 2241
66ebf6c0
TH
2242 r = cg_get_keyed_attribute("cpu", u->cgroup_path, "cpu.stat", keys, &val);
2243 if (r < 0)
2244 return r;
2245
2246 r = safe_atou64(val, &us);
2247 if (r < 0)
2248 return r;
2249
2250 ns = us * NSEC_PER_USEC;
2251 } else {
2252 if ((u->cgroup_realized_mask & CGROUP_MASK_CPUACCT) == 0)
2253 return -ENODATA;
2254
2255 r = cg_get_attribute("cpuacct", u->cgroup_path, "cpuacct.usage", &v);
2256 if (r == -ENOENT)
2257 return -ENODATA;
2258 if (r < 0)
2259 return r;
2260
2261 r = safe_atou64(v, &ns);
2262 if (r < 0)
2263 return r;
2264 }
5ad096b3
LP
2265
2266 *ret = ns;
2267 return 0;
2268}
2269
2270int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
2271 nsec_t ns;
2272 int r;
2273
fe700f46
LP
2274 assert(u);
2275
2276 /* Retrieve the current CPU usage counter. This will subtract the CPU counter taken when the unit was
2277 * started. If the cgroup has been removed already, returns the last cached value. To cache the value, simply
2278 * call this function with a NULL return value. */
2279
2e4025c0 2280 if (!UNIT_CGROUP_BOOL(u, cpu_accounting))
cf3b4be1
LP
2281 return -ENODATA;
2282
5ad096b3 2283 r = unit_get_cpu_usage_raw(u, &ns);
fe700f46
LP
2284 if (r == -ENODATA && u->cpu_usage_last != NSEC_INFINITY) {
2285 /* If we can't get the CPU usage anymore (because the cgroup was already removed, for example), use our
2286 * cached value. */
2287
2288 if (ret)
2289 *ret = u->cpu_usage_last;
2290 return 0;
2291 }
5ad096b3
LP
2292 if (r < 0)
2293 return r;
2294
66ebf6c0
TH
2295 if (ns > u->cpu_usage_base)
2296 ns -= u->cpu_usage_base;
5ad096b3
LP
2297 else
2298 ns = 0;
2299
fe700f46
LP
2300 u->cpu_usage_last = ns;
2301 if (ret)
2302 *ret = ns;
2303
5ad096b3
LP
2304 return 0;
2305}
2306
906c06f6
DM
2307int unit_get_ip_accounting(
2308 Unit *u,
2309 CGroupIPAccountingMetric metric,
2310 uint64_t *ret) {
2311
6b659ed8 2312 uint64_t value;
906c06f6
DM
2313 int fd, r;
2314
2315 assert(u);
2316 assert(metric >= 0);
2317 assert(metric < _CGROUP_IP_ACCOUNTING_METRIC_MAX);
2318 assert(ret);
2319
cf3b4be1
LP
 2320 /* IP accounting is currently not recursive, and hence we refuse to return any data for slice nodes. Slices are
 2321 * inner cgroup nodes and thus have no processes directly attached, so their counters would be zero
 2322 * anyway. And since we refuse this now, we can still open it up later, should the kernel learn recursive BPF cgroup
 2323 * filters. */
2324 if (u->type == UNIT_SLICE)
2325 return -ENODATA;
2326
2e4025c0 2327 if (!UNIT_CGROUP_BOOL(u, ip_accounting))
cf3b4be1
LP
2328 return -ENODATA;
2329
906c06f6
DM
2330 fd = IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_INGRESS_PACKETS) ?
2331 u->ip_accounting_ingress_map_fd :
2332 u->ip_accounting_egress_map_fd;
2333
2334 if (fd < 0)
2335 return -ENODATA;
2336
2337 if (IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
6b659ed8 2338 r = bpf_firewall_read_accounting(fd, &value, NULL);
906c06f6 2339 else
6b659ed8
LP
2340 r = bpf_firewall_read_accounting(fd, NULL, &value);
2341 if (r < 0)
2342 return r;
2343
2344 /* Add in additional metrics from a previous runtime. Note that when reexecing/reloading the daemon we compile
2345 * all BPF programs and maps anew, but serialize the old counters. When deserializing we store them in the
2346 * ip_accounting_extra[] field, and add them in here transparently. */
2347
2348 *ret = value + u->ip_accounting_extra[metric];
906c06f6
DM
2349
2350 return r;
2351}
2352
2353int unit_reset_cpu_accounting(Unit *u) {
5ad096b3
LP
2354 nsec_t ns;
2355 int r;
2356
2357 assert(u);
2358
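 /* Takes a new baseline for the CPU usage counter, so that subsequent unit_get_cpu_usage() calls
  * report consumption relative to this point. */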
fe700f46
LP
2359 u->cpu_usage_last = NSEC_INFINITY;
2360
5ad096b3
LP
2361 r = unit_get_cpu_usage_raw(u, &ns);
2362 if (r < 0) {
66ebf6c0 2363 u->cpu_usage_base = 0;
5ad096b3 2364 return r;
b56c28c3 2365 }
2633eb83 2366
66ebf6c0 2367 u->cpu_usage_base = ns;
4ad49000 2368 return 0;
4fbf50b3
LP
2369}
2370
906c06f6
DM
2371int unit_reset_ip_accounting(Unit *u) {
2372 int r = 0, q = 0;
2373
2374 assert(u);
2375
2376 if (u->ip_accounting_ingress_map_fd >= 0)
2377 r = bpf_firewall_reset_accounting(u->ip_accounting_ingress_map_fd);
2378
2379 if (u->ip_accounting_egress_map_fd >= 0)
2380 q = bpf_firewall_reset_accounting(u->ip_accounting_egress_map_fd);
2381
6b659ed8
LP
2382 zero(u->ip_accounting_extra);
2383
906c06f6
DM
2384 return r < 0 ? r : q;
2385}
2386
e7ab4d1a
LP
2387void unit_invalidate_cgroup(Unit *u, CGroupMask m) {
2388 assert(u);
2389
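 /* Marks the given controllers as no longer realized, so that their attributes are written out
  * again the next time the unit's cgroup is realized. */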
2390 if (!UNIT_HAS_CGROUP_CONTEXT(u))
2391 return;
2392
2393 if (m == 0)
2394 return;
2395
538b4852
TH
2396 /* always invalidate compat pairs together */
2397 if (m & (CGROUP_MASK_IO | CGROUP_MASK_BLKIO))
2398 m |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;
2399
7cce4fb7
LP
2400 if (m & (CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT))
2401 m |= CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT;
2402
e7ab4d1a
LP
2403 if ((u->cgroup_realized_mask & m) == 0)
2404 return;
2405
2406 u->cgroup_realized_mask &= ~m;
91a6073e 2407 unit_add_to_cgroup_realize_queue(u);
e7ab4d1a
LP
2408}
2409
906c06f6
DM
2410void unit_invalidate_cgroup_bpf(Unit *u) {
2411 assert(u);
2412
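 /* Marks the unit's BPF (IP firewall) program as needing recompilation and queues the unit for
  * re-realization. */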
2413 if (!UNIT_HAS_CGROUP_CONTEXT(u))
2414 return;
2415
2416 if (u->cgroup_bpf_state == UNIT_CGROUP_BPF_INVALIDATED)
2417 return;
2418
2419 u->cgroup_bpf_state = UNIT_CGROUP_BPF_INVALIDATED;
91a6073e 2420 unit_add_to_cgroup_realize_queue(u);
906c06f6
DM
2421
 2422 /* If we are a slice unit, we also need to compile a new BPF program for all our children, as the IP access
 2423 * list of our children includes our own. */
2424 if (u->type == UNIT_SLICE) {
2425 Unit *member;
2426 Iterator i;
eef85c4a 2427 void *v;
906c06f6 2428
eef85c4a 2429 HASHMAP_FOREACH_KEY(v, member, u->dependencies[UNIT_BEFORE], i) {
906c06f6
DM
2430 if (member == u)
2431 continue;
2432
2433 if (UNIT_DEREF(member->slice) != u)
2434 continue;
2435
2436 unit_invalidate_cgroup_bpf(member);
2437 }
2438 }
2439}
2440
e7ab4d1a
LP
2441void manager_invalidate_startup_units(Manager *m) {
2442 Iterator i;
2443 Unit *u;
2444
2445 assert(m);
2446
2447 SET_FOREACH(u, m->startup_units, i)
13c31542 2448 unit_invalidate_cgroup(u, CGROUP_MASK_CPU|CGROUP_MASK_IO|CGROUP_MASK_BLKIO);
e7ab4d1a
LP
2449}
2450
4ad49000
LP
2451static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
2452 [CGROUP_AUTO] = "auto",
2453 [CGROUP_CLOSED] = "closed",
2454 [CGROUP_STRICT] = "strict",
2455};
4fbf50b3 2456
4ad49000 2457DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);