/***
  This file is part of systemd.

  Copyright 2013 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
#include "alloc-util.h"
#include "bpf-firewall.h"
#include "cgroup-util.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
#include "stdio-util.h"
#include "string-table.h"
#include "string-util.h"

#define CGROUP_CPU_QUOTA_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)
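/* All CPU quotas below are scaled against this fixed 100ms CFS period. For
 * example, CPUQuota=20% is stored as cpu_quota_per_sec_usec == 200ms of CPU
 * time per second, and 200000us * 100000us / 1000000us = 20000us of runtime
 * per 100000us period is what ends up in cpu.max and cpu.cfs_quota_us. */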
static void cgroup_compat_warn(void) {
        static bool cgroup_compat_warned = false;

        if (cgroup_compat_warned)
                return;

        log_warning("cgroup compatibility translation between legacy and unified hierarchy settings activated. See cgroup-compat debug messages for details.");
        cgroup_compat_warned = true;
}

#define log_cgroup_compat(unit, fmt, ...) do {                                  \
                cgroup_compat_warn();                                           \
                log_unit_debug(unit, "cgroup-compat: " fmt, ##__VA_ARGS__);     \
        } while (false)
void cgroup_context_init(CGroupContext *c) {
        assert(c);

        /* Initialize everything to the kernel defaults, assuming the
         * structure is preinitialized to 0 */

        c->cpu_weight = CGROUP_WEIGHT_INVALID;
        c->startup_cpu_weight = CGROUP_WEIGHT_INVALID;
        c->cpu_quota_per_sec_usec = USEC_INFINITY;

        c->cpu_shares = CGROUP_CPU_SHARES_INVALID;
        c->startup_cpu_shares = CGROUP_CPU_SHARES_INVALID;

        c->memory_high = CGROUP_LIMIT_MAX;
        c->memory_max = CGROUP_LIMIT_MAX;
        c->memory_swap_max = CGROUP_LIMIT_MAX;

        c->memory_limit = CGROUP_LIMIT_MAX;

        c->io_weight = CGROUP_WEIGHT_INVALID;
        c->startup_io_weight = CGROUP_WEIGHT_INVALID;

        c->blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;
        c->startup_blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;

        c->tasks_max = (uint64_t) -1;
}
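/* The "invalid"/"infinity"/"max" sentinels set above let the appliers below
 * distinguish "never configured" from an explicit value: e.g. cpu_weight ==
 * CGROUP_WEIGHT_INVALID means CPUWeight= was not set, so a legacy CPUShares=
 * value (if any) may be translated in its place. */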
void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
        assert(c);
        assert(a);

        LIST_REMOVE(device_allow, c->device_allow, a);
        free(a->path);
        free(a);
}

void cgroup_context_free_io_device_weight(CGroupContext *c, CGroupIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->io_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_io_device_limit(CGroupContext *c, CGroupIODeviceLimit *l) {
        assert(c);
        assert(l);

        LIST_REMOVE(device_limits, c->io_device_limits, l);
        free(l->path);
        free(l);
}

void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->blockio_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b) {
        assert(c);
        assert(b);

        LIST_REMOVE(device_bandwidths, c->blockio_device_bandwidths, b);
        free(b->path);
        free(b);
}
void cgroup_context_done(CGroupContext *c) {
        assert(c);

        while (c->io_device_weights)
                cgroup_context_free_io_device_weight(c, c->io_device_weights);

        while (c->io_device_limits)
                cgroup_context_free_io_device_limit(c, c->io_device_limits);

        while (c->blockio_device_weights)
                cgroup_context_free_blockio_device_weight(c, c->blockio_device_weights);

        while (c->blockio_device_bandwidths)
                cgroup_context_free_blockio_device_bandwidth(c, c->blockio_device_bandwidths);

        while (c->device_allow)
                cgroup_context_free_device_allow(c, c->device_allow);

        c->ip_address_allow = ip_address_access_free_all(c->ip_address_allow);
        c->ip_address_deny = ip_address_access_free_all(c->ip_address_deny);
}
void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) {
        CGroupIODeviceLimit *il;
        CGroupIODeviceWeight *iw;
        CGroupBlockIODeviceBandwidth *b;
        CGroupBlockIODeviceWeight *w;
        CGroupDeviceAllow *a;
        IPAddressAccessItem *iaai;
        char u[FORMAT_TIMESPAN_MAX];

        assert(c);
        assert(f);

        prefix = strempty(prefix);

        fprintf(f,
                "%sCPUAccounting=%s\n"
                "%sIOAccounting=%s\n"
                "%sBlockIOAccounting=%s\n"
                "%sMemoryAccounting=%s\n"
                "%sTasksAccounting=%s\n"
                "%sIPAccounting=%s\n"
                "%sCPUWeight=%" PRIu64 "\n"
                "%sStartupCPUWeight=%" PRIu64 "\n"
                "%sCPUShares=%" PRIu64 "\n"
                "%sStartupCPUShares=%" PRIu64 "\n"
                "%sCPUQuotaPerSecSec=%s\n"
                "%sIOWeight=%" PRIu64 "\n"
                "%sStartupIOWeight=%" PRIu64 "\n"
                "%sBlockIOWeight=%" PRIu64 "\n"
                "%sStartupBlockIOWeight=%" PRIu64 "\n"
                "%sMemoryLow=%" PRIu64 "\n"
                "%sMemoryHigh=%" PRIu64 "\n"
                "%sMemoryMax=%" PRIu64 "\n"
                "%sMemorySwapMax=%" PRIu64 "\n"
                "%sMemoryLimit=%" PRIu64 "\n"
                "%sTasksMax=%" PRIu64 "\n"
                "%sDevicePolicy=%s\n"
                "%sDelegate=%s\n",
                prefix, yes_no(c->cpu_accounting),
                prefix, yes_no(c->io_accounting),
                prefix, yes_no(c->blockio_accounting),
                prefix, yes_no(c->memory_accounting),
                prefix, yes_no(c->tasks_accounting),
                prefix, yes_no(c->ip_accounting),
                prefix, c->cpu_weight,
                prefix, c->startup_cpu_weight,
                prefix, c->cpu_shares,
                prefix, c->startup_cpu_shares,
                prefix, format_timespan(u, sizeof(u), c->cpu_quota_per_sec_usec, 1),
                prefix, c->io_weight,
                prefix, c->startup_io_weight,
                prefix, c->blockio_weight,
                prefix, c->startup_blockio_weight,
                prefix, c->memory_low,
                prefix, c->memory_high,
                prefix, c->memory_max,
                prefix, c->memory_swap_max,
                prefix, c->memory_limit,
                prefix, c->tasks_max,
                prefix, cgroup_device_policy_to_string(c->device_policy),
                prefix, yes_no(c->delegate));

        LIST_FOREACH(device_allow, a, c->device_allow)
                fprintf(f,
                        "%sDeviceAllow=%s %s%s%s\n",
                        prefix,
                        a->path,
                        a->r ? "r" : "", a->w ? "w" : "", a->m ? "m" : "");

        LIST_FOREACH(device_weights, iw, c->io_device_weights)
                fprintf(f,
                        "%sIODeviceWeight=%s %" PRIu64 "\n",
                        prefix,
                        iw->path,
                        iw->weight);

        LIST_FOREACH(device_limits, il, c->io_device_limits) {
                char buf[FORMAT_BYTES_MAX];
                CGroupIOLimitType type;

                for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                        if (il->limits[type] != cgroup_io_limit_defaults[type])
                                fprintf(f,
                                        "%s%s=%s %s\n",
                                        prefix,
                                        cgroup_io_limit_type_to_string(type),
                                        il->path,
                                        format_bytes(buf, sizeof(buf), il->limits[type]));
        }

        LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                fprintf(f,
                        "%sBlockIODeviceWeight=%s %" PRIu64 "\n",
                        prefix,
                        w->path,
                        w->weight);

        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                char buf[FORMAT_BYTES_MAX];

                if (b->rbps != CGROUP_LIMIT_MAX)
                        fprintf(f,
                                "%sBlockIOReadBandwidth=%s %s\n",
                                prefix,
                                b->path,
                                format_bytes(buf, sizeof(buf), b->rbps));
                if (b->wbps != CGROUP_LIMIT_MAX)
                        fprintf(f,
                                "%sBlockIOWriteBandwidth=%s %s\n",
                                prefix,
                                b->path,
                                format_bytes(buf, sizeof(buf), b->wbps));
        }

        LIST_FOREACH(items, iaai, c->ip_address_allow) {
                _cleanup_free_ char *k = NULL;

                (void) in_addr_to_string(iaai->family, &iaai->address, &k);
                fprintf(f, "%sIPAddressAllow=%s/%u\n", prefix, strnull(k), iaai->prefixlen);
        }

        LIST_FOREACH(items, iaai, c->ip_address_deny) {
                _cleanup_free_ char *k = NULL;

                (void) in_addr_to_string(iaai->family, &iaai->address, &k);
                fprintf(f, "%sIPAddressDeny=%s/%u\n", prefix, strnull(k), iaai->prefixlen);
        }
}
static int lookup_block_device(const char *p, dev_t *dev) {
        struct stat st;
        int r;

        assert(p);
        assert(dev);

        r = stat(p, &st);
        if (r < 0)
                return log_warning_errno(errno, "Couldn't stat device %s: %m", p);

        if (S_ISBLK(st.st_mode))
                *dev = st.st_rdev;
        else if (major(st.st_dev) != 0) {
                /* If this is not a device node then find the block
                 * device this file is stored on */
                *dev = st.st_dev;

                /* If this is a partition, try to get the originating
                 * block device */
                block_get_whole_disk(*dev, dev);
        } else {
                log_warning("%s is not a block device and file system block device cannot be determined or is not local.", p);
                return -ENODEV;
        }

        return 0;
}
static int whitelist_device(const char *path, const char *node, const char *acc) {
        /* 'c'/'b' + ' ' + <major>:<minor> + ' ' + "rwm" + NUL */
        char buf[2+DECIMAL_STR_MAX(dev_t)*2+2+4];
        struct stat st;
        bool ignore_notfound;
        int r;

        assert(path);
        assert(acc);

        if (node[0] == '-') {
                /* Non-existent paths starting with "-" must be silently ignored */
                node++;
                ignore_notfound = true;
        } else
                ignore_notfound = false;

        if (stat(node, &st) < 0) {
                if (errno == ENOENT && ignore_notfound)
                        return 0;

                return log_warning_errno(errno, "Couldn't stat device %s: %m", node);
        }

        if (!S_ISCHR(st.st_mode) && !S_ISBLK(st.st_mode)) {
                log_warning("%s is not a device.", node);
                return -ENODEV;
        }

        sprintf(buf,
                "%c %u:%u %s",
                S_ISCHR(st.st_mode) ? 'c' : 'b',
                major(st.st_rdev), minor(st.st_rdev),
                acc);

        r = cg_set_attribute("devices", path, "devices.allow", buf);
        if (r < 0)
                log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                               "Failed to set devices.allow on %s: %m", path);

        return r;
}
static int whitelist_major(const char *path, const char *name, char type, const char *acc) {
        _cleanup_fclose_ FILE *f = NULL;
        char line[LINE_MAX];
        bool good = false;
        int r;

        assert(path);
        assert(acc);
        assert(IN_SET(type, 'b', 'c'));

        f = fopen("/proc/devices", "re");
        if (!f)
                return log_warning_errno(errno, "Cannot open /proc/devices to resolve %s (%c): %m", name, type);

        FOREACH_LINE(line, f, goto fail) {
                char buf[2+DECIMAL_STR_MAX(unsigned)+3+4], *p, *w;
                unsigned maj;

                truncate_nl(line);

                if (type == 'c' && streq(line, "Character devices:")) {
                        good = true;
                        continue;
                }

                if (type == 'b' && streq(line, "Block devices:")) {
                        good = true;
                        continue;
                }

                if (isempty(line)) {
                        good = false;
                        continue;
                }

                if (!good)
                        continue;

                p = strstrip(line);

                w = strpbrk(p, WHITESPACE);
                if (!w)
                        continue;
                *w = 0;

                r = safe_atou(p, &maj);
                if (r < 0)
                        continue;
                if (maj <= 0)
                        continue;

                w++;
                w += strspn(w, WHITESPACE);

                if (fnmatch(name, w, 0) != 0)
                        continue;

                sprintf(buf,
                        "%c %u:* %s",
                        type,
                        maj,
                        acc);

                r = cg_set_attribute("devices", path, "devices.allow", buf);
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set devices.allow on %s: %m", path);
        }

        return 0;

fail:
        return log_warning_errno(errno, "Failed to read /proc/devices: %m");
}
static bool cgroup_context_has_cpu_weight(CGroupContext *c) {
        return c->cpu_weight != CGROUP_WEIGHT_INVALID ||
                c->startup_cpu_weight != CGROUP_WEIGHT_INVALID;
}

static bool cgroup_context_has_cpu_shares(CGroupContext *c) {
        return c->cpu_shares != CGROUP_CPU_SHARES_INVALID ||
                c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID;
}

static uint64_t cgroup_context_cpu_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->startup_cpu_weight;
        else if (c->cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->cpu_weight;
        else
                return CGROUP_WEIGHT_DEFAULT;
}

static uint64_t cgroup_context_cpu_shares(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->startup_cpu_shares;
        else if (c->cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->cpu_shares;
        else
                return CGROUP_CPU_SHARES_DEFAULT;
}
static void cgroup_apply_unified_cpu_config(Unit *u, uint64_t weight, uint64_t quota) {
        char buf[MAX(DECIMAL_STR_MAX(uint64_t) + 1, (DECIMAL_STR_MAX(usec_t) + 1) * 2)];
        int r;

        xsprintf(buf, "%" PRIu64 "\n", weight);
        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.weight", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.weight: %m");

        if (quota != USEC_INFINITY)
                xsprintf(buf, USEC_FMT " " USEC_FMT "\n",
                         quota * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC, CGROUP_CPU_QUOTA_PERIOD_USEC);
        else
                xsprintf(buf, "max " USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);

        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.max", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.max: %m");
}
static void cgroup_apply_legacy_cpu_config(Unit *u, uint64_t shares, uint64_t quota) {
        char buf[MAX(DECIMAL_STR_MAX(uint64_t), DECIMAL_STR_MAX(usec_t)) + 1];
        int r;

        xsprintf(buf, "%" PRIu64 "\n", shares);
        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.shares", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.shares: %m");

        xsprintf(buf, USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);
        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_period_us", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.cfs_period_us: %m");

        if (quota != USEC_INFINITY) {
                xsprintf(buf, USEC_FMT "\n", quota * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC);
                r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_quota_us", buf);
        } else
                r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_quota_us", "-1");
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.cfs_quota_us: %m");
}
static uint64_t cgroup_cpu_shares_to_weight(uint64_t shares) {
        return CLAMP(shares * CGROUP_WEIGHT_DEFAULT / CGROUP_CPU_SHARES_DEFAULT,
                     CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}

static uint64_t cgroup_cpu_weight_to_shares(uint64_t weight) {
        return CLAMP(weight * CGROUP_CPU_SHARES_DEFAULT / CGROUP_WEIGHT_DEFAULT,
                     CGROUP_CPU_SHARES_MIN, CGROUP_CPU_SHARES_MAX);
}
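/* Worked example of the conversion above: with CGROUP_CPU_SHARES_DEFAULT ==
 * 1024 and CGROUP_WEIGHT_DEFAULT == 100, CPUShares=1024 maps to CPUWeight=100,
 * CPUShares=2048 to CPUWeight=200 and CPUShares=512 to CPUWeight=50, with the
 * result clamped to the valid range in both directions. */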
static bool cgroup_context_has_io_config(CGroupContext *c) {
        return c->io_accounting ||
                c->io_weight != CGROUP_WEIGHT_INVALID ||
                c->startup_io_weight != CGROUP_WEIGHT_INVALID ||
                c->io_device_weights ||
                c->io_device_limits;
}

static bool cgroup_context_has_blockio_config(CGroupContext *c) {
        return c->blockio_accounting ||
                c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
                c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
                c->blockio_device_weights ||
                c->blockio_device_bandwidths;
}
static uint64_t cgroup_context_io_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_io_weight != CGROUP_WEIGHT_INVALID)
                return c->startup_io_weight;
        else if (c->io_weight != CGROUP_WEIGHT_INVALID)
                return c->io_weight;
        else
                return CGROUP_WEIGHT_DEFAULT;
}

static uint64_t cgroup_context_blkio_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
                return c->startup_blockio_weight;
        else if (c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
                return c->blockio_weight;
        else
                return CGROUP_BLKIO_WEIGHT_DEFAULT;
}

static uint64_t cgroup_weight_blkio_to_io(uint64_t blkio_weight) {
        return CLAMP(blkio_weight * CGROUP_WEIGHT_DEFAULT / CGROUP_BLKIO_WEIGHT_DEFAULT,
                     CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}

static uint64_t cgroup_weight_io_to_blkio(uint64_t io_weight) {
        return CLAMP(io_weight * CGROUP_BLKIO_WEIGHT_DEFAULT / CGROUP_WEIGHT_DEFAULT,
                     CGROUP_BLKIO_WEIGHT_MIN, CGROUP_BLKIO_WEIGHT_MAX);
}
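/* Same idea as the CPU conversion above, anchored at the two defaults: with
 * CGROUP_BLKIO_WEIGHT_DEFAULT == 500 and CGROUP_WEIGHT_DEFAULT == 100,
 * BlockIOWeight=500 maps to IOWeight=100 and BlockIOWeight=1000 to
 * IOWeight=200, clamped to the respective min/max. */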
static void cgroup_apply_io_device_weight(Unit *u, const char *dev_path, uint64_t io_weight) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), io_weight);
        r = cg_set_attribute("io", u->cgroup_path, "io.weight", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set io.weight: %m");
}

static void cgroup_apply_blkio_device_weight(Unit *u, const char *dev_path, uint64_t blkio_weight) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), blkio_weight);
        r = cg_set_attribute("blkio", u->cgroup_path, "blkio.weight_device", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set blkio.weight_device: %m");
}
static unsigned cgroup_apply_io_device_limit(Unit *u, const char *dev_path, uint64_t *limits) {
        char limit_bufs[_CGROUP_IO_LIMIT_TYPE_MAX][DECIMAL_STR_MAX(uint64_t)];
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+(6+DECIMAL_STR_MAX(uint64_t)+1)*4];
        CGroupIOLimitType type;
        dev_t dev;
        unsigned n = 0;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return 0;

        for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++) {
                if (limits[type] != cgroup_io_limit_defaults[type]) {
                        xsprintf(limit_bufs[type], "%" PRIu64, limits[type]);
                        n++;
                } else
                        xsprintf(limit_bufs[type], "%s", limits[type] == CGROUP_LIMIT_MAX ? "max" : "0");
        }

        xsprintf(buf, "%u:%u rbps=%s wbps=%s riops=%s wiops=%s\n", major(dev), minor(dev),
                 limit_bufs[CGROUP_IO_RBPS_MAX], limit_bufs[CGROUP_IO_WBPS_MAX],
                 limit_bufs[CGROUP_IO_RIOPS_MAX], limit_bufs[CGROUP_IO_WIOPS_MAX]);
        r = cg_set_attribute("io", u->cgroup_path, "io.max", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set io.max: %m");

        return n;
}
static unsigned cgroup_apply_blkio_device_limit(Unit *u, const char *dev_path, uint64_t rbps, uint64_t wbps) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        unsigned n = 0;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return 0;

        if (rbps != CGROUP_LIMIT_MAX)
                n++;
        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), rbps);
        r = cg_set_attribute("blkio", u->cgroup_path, "blkio.throttle.read_bps_device", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set blkio.throttle.read_bps_device: %m");

        if (wbps != CGROUP_LIMIT_MAX)
                n++;
        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), wbps);
        r = cg_set_attribute("blkio", u->cgroup_path, "blkio.throttle.write_bps_device", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set blkio.throttle.write_bps_device: %m");

        return n;
}
static bool cgroup_context_has_unified_memory_config(CGroupContext *c) {
        return c->memory_low > 0 ||
                c->memory_high != CGROUP_LIMIT_MAX ||
                c->memory_max != CGROUP_LIMIT_MAX ||
                c->memory_swap_max != CGROUP_LIMIT_MAX;
}
static void cgroup_apply_unified_memory_limit(Unit *u, const char *file, uint64_t v) {
        char buf[DECIMAL_STR_MAX(uint64_t) + 1] = "max";
        int r;

        if (v != CGROUP_LIMIT_MAX)
                xsprintf(buf, "%" PRIu64 "\n", v);

        r = cg_set_attribute("memory", u->cgroup_path, file, buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set %s: %m", file);
}
static void cgroup_apply_firewall(Unit *u, CGroupContext *c) {
        int r;

        assert(u);

        if (u->type == UNIT_SLICE) /* Skip this for slice units, they are inner cgroup nodes, and since bpf/cgroup is
                                    * not recursive we don't ever touch the bpf on them */
                return;

        r = bpf_firewall_compile(u);
        if (r < 0)
                return;

        (void) bpf_firewall_install(u);
}
static void cgroup_context_apply(
                Unit *u,
                CGroupMask apply_mask,
                bool apply_bpf,
                ManagerState state) {

        const char *path;
        CGroupContext *c;
        bool is_root;
        int r;

        assert(u);

        c = unit_get_cgroup_context(u);
        path = u->cgroup_path;

        assert(c);
        assert(path);

        /* Nothing to do? Exit early! */
        if (apply_mask == 0 && !apply_bpf)
                return;

        /* Some cgroup attributes are not supported on the root cgroup,
         * hence silently ignore */
        is_root = isempty(path) || path_equal(path, "/");
        if (is_root)
                /* Make sure we don't try to display messages with an empty path. */
                path = "/";

        /* We generally ignore errors caused by read-only mounted
         * cgroup trees (assuming we are running in a container then),
         * and missing cgroups, i.e. EROFS and ENOENT. */

        if ((apply_mask & CGROUP_MASK_CPU) && !is_root) {
                bool has_weight, has_shares;

                has_weight = cgroup_context_has_cpu_weight(c);
                has_shares = cgroup_context_has_cpu_shares(c);

                if (cg_all_unified() > 0) {
                        uint64_t weight;

                        if (has_weight)
                                weight = cgroup_context_cpu_weight(c, state);
                        else if (has_shares) {
                                uint64_t shares = cgroup_context_cpu_shares(c, state);

                                weight = cgroup_cpu_shares_to_weight(shares);

                                log_cgroup_compat(u, "Applying [Startup]CpuShares %" PRIu64 " as [Startup]CpuWeight %" PRIu64 " on %s",
                                                  shares, weight, path);
                        } else
                                weight = CGROUP_WEIGHT_DEFAULT;

                        cgroup_apply_unified_cpu_config(u, weight, c->cpu_quota_per_sec_usec);
                } else {
                        uint64_t shares;

                        if (has_weight) {
                                uint64_t weight = cgroup_context_cpu_weight(c, state);

                                shares = cgroup_cpu_weight_to_shares(weight);

                                log_cgroup_compat(u, "Applying [Startup]CpuWeight %" PRIu64 " as [Startup]CpuShares %" PRIu64 " on %s",
                                                  weight, shares, path);
                        } else if (has_shares)
                                shares = cgroup_context_cpu_shares(c, state);
                        else
                                shares = CGROUP_CPU_SHARES_DEFAULT;

                        cgroup_apply_legacy_cpu_config(u, shares, c->cpu_quota_per_sec_usec);
                }
        }

        if (apply_mask & CGROUP_MASK_IO) {
                bool has_io = cgroup_context_has_io_config(c);
                bool has_blockio = cgroup_context_has_blockio_config(c);

                if (!is_root) {
                        char buf[8+DECIMAL_STR_MAX(uint64_t)+1];
                        uint64_t weight;

                        if (has_io)
                                weight = cgroup_context_io_weight(c, state);
                        else if (has_blockio) {
                                uint64_t blkio_weight = cgroup_context_blkio_weight(c, state);

                                weight = cgroup_weight_blkio_to_io(blkio_weight);

                                log_cgroup_compat(u, "Applying [Startup]BlockIOWeight %" PRIu64 " as [Startup]IOWeight %" PRIu64,
                                                  blkio_weight, weight);
                        } else
                                weight = CGROUP_WEIGHT_DEFAULT;

                        xsprintf(buf, "default %" PRIu64 "\n", weight);
                        r = cg_set_attribute("io", path, "io.weight", buf);
                        if (r < 0)
                                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                              "Failed to set io.weight: %m");

                        if (has_io) {
                                CGroupIODeviceWeight *w;

                                /* FIXME: no way to reset this list */
                                LIST_FOREACH(device_weights, w, c->io_device_weights)
                                        cgroup_apply_io_device_weight(u, w->path, w->weight);
                        } else if (has_blockio) {
                                CGroupBlockIODeviceWeight *w;

                                /* FIXME: no way to reset this list */
                                LIST_FOREACH(device_weights, w, c->blockio_device_weights) {
                                        weight = cgroup_weight_blkio_to_io(w->weight);

                                        log_cgroup_compat(u, "Applying BlockIODeviceWeight %" PRIu64 " as IODeviceWeight %" PRIu64 " for %s",
                                                          w->weight, weight, w->path);

                                        cgroup_apply_io_device_weight(u, w->path, weight);
                                }
                        }
                }

                /* Apply limits and free ones without config. */
                if (has_io) {
                        CGroupIODeviceLimit *l, *next;

                        LIST_FOREACH_SAFE(device_limits, l, next, c->io_device_limits) {
                                if (!cgroup_apply_io_device_limit(u, l->path, l->limits))
                                        cgroup_context_free_io_device_limit(c, l);
                        }
                } else if (has_blockio) {
                        CGroupBlockIODeviceBandwidth *b, *next;

                        LIST_FOREACH_SAFE(device_bandwidths, b, next, c->blockio_device_bandwidths) {
                                uint64_t limits[_CGROUP_IO_LIMIT_TYPE_MAX];
                                CGroupIOLimitType type;

                                for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                                        limits[type] = cgroup_io_limit_defaults[type];

                                limits[CGROUP_IO_RBPS_MAX] = b->rbps;
                                limits[CGROUP_IO_WBPS_MAX] = b->wbps;

                                log_cgroup_compat(u, "Applying BlockIO{Read|Write}Bandwidth %" PRIu64 " %" PRIu64 " as IO{Read|Write}BandwidthMax for %s",
                                                  b->rbps, b->wbps, b->path);

                                if (!cgroup_apply_io_device_limit(u, b->path, limits))
                                        cgroup_context_free_blockio_device_bandwidth(c, b);
                        }
                }
        }

        if (apply_mask & CGROUP_MASK_BLKIO) {
                bool has_io = cgroup_context_has_io_config(c);
                bool has_blockio = cgroup_context_has_blockio_config(c);

                if (!is_root) {
                        char buf[DECIMAL_STR_MAX(uint64_t)+1];
                        uint64_t weight;

                        if (has_io) {
                                uint64_t io_weight = cgroup_context_io_weight(c, state);

                                weight = cgroup_weight_io_to_blkio(cgroup_context_io_weight(c, state));

                                log_cgroup_compat(u, "Applying [Startup]IOWeight %" PRIu64 " as [Startup]BlockIOWeight %" PRIu64,
                                                  io_weight, weight);
                        } else if (has_blockio)
                                weight = cgroup_context_blkio_weight(c, state);
                        else
                                weight = CGROUP_BLKIO_WEIGHT_DEFAULT;

                        xsprintf(buf, "%" PRIu64 "\n", weight);
                        r = cg_set_attribute("blkio", path, "blkio.weight", buf);
                        if (r < 0)
                                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                              "Failed to set blkio.weight: %m");

                        if (has_io) {
                                CGroupIODeviceWeight *w;

                                /* FIXME: no way to reset this list */
                                LIST_FOREACH(device_weights, w, c->io_device_weights) {
                                        weight = cgroup_weight_io_to_blkio(w->weight);

                                        log_cgroup_compat(u, "Applying IODeviceWeight %" PRIu64 " as BlockIODeviceWeight %" PRIu64 " for %s",
                                                          w->weight, weight, w->path);

                                        cgroup_apply_blkio_device_weight(u, w->path, weight);
                                }
                        } else if (has_blockio) {
                                CGroupBlockIODeviceWeight *w;

                                /* FIXME: no way to reset this list */
                                LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                                        cgroup_apply_blkio_device_weight(u, w->path, w->weight);
                        }
                }

                /* Apply limits and free ones without config. */
                if (has_io) {
                        CGroupIODeviceLimit *l, *next;

                        LIST_FOREACH_SAFE(device_limits, l, next, c->io_device_limits) {
                                log_cgroup_compat(u, "Applying IO{Read|Write}Bandwidth %" PRIu64 " %" PRIu64 " as BlockIO{Read|Write}BandwidthMax for %s",
                                                  l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX], l->path);

                                if (!cgroup_apply_blkio_device_limit(u, l->path, l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX]))
                                        cgroup_context_free_io_device_limit(c, l);
                        }
                } else if (has_blockio) {
                        CGroupBlockIODeviceBandwidth *b, *next;

                        LIST_FOREACH_SAFE(device_bandwidths, b, next, c->blockio_device_bandwidths)
                                if (!cgroup_apply_blkio_device_limit(u, b->path, b->rbps, b->wbps))
                                        cgroup_context_free_blockio_device_bandwidth(c, b);
                }
        }

        if ((apply_mask & CGROUP_MASK_MEMORY) && !is_root) {
                if (cg_all_unified() > 0) {
                        uint64_t max, swap_max = CGROUP_LIMIT_MAX;

                        if (cgroup_context_has_unified_memory_config(c)) {
                                max = c->memory_max;
                                swap_max = c->memory_swap_max;
                        } else {
                                max = c->memory_limit;

                                if (max != CGROUP_LIMIT_MAX)
                                        log_cgroup_compat(u, "Applying MemoryLimit %" PRIu64 " as MemoryMax", max);
                        }

                        cgroup_apply_unified_memory_limit(u, "memory.low", c->memory_low);
                        cgroup_apply_unified_memory_limit(u, "memory.high", c->memory_high);
                        cgroup_apply_unified_memory_limit(u, "memory.max", max);
                        cgroup_apply_unified_memory_limit(u, "memory.swap.max", swap_max);
                } else {
                        char buf[DECIMAL_STR_MAX(uint64_t) + 1];
                        uint64_t val;

                        if (cgroup_context_has_unified_memory_config(c)) {
                                val = c->memory_max;
                                log_cgroup_compat(u, "Applying MemoryMax %" PRIi64 " as MemoryLimit", val);
                        } else
                                val = c->memory_limit;

                        if (val == CGROUP_LIMIT_MAX)
                                strncpy(buf, "-1\n", sizeof(buf));
                        else
                                xsprintf(buf, "%" PRIu64 "\n", val);

                        r = cg_set_attribute("memory", path, "memory.limit_in_bytes", buf);
                        if (r < 0)
                                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                              "Failed to set memory.limit_in_bytes: %m");
                }
        }

        if ((apply_mask & CGROUP_MASK_DEVICES) && !is_root) {
                CGroupDeviceAllow *a;

                /* Changing the devices list of a populated cgroup
                 * might result in EINVAL, hence ignore EINVAL
                 * here. */

                if (c->device_allow || c->device_policy != CGROUP_AUTO)
                        r = cg_set_attribute("devices", path, "devices.deny", "a");
                else
                        r = cg_set_attribute("devices", path, "devices.allow", "a");
                if (r < 0)
                        log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                      "Failed to reset devices.list: %m");

                if (c->device_policy == CGROUP_CLOSED ||
                    (c->device_policy == CGROUP_AUTO && c->device_allow)) {
                        static const char auto_devices[] =
                                "/dev/null\0" "rwm\0"
                                "/dev/zero\0" "rwm\0"
                                "/dev/full\0" "rwm\0"
                                "/dev/random\0" "rwm\0"
                                "/dev/urandom\0" "rwm\0"
                                "/dev/tty\0" "rwm\0"
                                "/dev/pts/ptmx\0" "rw\0" /* /dev/pts/ptmx may not be duplicated, but accessed */
                                /* Allow /run/systemd/inaccessible/{chr,blk} devices for mapping InaccessiblePaths */
                                "-/run/systemd/inaccessible/chr\0" "rwm\0"
                                "-/run/systemd/inaccessible/blk\0" "rwm\0";

                        const char *x, *y;

                        NULSTR_FOREACH_PAIR(x, y, auto_devices)
                                whitelist_device(path, x, y);

                        whitelist_major(path, "pts", 'c', "rw");
                }

                LIST_FOREACH(device_allow, a, c->device_allow) {
                        char acc[4], *val;
                        unsigned k = 0;

                        if (a->r)
                                acc[k++] = 'r';
                        if (a->w)
                                acc[k++] = 'w';
                        if (a->m)
                                acc[k++] = 'm';

                        if (k == 0)
                                continue;

                        acc[k++] = 0;

                        if (path_startswith(a->path, "/dev/"))
                                whitelist_device(path, a->path, acc);
                        else if ((val = startswith(a->path, "block-")))
                                whitelist_major(path, val, 'b', acc);
                        else if ((val = startswith(a->path, "char-")))
                                whitelist_major(path, val, 'c', acc);
                        else
                                log_unit_debug(u, "Ignoring device %s while writing cgroup attribute.", a->path);
                }
        }

        if ((apply_mask & CGROUP_MASK_PIDS) && !is_root) {

                if (c->tasks_max != CGROUP_LIMIT_MAX) {
                        char buf[DECIMAL_STR_MAX(uint64_t) + 2];

                        sprintf(buf, "%" PRIu64 "\n", c->tasks_max);
                        r = cg_set_attribute("pids", path, "pids.max", buf);
                } else
                        r = cg_set_attribute("pids", path, "pids.max", "max");

                if (r < 0)
                        log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                      "Failed to set pids.max: %m");
        }

        if (apply_bpf)
                cgroup_apply_firewall(u, c);
}
CGroupMask cgroup_context_get_mask(CGroupContext *c) {
        CGroupMask mask = 0;

        /* Figure out which controllers we need */

        if (c->cpu_accounting ||
            cgroup_context_has_cpu_weight(c) ||
            cgroup_context_has_cpu_shares(c) ||
            c->cpu_quota_per_sec_usec != USEC_INFINITY)
                mask |= CGROUP_MASK_CPUACCT | CGROUP_MASK_CPU;

        if (cgroup_context_has_io_config(c) || cgroup_context_has_blockio_config(c))
                mask |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;

        if (c->memory_accounting ||
            c->memory_limit != CGROUP_LIMIT_MAX ||
            cgroup_context_has_unified_memory_config(c))
                mask |= CGROUP_MASK_MEMORY;

        if (c->device_allow ||
            c->device_policy != CGROUP_AUTO)
                mask |= CGROUP_MASK_DEVICES;

        if (c->tasks_accounting ||
            c->tasks_max != (uint64_t) -1)
                mask |= CGROUP_MASK_PIDS;

        return mask;
}
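/* For example, a unit that only sets TasksMax= ends up with CGROUP_MASK_PIDS
 * here, while setting any of the IO*= or BlockIO*= options pulls in both
 * CGROUP_MASK_IO and CGROUP_MASK_BLKIO, so that the compat translation in
 * cgroup_context_apply() can work in either direction. */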
CGroupMask unit_get_own_mask(Unit *u) {
        CGroupContext *c;

        /* Returns the mask of controllers the unit needs for itself */

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        /* If delegation is turned on, then turn on all cgroups,
         * unless we are on the legacy hierarchy and the process we
         * fork into it is known to drop privileges, and hence
         * shouldn't get access to the controllers.
         *
         * Note that on the unified hierarchy it is safe to delegate
         * controllers to unprivileged services. */

        if (c->delegate) {
                ExecContext *e;

                e = unit_get_exec_context(u);
                if (!e ||
                    exec_context_maintains_privileges(e) ||
                    cg_all_unified() > 0)
                        return _CGROUP_MASK_ALL;
        }

        return cgroup_context_get_mask(c);
}
CGroupMask unit_get_members_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's children
         * require, merged */

        if (u->cgroup_members_mask_valid)
                return u->cgroup_members_mask;

        u->cgroup_members_mask = 0;

        if (u->type == UNIT_SLICE) {
                Unit *member;
                Iterator i;

                SET_FOREACH(member, u->dependencies[UNIT_BEFORE], i) {

                        if (member == u)
                                continue;

                        if (UNIT_DEREF(member->slice) != u)
                                continue;

                        u->cgroup_members_mask |=
                                unit_get_own_mask(member) |
                                unit_get_members_mask(member);
                }
        }

        u->cgroup_members_mask_valid = true;
        return u->cgroup_members_mask;
}
CGroupMask unit_get_siblings_mask(Unit *u) {

        /* Returns the mask of controllers all of the unit's siblings
         * require, i.e. the members mask of the unit's parent slice
         * if there is one. */

        if (UNIT_ISSET(u->slice))
                return unit_get_members_mask(UNIT_DEREF(u->slice));

        return unit_get_own_mask(u) | unit_get_members_mask(u);
}

CGroupMask unit_get_subtree_mask(Unit *u) {

        /* Returns the mask of this subtree, meaning of the group
         * itself and its children. */

        return unit_get_own_mask(u) | unit_get_members_mask(u);
}
CGroupMask unit_get_target_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable
         * for a specific cgroup, i.e. everything it needs itself,
         * plus all that its children need, plus all that its siblings
         * need. This is primarily useful on the legacy cgroup
         * hierarchy, where we need to duplicate each cgroup in each
         * hierarchy that shall be enabled for it. */

        mask = unit_get_own_mask(u) | unit_get_members_mask(u) | unit_get_siblings_mask(u);
        mask &= u->manager->cgroup_supported;

        return mask;
}

CGroupMask unit_get_enable_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable
         * for the children of a specific cgroup. This is primarily
         * useful for the unified cgroup hierarchy, where each cgroup
         * controls which controllers are enabled for its children. */

        mask = unit_get_members_mask(u);
        mask &= u->manager->cgroup_supported;

        return mask;
}
bool unit_get_needs_bpf(Unit *u) {
        CGroupContext *c;
        Unit *p;

        assert(u);

        /* We never attach BPF to slice units, as they are inner cgroup nodes and cgroup/BPF is not recursive at the
         * moment. */
        if (u->type == UNIT_SLICE)
                return false;

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        if (c->ip_accounting ||
            c->ip_address_allow ||
            c->ip_address_deny)
                return true;

        /* If any parent slice has an IP access list defined, it applies too */
        for (p = UNIT_DEREF(u->slice); p; p = UNIT_DEREF(p->slice)) {
                c = unit_get_cgroup_context(p);
                if (!c)
                        return false;

                if (c->ip_address_allow ||
                    c->ip_address_deny)
                        return true;
        }

        return false;
}
/* Recurse from a unit up through its containing slices, propagating
 * mask bits upward. A unit is also member of itself. */
void unit_update_cgroup_members_masks(Unit *u) {
        CGroupMask m;
        bool more;

        assert(u);

        /* Calculate subtree mask */
        m = unit_get_subtree_mask(u);

        /* See if anything changed from the previous invocation. If
         * not, we're done. */
        if (u->cgroup_subtree_mask_valid && m == u->cgroup_subtree_mask)
                return;

        more =
                u->cgroup_subtree_mask_valid &&
                ((m & ~u->cgroup_subtree_mask) != 0) &&
                ((~m & u->cgroup_subtree_mask) == 0);

        u->cgroup_subtree_mask = m;
        u->cgroup_subtree_mask_valid = true;

        if (UNIT_ISSET(u->slice)) {
                Unit *s = UNIT_DEREF(u->slice);

                if (more)
                        /* There's more set now than before. We
                         * propagate the new mask to the parent's mask
                         * (not caring if it actually was valid or
                         * not). */
                        s->cgroup_members_mask |= m;
                else
                        /* There's less set now than before (or we
                         * don't know), we need to recalculate
                         * everything, so let's invalidate the
                         * parent's members mask */
                        s->cgroup_members_mask_valid = false;

                /* And now make sure that this change also hits our
                 * grandparents */
                unit_update_cgroup_members_masks(s);
        }
}
static const char *migrate_callback(CGroupMask mask, void *userdata) {
        Unit *u = userdata;

        assert(mask != 0);
        assert(u);

        while (u) {
                if (u->cgroup_path &&
                    u->cgroup_realized &&
                    (u->cgroup_realized_mask & mask) == mask)
                        return u->cgroup_path;

                u = UNIT_DEREF(u->slice);
        }

        return NULL;
}
char *unit_default_cgroup_path(Unit *u) {
        _cleanup_free_ char *escaped = NULL, *slice = NULL;
        int r;

        assert(u);

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return strdup(u->manager->cgroup_root);

        if (UNIT_ISSET(u->slice) && !unit_has_name(UNIT_DEREF(u->slice), SPECIAL_ROOT_SLICE)) {
                r = cg_slice_to_path(UNIT_DEREF(u->slice)->id, &slice);
                if (r < 0)
                        return NULL;
        }

        escaped = cg_escape(u->id);
        if (!escaped)
                return NULL;

        if (slice)
                return strjoin(u->manager->cgroup_root, "/", slice, "/",
                               escaped);
        else
                return strjoin(u->manager->cgroup_root, "/", escaped);
}
int unit_set_cgroup_path(Unit *u, const char *path) {
        _cleanup_free_ char *p = NULL;
        int r;

        assert(u);

        if (path) {
                p = strdup(path);
                if (!p)
                        return -ENOMEM;
        }

        if (streq_ptr(u->cgroup_path, p))
                return 0;

        if (p) {
                r = hashmap_put(u->manager->cgroup_unit, p, u);
                if (r < 0)
                        return r;
        }

        unit_release_cgroup(u);

        u->cgroup_path = p;
        p = NULL;

        return 0;
}
int unit_watch_cgroup(Unit *u) {
        _cleanup_free_ char *events = NULL;
        int r;

        assert(u);

        if (!u->cgroup_path)
                return 0;

        if (u->cgroup_inotify_wd >= 0)
                return 0;

        /* Only applies to the unified hierarchy */
        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return log_error_errno(r, "Failed to determine whether the name=systemd hierarchy is unified: %m");
        if (r == 0)
                return 0;

        /* Don't watch the root slice, it's pointless. */
        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        r = hashmap_ensure_allocated(&u->manager->cgroup_inotify_wd_unit, &trivial_hash_ops);
        if (r < 0)
                return log_oom();

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events", &events);
        if (r < 0)
                return log_oom();

        u->cgroup_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, events, IN_MODIFY);
        if (u->cgroup_inotify_wd < 0) {

                if (errno == ENOENT) /* If the directory is already
                                      * gone we don't need to track
                                      * it, so this is not an error */
                        return 0;

                return log_unit_error_errno(u, errno, "Failed to add inotify watch descriptor for control group %s: %m", u->cgroup_path);
        }

        r = hashmap_put(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd), u);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to add inotify watch descriptor to hash map: %m");

        return 0;
}
static int unit_create_cgroup(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask,
                bool needs_bpf) {

        CGroupContext *c;
        int r;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        if (!u->cgroup_path) {
                _cleanup_free_ char *path = NULL;

                path = unit_default_cgroup_path(u);
                if (!path)
                        return log_oom();

                r = unit_set_cgroup_path(u, path);
                if (r == -EEXIST)
                        return log_unit_error_errno(u, r, "Control group %s exists already.", path);
                if (r < 0)
                        return log_unit_error_errno(u, r, "Failed to set unit's control group path to %s: %m", path);
        }

        /* First, create our own group */
        r = cg_create_everywhere(u->manager->cgroup_supported, target_mask, u->cgroup_path);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to create cgroup %s: %m", u->cgroup_path);

        /* Start watching it */
        (void) unit_watch_cgroup(u);

        /* Enable all controllers we need */
        r = cg_enable_everywhere(u->manager->cgroup_supported, enable_mask, u->cgroup_path);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enable controllers on cgroup %s, ignoring: %m", u->cgroup_path);

        /* Keep track that this is now realized */
        u->cgroup_realized = true;
        u->cgroup_realized_mask = target_mask;
        u->cgroup_enabled_mask = enable_mask;
        u->cgroup_bpf_state = needs_bpf ? UNIT_CGROUP_BPF_ON : UNIT_CGROUP_BPF_OFF;

        if (u->type != UNIT_SLICE && !c->delegate) {

                /* Then, possibly move things over, but not if
                 * subgroups may contain processes, which is the case
                 * for slice and delegation units. */
                r = cg_migrate_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->cgroup_path, migrate_callback, u);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to migrate cgroup to %s, ignoring: %m", u->cgroup_path);
        }

        return 0;
}
int unit_attach_pids_to_cgroup(Unit *u) {
        int r;

        assert(u);

        r = unit_realize_cgroup(u);
        if (r < 0)
                return r;

        r = cg_attach_many_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->pids, migrate_callback, u);
        if (r < 0)
                return r;

        return 0;
}
static void cgroup_xattr_apply(Unit *u) {
        char ids[SD_ID128_STRING_MAX];
        int r;

        assert(u);

        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        if (sd_id128_is_null(u->invocation_id))
                return;

        r = cg_set_xattr(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                         "trusted.invocation_id",
                         sd_id128_to_string(u->invocation_id, ids), 32,
                         0);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to set invocation ID on control group %s, ignoring: %m", u->cgroup_path);
}
static bool unit_has_mask_realized(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask,
                bool needs_bpf) {

        assert(u);

        return u->cgroup_realized &&
                u->cgroup_realized_mask == target_mask &&
                u->cgroup_enabled_mask == enable_mask &&
                ((needs_bpf && u->cgroup_bpf_state == UNIT_CGROUP_BPF_ON) ||
                 (!needs_bpf && u->cgroup_bpf_state == UNIT_CGROUP_BPF_OFF));
}
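/* Note that the BPF state is a tri-state (on/off/invalidated, see
 * unit_invalidate_cgroup_bpf() below): an invalidated state matches neither
 * branch above, so the unit counts as not realized and the firewall gets
 * recompiled on the next realization. */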
/* Check if necessary controllers and attributes for a unit are in place.
 *
 * If so, do nothing.
 * If not, create paths, move processes over, and set attributes.
 *
 * Returns 0 on success and < 0 on failure. */
static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
        CGroupMask target_mask, enable_mask;
        bool needs_bpf, apply_bpf;
        int r;

        assert(u);

        if (u->in_cgroup_realize_queue) {
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
                u->in_cgroup_realize_queue = false;
        }

        target_mask = unit_get_target_mask(u);
        enable_mask = unit_get_enable_mask(u);
        needs_bpf = unit_get_needs_bpf(u);

        if (unit_has_mask_realized(u, target_mask, enable_mask, needs_bpf))
                return 0;

        /* Make sure we apply the BPF filters either when one is configured, or if none is configured but previously
         * the state was anything but off. This way, if a unit with a BPF filter applied is reconfigured to lose it
         * this will trickle down properly to cgroupfs. */
        apply_bpf = needs_bpf || u->cgroup_bpf_state != UNIT_CGROUP_BPF_OFF;

        /* First, realize parents */
        if (UNIT_ISSET(u->slice)) {
                r = unit_realize_cgroup_now(UNIT_DEREF(u->slice), state);
                if (r < 0)
                        return r;
        }

        /* And then do the real work */
        r = unit_create_cgroup(u, target_mask, enable_mask, needs_bpf);
        if (r < 0)
                return r;

        /* Finally, apply the necessary attributes. */
        cgroup_context_apply(u, target_mask, apply_bpf, state);
        cgroup_xattr_apply(u);

        return 0;
}
static void unit_add_to_cgroup_realize_queue(Unit *u) {
        assert(u);

        if (u->in_cgroup_realize_queue)
                return;

        LIST_PREPEND(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
        u->in_cgroup_realize_queue = true;
}
unsigned manager_dispatch_cgroup_realize_queue(Manager *m) {
        ManagerState state;
        unsigned n = 0;
        Unit *i;
        int r;

        assert(m);

        state = manager_state(m);

        while ((i = m->cgroup_realize_queue)) {
                assert(i->in_cgroup_realize_queue);

                r = unit_realize_cgroup_now(i, state);
                if (r < 0)
                        log_warning_errno(r, "Failed to realize cgroups for queued unit %s, ignoring: %m", i->id);

                n++;
        }

        return n;
}
static void unit_add_siblings_to_cgroup_realize_queue(Unit *u) {
        Unit *slice;

        /* This adds the siblings of the specified unit and the
         * siblings of all parent units to the cgroup queue. (But
         * neither the specified unit itself nor the parents.) */

        while ((slice = UNIT_DEREF(u->slice))) {
                Iterator i;
                Unit *m;

                SET_FOREACH(m, slice->dependencies[UNIT_BEFORE], i) {
                        if (m == u)
                                continue;

                        /* Skip units that have a dependency on the slice
                         * but aren't actually in it. */
                        if (UNIT_DEREF(m->slice) != slice)
                                continue;

                        /* No point in doing cgroup application for units
                         * without active processes. */
                        if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(m)))
                                continue;

                        /* If the unit doesn't need any new controllers
                         * and has current ones realized, it doesn't need
                         * any changes. */
                        if (unit_has_mask_realized(m,
                                                   unit_get_target_mask(m),
                                                   unit_get_enable_mask(m),
                                                   unit_get_needs_bpf(m)))
                                continue;

                        unit_add_to_cgroup_realize_queue(m);
                }

                u = slice;
        }
}
int unit_realize_cgroup(Unit *u) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* So, here's the deal: when realizing the cgroups for this
         * unit, we need to first create all parents, but there's more
         * actually: for the weight-based controllers we also need to
         * make sure that all our siblings (i.e. units that are in the
         * same slice as we are) have cgroups, too. Otherwise, things
         * would become very uneven as each of their processes would
         * get as much resources as all our group together. This call
         * will synchronously create the parent cgroups, but will
         * defer work on the siblings to the next event loop
         * iteration. */

        /* Add all sibling slices to the cgroup queue. */
        unit_add_siblings_to_cgroup_realize_queue(u);

        /* And realize this one now (and apply the values) */
        return unit_realize_cgroup_now(u, manager_state(u->manager));
}
void unit_release_cgroup(Unit *u) {
        assert(u);

        /* Forgets all cgroup details for this cgroup */

        if (u->cgroup_path) {
                (void) hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
                u->cgroup_path = mfree(u->cgroup_path);
        }

        if (u->cgroup_inotify_wd >= 0) {
                if (inotify_rm_watch(u->manager->cgroup_inotify_fd, u->cgroup_inotify_wd) < 0)
                        log_unit_debug_errno(u, errno, "Failed to remove cgroup inotify watch %i for %s, ignoring", u->cgroup_inotify_wd, u->id);

                (void) hashmap_remove(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd));
                u->cgroup_inotify_wd = -1;
        }
}
void unit_prune_cgroup(Unit *u) {
        int r;
        bool is_root_slice;

        assert(u);

        /* Removes the cgroup, if empty and possible, and stops watching it. */

        if (!u->cgroup_path)
                return;

        (void) unit_get_cpu_usage(u, NULL); /* Cache the last CPU usage value before we destroy the cgroup */

        is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);

        r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !is_root_slice);
        if (r < 0) {
                log_unit_debug_errno(u, r, "Failed to destroy cgroup %s, ignoring: %m", u->cgroup_path);
                return;
        }

        if (is_root_slice)
                return;

        unit_release_cgroup(u);

        u->cgroup_realized = false;
        u->cgroup_realized_mask = 0;
        u->cgroup_enabled_mask = 0;
}
int unit_search_main_pid(Unit *u, pid_t *ret) {
        _cleanup_fclose_ FILE *f = NULL;
        pid_t pid = 0, npid, mypid;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENXIO;

        r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, &f);
        if (r < 0)
                return r;

        mypid = getpid_cached();
        while (cg_read_pid(f, &npid) > 0)  {
                pid_t ppid;

                if (npid == pid)
                        continue;

                /* Ignore processes that aren't our kids */
                if (get_process_ppid(npid, &ppid) >= 0 && ppid != mypid)
                        continue;

                if (pid != 0)
                        /* Dang, there's more than one daemonized PID
                        in this group, so we don't know what process
                        is the main process. */
                        return -ENODATA;

                pid = npid;
        }

        *ret = pid;
        return 0;
}
static int unit_watch_pids_in_path(Unit *u, const char *path) {
        _cleanup_closedir_ DIR *d = NULL;
        _cleanup_fclose_ FILE *f = NULL;
        int ret = 0, r;

        assert(u);
        assert(path);

        r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f);
        if (r < 0)
                ret = r;
        else {
                pid_t pid;

                while ((r = cg_read_pid(f, &pid)) > 0) {
                        r = unit_watch_pid(u, pid);
                        if (r < 0 && ret >= 0)
                                ret = r;
                }

                if (r < 0 && ret >= 0)
                        ret = r;
        }

        r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d);
        if (r < 0) {
                if (ret >= 0)
                        ret = r;
        } else {
                char *fn;

                while ((r = cg_read_subgroup(d, &fn)) > 0) {
                        _cleanup_free_ char *p = NULL;

                        p = strjoin(path, "/", fn);
                        free(fn);

                        if (!p)
                                return -ENOMEM;

                        r = unit_watch_pids_in_path(u, p);
                        if (r < 0 && ret >= 0)
                                ret = r;
                }

                if (r < 0 && ret >= 0)
                        ret = r;
        }

        return ret;
}
int unit_watch_all_pids(Unit *u) {
        int r;

        assert(u);

        /* Adds all PIDs from our cgroup to the set of PIDs we
         * watch. This is a fallback logic for cases where we do not
         * get reliable cgroup empty notifications: we try to use
         * SIGCHLD as replacement. */

        if (!u->cgroup_path)
                return -ENOENT;

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return r;
        if (r > 0) /* On unified we can use proper notifications */
                return 0;

        return unit_watch_pids_in_path(u, u->cgroup_path);
}
static int on_cgroup_empty_event(sd_event_source *s, void *userdata) {
        Manager *m = userdata;
        Unit *u;
        int r;

        assert(s);
        assert(m);

        u = m->cgroup_empty_queue;
        if (!u)
                return 0;

        assert(u->in_cgroup_empty_queue);
        u->in_cgroup_empty_queue = false;
        LIST_REMOVE(cgroup_empty_queue, m->cgroup_empty_queue, u);

        if (m->cgroup_empty_queue) {
                /* More stuff queued, let's make sure we remain enabled */
                r = sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);
                if (r < 0)
                        log_debug_errno(r, "Failed to reenable cgroup empty event source: %m");
        }

        unit_add_to_gc_queue(u);

        if (UNIT_VTABLE(u)->notify_cgroup_empty)
                UNIT_VTABLE(u)->notify_cgroup_empty(u);

        return 0;
}
void unit_add_to_cgroup_empty_queue(Unit *u) {
        int r;

        assert(u);

        /* Note that there are four different ways how cgroup empty events reach us:
         *
         * 1. On the unified hierarchy we get an inotify event on the cgroup
         *
         * 2. On the legacy hierarchy, when running in system mode, we get a datagram on the cgroup agent socket
         *
         * 3. On the legacy hierarchy, when running in user mode, we get a D-Bus signal on the system bus
         *
         * 4. On the legacy hierarchy, in service units we start watching all processes of the cgroup for SIGCHLD as
         *    soon as we get one SIGCHLD, to deal with unreliable cgroup notifications.
         *
         * Regardless which way we got the notification, we'll verify it here, and then add it to a separate
         * queue. This queue will be dispatched at a lower priority than the SIGCHLD handler, so that we always use
         * SIGCHLD if we can get it first, and only use the cgroup empty notifications if there's no SIGCHLD pending
         * (which might happen if the cgroup doesn't contain processes that are our own child, which is typically the
         * case for scope units). */

        if (u->in_cgroup_empty_queue)
                return;

        /* Let's verify that the cgroup is really empty */
        if (!u->cgroup_path)
                return;
        r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
        if (r < 0) {
                log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
                return;
        }
        if (r == 0)
                return;

        LIST_PREPEND(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
        u->in_cgroup_empty_queue = true;

        /* Trigger the defer event */
        r = sd_event_source_set_enabled(u->manager->cgroup_empty_event_source, SD_EVENT_ONESHOT);
        if (r < 0)
                log_debug_errno(r, "Failed to enable cgroup empty event source: %m");
}
static int on_cgroup_inotify_event(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *m = userdata;

        assert(s);
        assert(fd >= 0);
        assert(m);

        for (;;) {
                union inotify_event_buffer buffer;
                struct inotify_event *e;
                ssize_t l;

                l = read(fd, &buffer, sizeof(buffer));
                if (l < 0) {
                        if (IN_SET(errno, EINTR, EAGAIN))
                                return 0;

                        return log_error_errno(errno, "Failed to read control group inotify events: %m");
                }

                FOREACH_INOTIFY_EVENT(e, buffer, l) {
                        Unit *u;

                        if (e->wd < 0)
                                /* Queue overflow has no watch descriptor */
                                continue;

                        if (e->mask & IN_IGNORED)
                                /* The watch was just removed */
                                continue;

                        u = hashmap_get(m->cgroup_inotify_wd_unit, INT_TO_PTR(e->wd));
                        if (!u) /* Note that inotify might deliver
                                 * events for a watch even after it
                                 * was removed, because it was queued
                                 * before the removal. Let's ignore
                                 * this here safely. */
                                continue;

                        unit_add_to_cgroup_empty_queue(u);
                }
        }
}
int manager_setup_cgroup(Manager *m) {
        _cleanup_free_ char *path = NULL;
        const char *scope_path;
        CGroupController c;
        int r, all_unified;
        char *e;

        assert(m);

        /* 1. Determine hierarchy */
        m->cgroup_root = mfree(m->cgroup_root);
        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root);
        if (r < 0)
                return log_error_errno(r, "Cannot determine cgroup we are running in: %m");

        /* Chop off the init scope, if we are already located in it */
        e = endswith(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);

        /* LEGACY: Also chop off the system slice if we are in
         * it. This is to support live upgrades from older systemd
         * versions where PID 1 was moved there. Also see
         * cg_get_root_path(). */
        if (!e && MANAGER_IS_SYSTEM(m)) {
                e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE);
                if (!e)
                        e = endswith(m->cgroup_root, "/system"); /* even more legacy */
        }
        if (e)
                *e = 0;

        /* And make sure to store away the root value without trailing
         * slash, even for the root dir, so that we can easily prepend
         * it everywhere. */
        while ((e = endswith(m->cgroup_root, "/")))
                *e = 0;

        /* 2. Show data */
        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
        if (r < 0)
                return log_error_errno(r, "Cannot find cgroup mount point: %m");

        r = cg_unified_flush();
        if (r < 0)
                return log_error_errno(r, "Couldn't determine if we are running in the unified hierarchy: %m");

        all_unified = cg_all_unified();
        if (all_unified < 0)
                return log_error_errno(all_unified, "Couldn't determine whether we are in all unified mode: %m");
        if (all_unified > 0)
                log_debug("Unified cgroup hierarchy is located at %s.", path);
        else {
                r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
                if (r < 0)
                        return log_error_errno(r, "Failed to determine whether systemd's own controller is in unified mode: %m");
                if (r > 0)
                        log_debug("Unified cgroup hierarchy is located at %s. Controllers are on legacy hierarchies.", path);
                else
                        log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER_LEGACY ". File system hierarchy is at %s.", path);
        }

        /* 3. Allocate cgroup empty defer event source */
        m->cgroup_empty_event_source = sd_event_source_unref(m->cgroup_empty_event_source);
        r = sd_event_add_defer(m->event, &m->cgroup_empty_event_source, on_cgroup_empty_event, m);
        if (r < 0)
                return log_error_errno(r, "Failed to create cgroup empty event source: %m");

        r = sd_event_source_set_priority(m->cgroup_empty_event_source, SD_EVENT_PRIORITY_NORMAL-5);
        if (r < 0)
                return log_error_errno(r, "Failed to set priority of cgroup empty event source: %m");

        r = sd_event_source_set_enabled(m->cgroup_empty_event_source, SD_EVENT_OFF);
        if (r < 0)
                return log_error_errno(r, "Failed to disable cgroup empty event source: %m");

        (void) sd_event_source_set_description(m->cgroup_empty_event_source, "cgroup-empty");

        /* 4. Install notifier inotify object, or agent */
        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0) {

                /* In the unified hierarchy we can get cgroup empty notifications via inotify. */

                m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
                safe_close(m->cgroup_inotify_fd);

                m->cgroup_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
                if (m->cgroup_inotify_fd < 0)
                        return log_error_errno(errno, "Failed to create control group inotify object: %m");

                r = sd_event_add_io(m->event, &m->cgroup_inotify_event_source, m->cgroup_inotify_fd, EPOLLIN, on_cgroup_inotify_event, m);
                if (r < 0)
                        return log_error_errno(r, "Failed to watch control group inotify object: %m");

                /* Process cgroup empty notifications early, but after service notifications and SIGCHLD. Also
                 * see handling of cgroup agent notifications, for the classic cgroup hierarchy support. */
                r = sd_event_source_set_priority(m->cgroup_inotify_event_source, SD_EVENT_PRIORITY_NORMAL-4);
                if (r < 0)
                        return log_error_errno(r, "Failed to set priority of inotify event source: %m");

                (void) sd_event_source_set_description(m->cgroup_inotify_event_source, "cgroup-inotify");

        } else if (MANAGER_IS_SYSTEM(m) && m->test_run_flags == 0) {

                /* On the legacy hierarchy we only get notifications via cgroup agents. (Which isn't really reliable,
                 * since it does not generate events when control groups with children run empty.) */

                r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, SYSTEMD_CGROUP_AGENT_PATH);
                if (r < 0)
                        log_warning_errno(r, "Failed to install release agent, ignoring: %m");
                else if (r > 0)
                        log_debug("Installed release agent.");
                else if (r == 0)
                        log_debug("Release agent already installed.");
        }

        /* 5. Make sure we are in the special "init.scope" unit in the root slice. */
        scope_path = strjoina(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
        r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
        if (r < 0)
                return log_error_errno(r, "Failed to create %s control group: %m", scope_path);

        /* Also, move all other userspace processes remaining in the root cgroup into that scope. */
        r = cg_migrate(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
        if (r < 0)
                log_warning_errno(r, "Couldn't move remaining userspace processes, ignoring: %m");

        /* 6. And pin it, so that it cannot be unmounted */
        safe_close(m->pin_cgroupfs_fd);
        m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
        if (m->pin_cgroupfs_fd < 0)
                return log_error_errno(errno, "Failed to open pin file: %m");

        /* 7. Always enable hierarchical support if it exists... */
        if (!all_unified && m->test_run_flags == 0)
                (void) cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");

        /* 8. Figure out which controllers are supported, and log about it */
        r = cg_mask_supported(&m->cgroup_supported);
        if (r < 0)
                return log_error_errno(r, "Failed to determine supported controllers: %m");
        for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++)
                log_debug("Controller '%s' supported: %s", cgroup_controller_to_string(c), yes_no(m->cgroup_supported & CGROUP_CONTROLLER_TO_MASK(c)));

        return 0;
}
void manager_shutdown_cgroup(Manager *m, bool delete) {
        assert(m);

        /* We can't really delete the group, since we are in it. But
         * let's trim it. */
        if (delete && m->cgroup_root)
                (void) cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);

        m->cgroup_empty_event_source = sd_event_source_unref(m->cgroup_empty_event_source);

        m->cgroup_inotify_wd_unit = hashmap_free(m->cgroup_inotify_wd_unit);

        m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
        m->cgroup_inotify_fd = safe_close(m->cgroup_inotify_fd);

        m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd);

        m->cgroup_root = mfree(m->cgroup_root);
}
Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) {
        char *p;
        Unit *u;

        assert(m);
        assert(cgroup);

        u = hashmap_get(m->cgroup_unit, cgroup);
        if (u)
                return u;

        p = strdupa(cgroup);
        for (;;) {
                char *e;

                e = strrchr(p, '/');
                if (!e || e == p)
                        return hashmap_get(m->cgroup_unit, SPECIAL_ROOT_SLICE);

                *e = 0;

                u = hashmap_get(m->cgroup_unit, p);
                if (u)
                        return u;
        }
}
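/* The loop above walks the path prefix by prefix: a query for
 * "/system.slice/foo.service/sub" that has no direct hashmap entry is retried
 * as "/system.slice/foo.service", then "/system.slice", and finally falls
 * back to the root slice once no further '/' is left. */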
Unit *manager_get_unit_by_pid_cgroup(Manager *m, pid_t pid) {
        _cleanup_free_ char *cgroup = NULL;
        int r;

        assert(m);

        if (pid <= 0)
                return NULL;

        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup);
        if (r < 0)
                return NULL;

        return manager_get_unit_by_cgroup(m, cgroup);
}
Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {
        Unit *u;

        assert(m);

        if (pid <= 0)
                return NULL;

        if (pid == getpid_cached())
                return hashmap_get(m->units, SPECIAL_INIT_SCOPE);

        u = hashmap_get(m->watch_pids1, PID_TO_PTR(pid));
        if (u)
                return u;

        u = hashmap_get(m->watch_pids2, PID_TO_PTR(pid));
        if (u)
                return u;

        return manager_get_unit_by_pid_cgroup(m, pid);
}
int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
        Unit *u;

        assert(m);
        assert(cgroup);

        /* Called on the legacy hierarchy whenever we get an explicit cgroup notification from the cgroup agent process
         * or from the --system instance */

        log_debug("Got cgroup empty notification for: %s", cgroup);

        u = manager_get_unit_by_cgroup(m, cgroup);
        if (!u)
                return 0;

        unit_add_to_cgroup_empty_queue(u);
        return 1;
}
int unit_get_memory_current(Unit *u, uint64_t *ret) {
        _cleanup_free_ char *v = NULL;
        int r;

        assert(u);
        assert(ret);

        if (!UNIT_CGROUP_BOOL(u, memory_accounting))
                return -ENODATA;

        if (!u->cgroup_path)
                return -ENODATA;

        if ((u->cgroup_realized_mask & CGROUP_MASK_MEMORY) == 0)
                return -ENODATA;

        r = cg_all_unified();
        if (r < 0)
                return r;
        if (r > 0)
                r = cg_get_attribute("memory", u->cgroup_path, "memory.current", &v);
        else
                r = cg_get_attribute("memory", u->cgroup_path, "memory.usage_in_bytes", &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        return safe_atou64(v, ret);
}
int unit_get_tasks_current(Unit *u, uint64_t *ret) {
        _cleanup_free_ char *v = NULL;
        int r;

        assert(u);
        assert(ret);

        if (!UNIT_CGROUP_BOOL(u, tasks_accounting))
                return -ENODATA;

        if (!u->cgroup_path)
                return -ENODATA;

        if ((u->cgroup_realized_mask & CGROUP_MASK_PIDS) == 0)
                return -ENODATA;

        r = cg_get_attribute("pids", u->cgroup_path, "pids.current", &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        return safe_atou64(v, ret);
}
static int unit_get_cpu_usage_raw(Unit *u, nsec_t *ret) {
        _cleanup_free_ char *v = NULL;
        uint64_t ns;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENODATA;

        r = cg_all_unified();
        if (r < 0)
                return r;
        if (r > 0) {
                const char *keys[] = { "usage_usec", NULL };
                _cleanup_free_ char *val = NULL;
                uint64_t us;

                if ((u->cgroup_realized_mask & CGROUP_MASK_CPU) == 0)
                        return -ENODATA;

                r = cg_get_keyed_attribute("cpu", u->cgroup_path, "cpu.stat", keys, &val);
                if (r < 0)
                        return r;

                r = safe_atou64(val, &us);
                if (r < 0)
                        return r;

                ns = us * NSEC_PER_USEC;
        } else {
                if ((u->cgroup_realized_mask & CGROUP_MASK_CPUACCT) == 0)
                        return -ENODATA;

                r = cg_get_attribute("cpuacct", u->cgroup_path, "cpuacct.usage", &v);
                if (r == -ENOENT)
                        return -ENODATA;
                if (r < 0)
                        return r;

                r = safe_atou64(v, &ns);
                if (r < 0)
                        return r;
        }

        *ret = ns;
        return 0;
}
int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
        nsec_t ns;
        int r;

        assert(u);

        /* Retrieve the current CPU usage counter. This will subtract the CPU counter taken when the unit was
         * started. If the cgroup has been removed already, returns the last cached value. To cache the value, simply
         * call this function with a NULL return value. */

        if (!UNIT_CGROUP_BOOL(u, cpu_accounting))
                return -ENODATA;

        r = unit_get_cpu_usage_raw(u, &ns);
        if (r == -ENODATA && u->cpu_usage_last != NSEC_INFINITY) {
                /* If we can't get the CPU usage anymore (because the cgroup was already removed, for example), use our
                 * cached value. */

                if (ret)
                        *ret = u->cpu_usage_last;
                return 0;
        }
        if (r < 0)
                return r;

        if (ns > u->cpu_usage_base)
                ns -= u->cpu_usage_base;
        else
                ns = 0;

        u->cpu_usage_last = ns;
        if (ret)
                *ret = ns;

        return 0;
}
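/* The subtraction above makes the counter relative to unit start:
 * cpu_usage_base is the raw kernel counter snapshotted by
 * unit_reset_cpu_accounting(), and if the raw value ever drops below it
 * (e.g. because the cgroup was recreated) the result clamps to 0 rather
 * than underflowing. */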
int unit_get_ip_accounting(
                Unit *u,
                CGroupIPAccountingMetric metric,
                uint64_t *ret) {

        uint64_t value;
        int fd, r;

        assert(u);
        assert(metric >= 0);
        assert(metric < _CGROUP_IP_ACCOUNTING_METRIC_MAX);
        assert(ret);

        /* IP accounting is currently not recursive, and hence we refuse to return any data for slice nodes. Slices are
         * inner cgroup nodes and hence have no processes directly attached, hence their counters would be zero
         * anyway. And if we block this now we can later open this up, if the kernel learns recursive BPF cgroup
         * filters. */
        if (u->type == UNIT_SLICE)
                return -ENODATA;

        if (!UNIT_CGROUP_BOOL(u, ip_accounting))
                return -ENODATA;

        fd = IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_INGRESS_PACKETS) ?
                u->ip_accounting_ingress_map_fd :
                u->ip_accounting_egress_map_fd;
        if (fd < 0)
                return -ENODATA;

        if (IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
                r = bpf_firewall_read_accounting(fd, &value, NULL);
        else
                r = bpf_firewall_read_accounting(fd, NULL, &value);
        if (r < 0)
                return r;

        /* Add in additional metrics from a previous runtime. Note that when reexecing/reloading the daemon we compile
         * all BPF programs and maps anew, but serialize the old counters. When deserializing we store them in the
         * ip_accounting_extra[] field, and add them in here transparently. */

        *ret = value + u->ip_accounting_extra[metric];

        return r;
}
int unit_reset_cpu_accounting(Unit *u) {
        nsec_t ns;
        int r;

        assert(u);

        u->cpu_usage_last = NSEC_INFINITY;

        r = unit_get_cpu_usage_raw(u, &ns);
        if (r < 0) {
                u->cpu_usage_base = 0;
                return r;
        }

        u->cpu_usage_base = ns;
        return 0;
}
int unit_reset_ip_accounting(Unit *u) {
        int r = 0, q = 0;

        assert(u);

        if (u->ip_accounting_ingress_map_fd >= 0)
                r = bpf_firewall_reset_accounting(u->ip_accounting_ingress_map_fd);

        if (u->ip_accounting_egress_map_fd >= 0)
                q = bpf_firewall_reset_accounting(u->ip_accounting_egress_map_fd);

        zero(u->ip_accounting_extra);

        return r < 0 ? r : q;
}
void unit_invalidate_cgroup(Unit *u, CGroupMask m) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return;

        if (m == 0)
                return;

        /* always invalidate compat pairs together */
        if (m & (CGROUP_MASK_IO | CGROUP_MASK_BLKIO))
                m |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;

        if (m & (CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT))
                m |= CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT;

        if ((u->cgroup_realized_mask & m) == 0)
                return;

        u->cgroup_realized_mask &= ~m;
        unit_add_to_cgroup_realize_queue(u);
}
void unit_invalidate_cgroup_bpf(Unit *u) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return;

        if (u->cgroup_bpf_state == UNIT_CGROUP_BPF_INVALIDATED)
                return;

        u->cgroup_bpf_state = UNIT_CGROUP_BPF_INVALIDATED;
        unit_add_to_cgroup_realize_queue(u);

        /* If we are a slice unit, we also need to compile a new BPF program for all our children, as the IP access
         * list of our children includes our own. */
        if (u->type == UNIT_SLICE) {
                Unit *member;
                Iterator i;

                SET_FOREACH(member, u->dependencies[UNIT_BEFORE], i) {
                        if (member == u)
                                continue;

                        if (UNIT_DEREF(member->slice) != u)
                                continue;

                        unit_invalidate_cgroup_bpf(member);
                }
        }
}
void manager_invalidate_startup_units(Manager *m) {
        Iterator i;
        Unit *u;

        assert(m);

        SET_FOREACH(u, m->startup_units, i)
                unit_invalidate_cgroup(u, CGROUP_MASK_CPU|CGROUP_MASK_IO|CGROUP_MASK_BLKIO);
}
static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
        [CGROUP_AUTO] = "auto",
        [CGROUP_CLOSED] = "closed",
        [CGROUP_STRICT] = "strict",
};

DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);