/* SPDX-License-Identifier: LGPL-2.1+ */
#include <fcntl.h>
#include <fnmatch.h>

#include "alloc-util.h"
#include "blockdev-util.h"
#include "bpf-firewall.h"
#include "btrfs-util.h"
#include "bus-error.h"
#include "cgroup-util.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
#include "procfs-util.h"
#include "stdio-util.h"
#include "string-table.h"
#include "string-util.h"

#define CGROUP_CPU_QUOTA_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)
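/* Example: with this 100ms period, CPUQuota=20% (i.e. 200ms of CPU time per
 * second of wall clock) translates to a "20000 100000" write to the unified
 * "cpu.max" attribute, i.e. 20ms of runtime allowed per 100ms period. */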
bool manager_owns_root_cgroup(Manager *m) {
        assert(m);

        /* Returns true if we are managing the root cgroup. Note that it isn't sufficient to just check whether the
         * group root path equals "/" since that will also be the case if CLONE_NEWCGROUP is in the mix. Since there
         * appears to be no nice way to detect whether we are in a CLONE_NEWCGROUP namespace we instead just check if
         * we run in any kind of container virtualization. */

        if (detect_container() > 0)
                return false;

        return empty_or_root(m->cgroup_root);
}
bool unit_has_root_cgroup(Unit *u) {
        assert(u);

        /* Returns whether this unit manages the root cgroup. This will return true if this unit is the root slice and
         * the manager manages the root cgroup. */

        if (!manager_owns_root_cgroup(u->manager))
                return false;

        return unit_has_name(u, SPECIAL_ROOT_SLICE);
}
static void cgroup_compat_warn(void) {
        static bool cgroup_compat_warned = false;

        if (cgroup_compat_warned)
                return;

        log_warning("cgroup compatibility translation between legacy and unified hierarchy settings activated. "
                    "See cgroup-compat debug messages for details.");

        cgroup_compat_warned = true;
}
#define log_cgroup_compat(unit, fmt, ...) do {                                  \
                cgroup_compat_warn();                                           \
                log_unit_debug(unit, "cgroup-compat: " fmt, ##__VA_ARGS__);     \
        } while (false)
void cgroup_context_init(CGroupContext *c) {
        assert(c);

        /* Initialize everything to the kernel defaults, assuming the
         * structure is preinitialized to 0 */

        c->cpu_weight = CGROUP_WEIGHT_INVALID;
        c->startup_cpu_weight = CGROUP_WEIGHT_INVALID;
        c->cpu_quota_per_sec_usec = USEC_INFINITY;

        c->cpu_shares = CGROUP_CPU_SHARES_INVALID;
        c->startup_cpu_shares = CGROUP_CPU_SHARES_INVALID;

        c->memory_high = CGROUP_LIMIT_MAX;
        c->memory_max = CGROUP_LIMIT_MAX;
        c->memory_swap_max = CGROUP_LIMIT_MAX;

        c->memory_limit = CGROUP_LIMIT_MAX;

        c->io_weight = CGROUP_WEIGHT_INVALID;
        c->startup_io_weight = CGROUP_WEIGHT_INVALID;

        c->blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;
        c->startup_blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;

        c->tasks_max = (uint64_t) -1;
}
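/* Illustrative call pattern (a sketch, not taken from this file): contexts
 * are allocated zeroed and then initialized, e.g.
 *
 *     CGroupContext c = {};
 *     cgroup_context_init(&c);
 *     ...
 *     cgroup_context_done(&c);
 */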
void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
        assert(c);
        assert(a);

        LIST_REMOVE(device_allow, c->device_allow, a);
        free(a->path);
        free(a);
}
void cgroup_context_free_io_device_weight(CGroupContext *c, CGroupIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->io_device_weights, w);
        free(w->path);
        free(w);
}
void cgroup_context_free_io_device_latency(CGroupContext *c, CGroupIODeviceLatency *l) {
        assert(c);
        assert(l);

        LIST_REMOVE(device_latencies, c->io_device_latencies, l);
        free(l->path);
        free(l);
}
void cgroup_context_free_io_device_limit(CGroupContext *c, CGroupIODeviceLimit *l) {
        assert(c);
        assert(l);

        LIST_REMOVE(device_limits, c->io_device_limits, l);
        free(l->path);
        free(l);
}
void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->blockio_device_weights, w);
        free(w->path);
        free(w);
}
void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b) {
        assert(c);
        assert(b);

        LIST_REMOVE(device_bandwidths, c->blockio_device_bandwidths, b);
        free(b->path);
        free(b);
}
void cgroup_context_done(CGroupContext *c) {
        assert(c);

        while (c->io_device_weights)
                cgroup_context_free_io_device_weight(c, c->io_device_weights);

        while (c->io_device_latencies)
                cgroup_context_free_io_device_latency(c, c->io_device_latencies);

        while (c->io_device_limits)
                cgroup_context_free_io_device_limit(c, c->io_device_limits);

        while (c->blockio_device_weights)
                cgroup_context_free_blockio_device_weight(c, c->blockio_device_weights);

        while (c->blockio_device_bandwidths)
                cgroup_context_free_blockio_device_bandwidth(c, c->blockio_device_bandwidths);

        while (c->device_allow)
                cgroup_context_free_device_allow(c, c->device_allow);

        c->ip_address_allow = ip_address_access_free_all(c->ip_address_allow);
        c->ip_address_deny = ip_address_access_free_all(c->ip_address_deny);
}
void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) {
        CGroupIODeviceLimit *il;
        CGroupIODeviceWeight *iw;
        CGroupIODeviceLatency *l;
        CGroupBlockIODeviceBandwidth *b;
        CGroupBlockIODeviceWeight *w;
        CGroupDeviceAllow *a;
        IPAddressAccessItem *iaai;
        char u[FORMAT_TIMESPAN_MAX];

        assert(c);
        assert(f);

        prefix = strempty(prefix);

        fprintf(f,
                "%sCPUAccounting=%s\n"
                "%sIOAccounting=%s\n"
                "%sBlockIOAccounting=%s\n"
                "%sMemoryAccounting=%s\n"
                "%sTasksAccounting=%s\n"
                "%sIPAccounting=%s\n"
                "%sCPUWeight=%" PRIu64 "\n"
                "%sStartupCPUWeight=%" PRIu64 "\n"
                "%sCPUShares=%" PRIu64 "\n"
                "%sStartupCPUShares=%" PRIu64 "\n"
                "%sCPUQuotaPerSecSec=%s\n"
                "%sIOWeight=%" PRIu64 "\n"
                "%sStartupIOWeight=%" PRIu64 "\n"
                "%sBlockIOWeight=%" PRIu64 "\n"
                "%sStartupBlockIOWeight=%" PRIu64 "\n"
                "%sMemoryMin=%" PRIu64 "\n"
                "%sMemoryLow=%" PRIu64 "\n"
                "%sMemoryHigh=%" PRIu64 "\n"
                "%sMemoryMax=%" PRIu64 "\n"
                "%sMemorySwapMax=%" PRIu64 "\n"
                "%sMemoryLimit=%" PRIu64 "\n"
                "%sTasksMax=%" PRIu64 "\n"
                "%sDevicePolicy=%s\n"
                "%sDelegate=%s\n",
                prefix, yes_no(c->cpu_accounting),
                prefix, yes_no(c->io_accounting),
                prefix, yes_no(c->blockio_accounting),
                prefix, yes_no(c->memory_accounting),
                prefix, yes_no(c->tasks_accounting),
                prefix, yes_no(c->ip_accounting),
                prefix, c->cpu_weight,
                prefix, c->startup_cpu_weight,
                prefix, c->cpu_shares,
                prefix, c->startup_cpu_shares,
                prefix, format_timespan(u, sizeof(u), c->cpu_quota_per_sec_usec, 1),
                prefix, c->io_weight,
                prefix, c->startup_io_weight,
                prefix, c->blockio_weight,
                prefix, c->startup_blockio_weight,
                prefix, c->memory_min,
                prefix, c->memory_low,
                prefix, c->memory_high,
                prefix, c->memory_max,
                prefix, c->memory_swap_max,
                prefix, c->memory_limit,
                prefix, c->tasks_max,
                prefix, cgroup_device_policy_to_string(c->device_policy),
                prefix, yes_no(c->delegate));

        if (c->delegate) {
                _cleanup_free_ char *t = NULL;

                (void) cg_mask_to_string(c->delegate_controllers, &t);

                fprintf(f, "%sDelegateControllers=%s\n",
                        prefix,
                        strempty(t));
        }

        LIST_FOREACH(device_allow, a, c->device_allow)
                fprintf(f,
                        "%sDeviceAllow=%s %s%s%s\n",
                        prefix,
                        a->path,
                        a->r ? "r" : "", a->w ? "w" : "", a->m ? "m" : "");

        LIST_FOREACH(device_weights, iw, c->io_device_weights)
                fprintf(f,
                        "%sIODeviceWeight=%s %" PRIu64 "\n",
                        prefix,
                        iw->path,
                        iw->weight);

        LIST_FOREACH(device_latencies, l, c->io_device_latencies)
                fprintf(f,
                        "%sIODeviceLatencyTargetSec=%s %s\n",
                        prefix,
                        l->path,
                        format_timespan(u, sizeof(u), l->target_usec, 1));

        LIST_FOREACH(device_limits, il, c->io_device_limits) {
                char buf[FORMAT_BYTES_MAX];
                CGroupIOLimitType type;

                for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                        if (il->limits[type] != cgroup_io_limit_defaults[type])
                                fprintf(f,
                                        "%s%s=%s %s\n",
                                        prefix,
                                        cgroup_io_limit_type_to_string(type),
                                        il->path,
                                        format_bytes(buf, sizeof(buf), il->limits[type]));
        }

        LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                fprintf(f,
                        "%sBlockIODeviceWeight=%s %" PRIu64,
                        prefix,
                        w->path,
                        w->weight);

        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                char buf[FORMAT_BYTES_MAX];

                if (b->rbps != CGROUP_LIMIT_MAX)
                        fprintf(f,
                                "%sBlockIOReadBandwidth=%s %s\n",
                                prefix,
                                b->path,
                                format_bytes(buf, sizeof(buf), b->rbps));
                if (b->wbps != CGROUP_LIMIT_MAX)
                        fprintf(f,
                                "%sBlockIOWriteBandwidth=%s %s\n",
                                prefix,
                                b->path,
                                format_bytes(buf, sizeof(buf), b->wbps));
        }

        LIST_FOREACH(items, iaai, c->ip_address_allow) {
                _cleanup_free_ char *k = NULL;

                (void) in_addr_to_string(iaai->family, &iaai->address, &k);
                fprintf(f, "%sIPAddressAllow=%s/%u\n", prefix, strnull(k), iaai->prefixlen);
        }

        LIST_FOREACH(items, iaai, c->ip_address_deny) {
                _cleanup_free_ char *k = NULL;

                (void) in_addr_to_string(iaai->family, &iaai->address, &k);
                fprintf(f, "%sIPAddressDeny=%s/%u\n", prefix, strnull(k), iaai->prefixlen);
        }
}
int cgroup_add_device_allow(CGroupContext *c, const char *dev, const char *mode) {
        _cleanup_free_ CGroupDeviceAllow *a = NULL;
        _cleanup_free_ char *d = NULL;

        assert(c);
        assert(dev);
        assert(isempty(mode) || in_charset(mode, "rwm"));

        a = new(CGroupDeviceAllow, 1);
        if (!a)
                return -ENOMEM;

        d = strdup(dev);
        if (!d)
                return -ENOMEM;

        *a = (CGroupDeviceAllow) {
                .path = TAKE_PTR(d),
                .r = isempty(mode) || !!strchr(mode, 'r'),
                .w = isempty(mode) || !!strchr(mode, 'w'),
                .m = isempty(mode) || !!strchr(mode, 'm'),
        };

        LIST_PREPEND(device_allow, c->device_allow, a);
        TAKE_PTR(a);

        return 0;
}
static int lookup_block_device(const char *p, dev_t *ret) {
        struct stat st;
        int r;

        assert(p);
        assert(ret);

        if (stat(p, &st) < 0)
                return log_warning_errno(errno, "Couldn't stat device '%s': %m", p);

        if (S_ISBLK(st.st_mode))
                *ret = st.st_rdev;
        else if (major(st.st_dev) != 0)
                *ret = st.st_dev; /* If this is not a device node then use the block device this file is stored on */
        else {
                /* If this is btrfs, getting the backing block device is a bit harder */
                r = btrfs_get_block_device(p, ret);
                if (r < 0 && r != -ENOTTY)
                        return log_warning_errno(r, "Failed to determine block device backing btrfs file system '%s': %m", p);
                if (r == -ENOTTY) {
                        log_warning("'%s' is not a block device node, and file system block device cannot be determined or is not local.", p);
                        return -ENODEV;
                }
        }

        /* If this is a LUKS device, try to get the originating block device */
        (void) block_get_originating(*ret, ret);

        /* If this is a partition, try to get the originating block device */
        (void) block_get_whole_disk(*ret, ret);

        return 0;
}
static int whitelist_device(const char *path, const char *node, const char *acc) {
        char buf[2+DECIMAL_STR_MAX(dev_t)*2+2+4];
        struct stat st;
        bool ignore_notfound;
        int r;

        assert(path);
        assert(acc);

        if (node[0] == '-') {
                /* Non-existent paths starting with "-" must be silently ignored */
                node++;
                ignore_notfound = true;
        } else
                ignore_notfound = false;

        if (stat(node, &st) < 0) {
                if (errno == ENOENT && ignore_notfound)
                        return 0;

                return log_warning_errno(errno, "Couldn't stat device %s: %m", node);
        }

        if (!S_ISCHR(st.st_mode) && !S_ISBLK(st.st_mode)) {
                log_warning("%s is not a device.", node);
                return -ENODEV;
        }

        sprintf(buf,
                "%c %u:%u %s",
                S_ISCHR(st.st_mode) ? 'c' : 'b',
                major(st.st_rdev), minor(st.st_rdev),
                acc);

        r = cg_set_attribute("devices", path, "devices.allow", buf);
        if (r < 0)
                log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                               "Failed to set devices.allow on %s: %m", path);

        return r;
}
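/* Example of the attribute written above: whitelisting /dev/null (character
 * device 1:3) for read/write/mknod results in the string "c 1:3 rwm" being
 * written to devices.allow. */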
static int whitelist_major(const char *path, const char *name, char type, const char *acc) {
        _cleanup_fclose_ FILE *f = NULL;
        char line[LINE_MAX];
        bool good = false;
        int r;

        assert(path);
        assert(acc);
        assert(IN_SET(type, 'b', 'c'));

        f = fopen("/proc/devices", "re");
        if (!f)
                return log_warning_errno(errno, "Cannot open /proc/devices to resolve %s (%c): %m", name, type);

        FOREACH_LINE(line, f, goto fail) {
                char buf[2+DECIMAL_STR_MAX(unsigned)+3+4], *p, *w;
                unsigned maj;

                truncate_nl(line);

                if (type == 'c' && streq(line, "Character devices:")) {
                        good = true;
                        continue;
                }

                if (type == 'b' && streq(line, "Block devices:")) {
                        good = true;
                        continue;
                }

                if (isempty(line)) {
                        good = false;
                        continue;
                }

                if (!good)
                        continue;

                p = strstrip(line);

                w = strpbrk(p, WHITESPACE);
                if (!w)
                        continue;
                *w = 0;

                r = safe_atou(p, &maj);
                if (r < 0)
                        continue;
                if (maj <= 0)
                        continue;

                w++;
                w += strspn(w, WHITESPACE);

                if (fnmatch(name, w, 0) != 0)
                        continue;

                sprintf(buf,
                        "%c %u:* %s",
                        type,
                        maj,
                        acc);

                r = cg_set_attribute("devices", path, "devices.allow", buf);
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set devices.allow on %s: %m", path);
        }

        return 0;

fail:
        return log_warning_errno(errno, "Failed to read /proc/devices: %m");
}
static bool cgroup_context_has_cpu_weight(CGroupContext *c) {
        return c->cpu_weight != CGROUP_WEIGHT_INVALID ||
                c->startup_cpu_weight != CGROUP_WEIGHT_INVALID;
}
static bool cgroup_context_has_cpu_shares(CGroupContext *c) {
        return c->cpu_shares != CGROUP_CPU_SHARES_INVALID ||
                c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID;
}
static uint64_t cgroup_context_cpu_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->startup_cpu_weight;
        else if (c->cpu_weight != CGROUP_WEIGHT_INVALID)
                return c->cpu_weight;
        else
                return CGROUP_WEIGHT_DEFAULT;
}
static uint64_t cgroup_context_cpu_shares(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->startup_cpu_shares;
        else if (c->cpu_shares != CGROUP_CPU_SHARES_INVALID)
                return c->cpu_shares;
        else
                return CGROUP_CPU_SHARES_DEFAULT;
}
static void cgroup_apply_unified_cpu_config(Unit *u, uint64_t weight, uint64_t quota) {
        char buf[MAX(DECIMAL_STR_MAX(uint64_t) + 1, (DECIMAL_STR_MAX(usec_t) + 1) * 2)];
        int r;

        xsprintf(buf, "%" PRIu64 "\n", weight);
        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.weight", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.weight: %m");

        if (quota != USEC_INFINITY)
                xsprintf(buf, USEC_FMT " " USEC_FMT "\n",
                         quota * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC, CGROUP_CPU_QUOTA_PERIOD_USEC);
        else
                xsprintf(buf, "max " USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);

        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.max", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.max: %m");
}
static void cgroup_apply_legacy_cpu_config(Unit *u, uint64_t shares, uint64_t quota) {
        char buf[MAX(DECIMAL_STR_MAX(uint64_t), DECIMAL_STR_MAX(usec_t)) + 1];
        int r;

        xsprintf(buf, "%" PRIu64 "\n", shares);
        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.shares", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.shares: %m");

        xsprintf(buf, USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);
        r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_period_us", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.cfs_period_us: %m");

        if (quota != USEC_INFINITY) {
                xsprintf(buf, USEC_FMT "\n", quota * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC);
                r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_quota_us", buf);
        } else
                r = cg_set_attribute("cpu", u->cgroup_path, "cpu.cfs_quota_us", "-1");
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set cpu.cfs_quota_us: %m");
}
static uint64_t cgroup_cpu_shares_to_weight(uint64_t shares) {
        return CLAMP(shares * CGROUP_WEIGHT_DEFAULT / CGROUP_CPU_SHARES_DEFAULT,
                     CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}
static uint64_t cgroup_cpu_weight_to_shares(uint64_t weight) {
        return CLAMP(weight * CGROUP_CPU_SHARES_DEFAULT / CGROUP_WEIGHT_DEFAULT,
                     CGROUP_CPU_SHARES_MIN, CGROUP_CPU_SHARES_MAX);
}
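/* Worked example: with CGROUP_CPU_SHARES_DEFAULT=1024 and
 * CGROUP_WEIGHT_DEFAULT=100 these two conversions map the defaults onto each
 * other; shares of 2048 become a weight of 200, and a weight of 50 becomes
 * shares of 512, each clamped to the target range's [MIN, MAX]. */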
static bool cgroup_context_has_io_config(CGroupContext *c) {
        return c->io_accounting ||
                c->io_weight != CGROUP_WEIGHT_INVALID ||
                c->startup_io_weight != CGROUP_WEIGHT_INVALID ||
                c->io_device_weights ||
                c->io_device_latencies ||
                c->io_device_limits;
}
static bool cgroup_context_has_blockio_config(CGroupContext *c) {
        return c->blockio_accounting ||
                c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
                c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
                c->blockio_device_weights ||
                c->blockio_device_bandwidths;
}
static uint64_t cgroup_context_io_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_io_weight != CGROUP_WEIGHT_INVALID)
                return c->startup_io_weight;
        else if (c->io_weight != CGROUP_WEIGHT_INVALID)
                return c->io_weight;
        else
                return CGROUP_WEIGHT_DEFAULT;
}
static uint64_t cgroup_context_blkio_weight(CGroupContext *c, ManagerState state) {
        if (IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) &&
            c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
                return c->startup_blockio_weight;
        else if (c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID)
                return c->blockio_weight;
        else
                return CGROUP_BLKIO_WEIGHT_DEFAULT;
}
static uint64_t cgroup_weight_blkio_to_io(uint64_t blkio_weight) {
        return CLAMP(blkio_weight * CGROUP_WEIGHT_DEFAULT / CGROUP_BLKIO_WEIGHT_DEFAULT,
                     CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}
static uint64_t cgroup_weight_io_to_blkio(uint64_t io_weight) {
        return CLAMP(io_weight * CGROUP_BLKIO_WEIGHT_DEFAULT / CGROUP_WEIGHT_DEFAULT,
                     CGROUP_BLKIO_WEIGHT_MIN, CGROUP_BLKIO_WEIGHT_MAX);
}
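/* Analogous worked example: with CGROUP_BLKIO_WEIGHT_DEFAULT=500 and
 * CGROUP_WEIGHT_DEFAULT=100, a legacy blkio weight of 1000 maps to an io
 * weight of 200, and converting back yields 1000 again. */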
static void cgroup_apply_io_device_weight(Unit *u, const char *dev_path, uint64_t io_weight) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), io_weight);
        r = cg_set_attribute("io", u->cgroup_path, "io.weight", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set io.weight: %m");
}
static void cgroup_apply_blkio_device_weight(Unit *u, const char *dev_path, uint64_t blkio_weight) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        xsprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), blkio_weight);
        r = cg_set_attribute("blkio", u->cgroup_path, "blkio.weight_device", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set blkio.weight_device: %m");
}
static void cgroup_apply_io_device_latency(Unit *u, const char *dev_path, usec_t target) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+7+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        if (target != USEC_INFINITY)
                xsprintf(buf, "%u:%u target=%" PRIu64 "\n", major(dev), minor(dev), target);
        else
                xsprintf(buf, "%u:%u target=max\n", major(dev), minor(dev));

        r = cg_set_attribute("io", u->cgroup_path, "io.latency", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set io.latency on cgroup %s: %m", u->cgroup_path);
}
static void cgroup_apply_io_device_limit(Unit *u, const char *dev_path, uint64_t *limits) {
        char limit_bufs[_CGROUP_IO_LIMIT_TYPE_MAX][DECIMAL_STR_MAX(uint64_t)];
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+(6+DECIMAL_STR_MAX(uint64_t)+1)*4];
        CGroupIOLimitType type;
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                if (limits[type] != cgroup_io_limit_defaults[type])
                        xsprintf(limit_bufs[type], "%" PRIu64, limits[type]);
                else
                        xsprintf(limit_bufs[type], "%s", limits[type] == CGROUP_LIMIT_MAX ? "max" : "0");

        xsprintf(buf, "%u:%u rbps=%s wbps=%s riops=%s wiops=%s\n", major(dev), minor(dev),
                 limit_bufs[CGROUP_IO_RBPS_MAX], limit_bufs[CGROUP_IO_WBPS_MAX],
                 limit_bufs[CGROUP_IO_RIOPS_MAX], limit_bufs[CGROUP_IO_WIOPS_MAX]);
        r = cg_set_attribute("io", u->cgroup_path, "io.max", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set io.max: %m");
}
static void cgroup_apply_blkio_device_limit(Unit *u, const char *dev_path, uint64_t rbps, uint64_t wbps) {
        char buf[DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1];
        dev_t dev;
        int r;

        r = lookup_block_device(dev_path, &dev);
        if (r < 0)
                return;

        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), rbps);
        r = cg_set_attribute("blkio", u->cgroup_path, "blkio.throttle.read_bps_device", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set blkio.throttle.read_bps_device: %m");

        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), wbps);
        r = cg_set_attribute("blkio", u->cgroup_path, "blkio.throttle.write_bps_device", buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set blkio.throttle.write_bps_device: %m");
}
static bool cgroup_context_has_unified_memory_config(CGroupContext *c) {
        return c->memory_min > 0 || c->memory_low > 0 || c->memory_high != CGROUP_LIMIT_MAX ||
                c->memory_max != CGROUP_LIMIT_MAX || c->memory_swap_max != CGROUP_LIMIT_MAX;
}
static void cgroup_apply_unified_memory_limit(Unit *u, const char *file, uint64_t v) {
        char buf[DECIMAL_STR_MAX(uint64_t) + 1] = "max";
        int r;

        if (v != CGROUP_LIMIT_MAX)
                xsprintf(buf, "%" PRIu64 "\n", v);

        r = cg_set_attribute("memory", u->cgroup_path, file, buf);
        if (r < 0)
                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                              "Failed to set %s: %m", file);
}
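/* Example: v == CGROUP_LIMIT_MAX writes the literal "max"; any other value is
 * written as a decimal byte count, e.g. "1073741824\n" for MemoryMax=1G. */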
static void cgroup_apply_firewall(Unit *u) {
        assert(u);

        /* Best-effort: let's apply IP firewalling and/or accounting if that's enabled */

        if (bpf_firewall_compile(u) < 0)
                return;

        (void) bpf_firewall_install(u);
}
static void cgroup_context_apply(
                Unit *u,
                CGroupMask apply_mask,
                bool apply_bpf,
                ManagerState state) {

        const char *path;
        CGroupContext *c;
        bool is_root;
        int r;

        assert(u);

        /* Nothing to do? Exit early! */
        if (apply_mask == 0 && !apply_bpf)
                return;

        /* Some cgroup attributes are not supported on the root cgroup, hence silently ignore */
        is_root = unit_has_root_cgroup(u);

        assert_se(c = unit_get_cgroup_context(u));
        assert_se(path = u->cgroup_path);

        if (is_root) /* Make sure we don't try to display messages with an empty path. */
                path = "/";

        /* We generally ignore errors caused by read-only mounted
         * cgroup trees (assuming we are running in a container then),
         * and missing cgroups, i.e. EROFS and ENOENT. */

        if ((apply_mask & CGROUP_MASK_CPU) && !is_root) {
                bool has_weight, has_shares;

                has_weight = cgroup_context_has_cpu_weight(c);
                has_shares = cgroup_context_has_cpu_shares(c);

                if (cg_all_unified() > 0) {
                        uint64_t weight;

                        if (has_weight)
                                weight = cgroup_context_cpu_weight(c, state);
                        else if (has_shares) {
                                uint64_t shares = cgroup_context_cpu_shares(c, state);

                                weight = cgroup_cpu_shares_to_weight(shares);

                                log_cgroup_compat(u, "Applying [Startup]CpuShares %" PRIu64 " as [Startup]CpuWeight %" PRIu64 " on %s",
                                                  shares, weight, path);
                        } else
                                weight = CGROUP_WEIGHT_DEFAULT;

                        cgroup_apply_unified_cpu_config(u, weight, c->cpu_quota_per_sec_usec);
                } else {
                        uint64_t shares;

                        if (has_weight) {
                                uint64_t weight = cgroup_context_cpu_weight(c, state);

                                shares = cgroup_cpu_weight_to_shares(weight);

                                log_cgroup_compat(u, "Applying [Startup]CpuWeight %" PRIu64 " as [Startup]CpuShares %" PRIu64 " on %s",
                                                  weight, shares, path);
                        } else if (has_shares)
                                shares = cgroup_context_cpu_shares(c, state);
                        else
                                shares = CGROUP_CPU_SHARES_DEFAULT;

                        cgroup_apply_legacy_cpu_config(u, shares, c->cpu_quota_per_sec_usec);
                }
        }

        if (apply_mask & CGROUP_MASK_IO) {
                bool has_io = cgroup_context_has_io_config(c);
                bool has_blockio = cgroup_context_has_blockio_config(c);

                if (!is_root) {
                        char buf[8+DECIMAL_STR_MAX(uint64_t)+1];
                        uint64_t weight;

                        if (has_io)
                                weight = cgroup_context_io_weight(c, state);
                        else if (has_blockio) {
                                uint64_t blkio_weight = cgroup_context_blkio_weight(c, state);

                                weight = cgroup_weight_blkio_to_io(blkio_weight);

                                log_cgroup_compat(u, "Applying [Startup]BlockIOWeight %" PRIu64 " as [Startup]IOWeight %" PRIu64,
                                                  blkio_weight, weight);
                        } else
                                weight = CGROUP_WEIGHT_DEFAULT;

                        xsprintf(buf, "default %" PRIu64 "\n", weight);
                        r = cg_set_attribute("io", path, "io.weight", buf);
                        if (r < 0)
                                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                              "Failed to set io.weight: %m");

                        if (has_io) {
                                CGroupIODeviceWeight *w;

                                LIST_FOREACH(device_weights, w, c->io_device_weights)
                                        cgroup_apply_io_device_weight(u, w->path, w->weight);
                        } else if (has_blockio) {
                                CGroupBlockIODeviceWeight *w;

                                LIST_FOREACH(device_weights, w, c->blockio_device_weights) {
                                        weight = cgroup_weight_blkio_to_io(w->weight);

                                        log_cgroup_compat(u, "Applying BlockIODeviceWeight %" PRIu64 " as IODeviceWeight %" PRIu64 " for %s",
                                                          w->weight, weight, w->path);

                                        cgroup_apply_io_device_weight(u, w->path, weight);
                                }
                        }

                        if (has_io) {
                                CGroupIODeviceLatency *l;

                                LIST_FOREACH(device_latencies, l, c->io_device_latencies)
                                        cgroup_apply_io_device_latency(u, l->path, l->target_usec);
                        }
                }

                if (has_io) {
                        CGroupIODeviceLimit *l;

                        LIST_FOREACH(device_limits, l, c->io_device_limits)
                                cgroup_apply_io_device_limit(u, l->path, l->limits);

                } else if (has_blockio) {
                        CGroupBlockIODeviceBandwidth *b;

                        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                                uint64_t limits[_CGROUP_IO_LIMIT_TYPE_MAX];
                                CGroupIOLimitType type;

                                for (type = 0; type < _CGROUP_IO_LIMIT_TYPE_MAX; type++)
                                        limits[type] = cgroup_io_limit_defaults[type];

                                limits[CGROUP_IO_RBPS_MAX] = b->rbps;
                                limits[CGROUP_IO_WBPS_MAX] = b->wbps;

                                log_cgroup_compat(u, "Applying BlockIO{Read|Write}Bandwidth %" PRIu64 " %" PRIu64 " as IO{Read|Write}BandwidthMax for %s",
                                                  b->rbps, b->wbps, b->path);

                                cgroup_apply_io_device_limit(u, b->path, limits);
                        }
                }
        }

        if (apply_mask & CGROUP_MASK_BLKIO) {
                bool has_io = cgroup_context_has_io_config(c);
                bool has_blockio = cgroup_context_has_blockio_config(c);

                if (!is_root) {
                        char buf[DECIMAL_STR_MAX(uint64_t)+1];
                        uint64_t weight;

                        if (has_io) {
                                uint64_t io_weight = cgroup_context_io_weight(c, state);

                                weight = cgroup_weight_io_to_blkio(cgroup_context_io_weight(c, state));

                                log_cgroup_compat(u, "Applying [Startup]IOWeight %" PRIu64 " as [Startup]BlockIOWeight %" PRIu64,
                                                  io_weight, weight);
                        } else if (has_blockio)
                                weight = cgroup_context_blkio_weight(c, state);
                        else
                                weight = CGROUP_BLKIO_WEIGHT_DEFAULT;

                        xsprintf(buf, "%" PRIu64 "\n", weight);
                        r = cg_set_attribute("blkio", path, "blkio.weight", buf);
                        if (r < 0)
                                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                              "Failed to set blkio.weight: %m");

                        if (has_io) {
                                CGroupIODeviceWeight *w;

                                LIST_FOREACH(device_weights, w, c->io_device_weights) {
                                        weight = cgroup_weight_io_to_blkio(w->weight);

                                        log_cgroup_compat(u, "Applying IODeviceWeight %" PRIu64 " as BlockIODeviceWeight %" PRIu64 " for %s",
                                                          w->weight, weight, w->path);

                                        cgroup_apply_blkio_device_weight(u, w->path, weight);
                                }
                        } else if (has_blockio) {
                                CGroupBlockIODeviceWeight *w;

                                LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                                        cgroup_apply_blkio_device_weight(u, w->path, w->weight);
                        }
                }

                if (has_io) {
                        CGroupIODeviceLimit *l;

                        LIST_FOREACH(device_limits, l, c->io_device_limits) {
                                log_cgroup_compat(u, "Applying IO{Read|Write}Bandwidth %" PRIu64 " %" PRIu64 " as BlockIO{Read|Write}BandwidthMax for %s",
                                                  l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX], l->path);

                                cgroup_apply_blkio_device_limit(u, l->path, l->limits[CGROUP_IO_RBPS_MAX], l->limits[CGROUP_IO_WBPS_MAX]);
                        }
                } else if (has_blockio) {
                        CGroupBlockIODeviceBandwidth *b;

                        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths)
                                cgroup_apply_blkio_device_limit(u, b->path, b->rbps, b->wbps);
                }
        }

        if ((apply_mask & CGROUP_MASK_MEMORY) && !is_root) {
                if (cg_all_unified() > 0) {
                        uint64_t max, swap_max = CGROUP_LIMIT_MAX;

                        if (cgroup_context_has_unified_memory_config(c)) {
                                max = c->memory_max;
                                swap_max = c->memory_swap_max;
                        } else {
                                max = c->memory_limit;

                                if (max != CGROUP_LIMIT_MAX)
                                        log_cgroup_compat(u, "Applying MemoryLimit %" PRIu64 " as MemoryMax", max);
                        }

                        cgroup_apply_unified_memory_limit(u, "memory.min", c->memory_min);
                        cgroup_apply_unified_memory_limit(u, "memory.low", c->memory_low);
                        cgroup_apply_unified_memory_limit(u, "memory.high", c->memory_high);
                        cgroup_apply_unified_memory_limit(u, "memory.max", max);
                        cgroup_apply_unified_memory_limit(u, "memory.swap.max", swap_max);
                } else {
                        char buf[DECIMAL_STR_MAX(uint64_t) + 1];
                        uint64_t val;

                        if (cgroup_context_has_unified_memory_config(c)) {
                                val = c->memory_max;
                                log_cgroup_compat(u, "Applying MemoryMax %" PRIi64 " as MemoryLimit", val);
                        } else
                                val = c->memory_limit;

                        if (val == CGROUP_LIMIT_MAX)
                                strncpy(buf, "-1\n", sizeof(buf));
                        else
                                xsprintf(buf, "%" PRIu64 "\n", val);

                        r = cg_set_attribute("memory", path, "memory.limit_in_bytes", buf);
                        if (r < 0)
                                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                              "Failed to set memory.limit_in_bytes: %m");
                }
        }

        if ((apply_mask & CGROUP_MASK_DEVICES) && !is_root) {
                CGroupDeviceAllow *a;

                /* Changing the devices list of a populated cgroup
                 * might result in EINVAL, hence ignore EINVAL here. */

                if (c->device_allow || c->device_policy != CGROUP_AUTO)
                        r = cg_set_attribute("devices", path, "devices.deny", "a");
                else
                        r = cg_set_attribute("devices", path, "devices.allow", "a");
                if (r < 0)
                        log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                      "Failed to reset devices.list: %m");

                if (c->device_policy == CGROUP_CLOSED ||
                    (c->device_policy == CGROUP_AUTO && c->device_allow)) {
                        static const char auto_devices[] =
                                "/dev/null\0" "rwm\0"
                                "/dev/zero\0" "rwm\0"
                                "/dev/full\0" "rwm\0"
                                "/dev/random\0" "rwm\0"
                                "/dev/urandom\0" "rwm\0"
                                "/dev/tty\0" "rwm\0"
                                "/dev/ptmx\0" "rwm\0"
                                /* Allow /run/systemd/inaccessible/{chr,blk} devices for mapping InaccessiblePaths */
                                "-/run/systemd/inaccessible/chr\0" "rwm\0"
                                "-/run/systemd/inaccessible/blk\0" "rwm\0";

                        const char *x, *y;

                        NULSTR_FOREACH_PAIR(x, y, auto_devices)
                                whitelist_device(path, x, y);

                        /* PTS (/dev/pts) devices may not be duplicated, but accessed */
                        whitelist_major(path, "pts", 'c', "rw");
                }

                LIST_FOREACH(device_allow, a, c->device_allow) {
                        char acc[4], *val;
                        unsigned k = 0;

                        if (a->r)
                                acc[k++] = 'r';
                        if (a->w)
                                acc[k++] = 'w';
                        if (a->m)
                                acc[k++] = 'm';

                        if (k == 0)
                                continue;

                        acc[k++] = 0;

                        if (path_startswith(a->path, "/dev/"))
                                whitelist_device(path, a->path, acc);
                        else if ((val = startswith(a->path, "block-")))
                                whitelist_major(path, val, 'b', acc);
                        else if ((val = startswith(a->path, "char-")))
                                whitelist_major(path, val, 'c', acc);
                        else
                                log_unit_debug(u, "Ignoring device %s while writing cgroup attribute.", a->path);
                }
        }

        if (apply_mask & CGROUP_MASK_PIDS) {

                if (is_root) {
                        /* So, the "pids" controller does not expose anything on the root cgroup, in order not to
                         * replicate knobs exposed elsewhere needlessly. We abstract this away here however, and when
                         * the knobs of the root cgroup are modified propagate this to the relevant sysctls. There's a
                         * non-obvious asymmetry however: unlike the cgroup properties we don't really want to take
                         * exclusive ownership of the sysctls, but we still want to honour things if the user sets
                         * limits. Hence we employ sort of a one-way strategy: when the user sets a bounded limit
                         * through us it counts. When the user afterwards unsets it again (i.e. sets it to unbounded)
                         * it also counts. But if the user never set a limit through us (i.e. we are the default of
                         * "unbounded") we leave things unmodified. For this we manage a global boolean that we turn on
                         * the first time we set a limit. Note that this boolean is flushed out on manager reload,
                         * which is desirable so that there's an official way to release control of the sysctl from
                         * systemd: set the limit to unbounded and reload. */

                        if (c->tasks_max != CGROUP_LIMIT_MAX) {
                                u->manager->sysctl_pid_max_changed = true;
                                r = procfs_tasks_set_limit(c->tasks_max);
                        } else if (u->manager->sysctl_pid_max_changed)
                                r = procfs_tasks_set_limit(TASKS_MAX);
                        else
                                r = 0;

                        if (r < 0)
                                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                              "Failed to write to tasks limit sysctls: %m");

                } else {
                        if (c->tasks_max != CGROUP_LIMIT_MAX) {
                                char buf[DECIMAL_STR_MAX(uint64_t) + 2];

                                sprintf(buf, "%" PRIu64 "\n", c->tasks_max);
                                r = cg_set_attribute("pids", path, "pids.max", buf);
                        } else
                                r = cg_set_attribute("pids", path, "pids.max", "max");
                        if (r < 0)
                                log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                              "Failed to set pids.max: %m");
                }
        }

        if (apply_bpf)
                cgroup_apply_firewall(u);
}
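/* Example of the pids.max writes above: TasksMax=4096 produces "4096\n",
 * while TasksMax=infinity writes the literal string "max". */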
CGroupMask cgroup_context_get_mask(CGroupContext *c) {
        CGroupMask mask = 0;

        /* Figure out which controllers we need */

        if (c->cpu_accounting ||
            cgroup_context_has_cpu_weight(c) ||
            cgroup_context_has_cpu_shares(c) ||
            c->cpu_quota_per_sec_usec != USEC_INFINITY)
                mask |= CGROUP_MASK_CPUACCT | CGROUP_MASK_CPU;

        if (cgroup_context_has_io_config(c) || cgroup_context_has_blockio_config(c))
                mask |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;

        if (c->memory_accounting ||
            c->memory_limit != CGROUP_LIMIT_MAX ||
            cgroup_context_has_unified_memory_config(c))
                mask |= CGROUP_MASK_MEMORY;

        if (c->device_allow ||
            c->device_policy != CGROUP_AUTO)
                mask |= CGROUP_MASK_DEVICES;

        if (c->tasks_accounting ||
            c->tasks_max != CGROUP_LIMIT_MAX)
                mask |= CGROUP_MASK_PIDS;

        return mask;
}
CGroupMask unit_get_own_mask(Unit *u) {
        CGroupContext *c;

        /* Returns the mask of controllers the unit needs for itself */

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        return cgroup_context_get_mask(c) | unit_get_delegate_mask(u);
}
CGroupMask unit_get_delegate_mask(Unit *u) {
        CGroupContext *c;

        /* If delegation is turned on, then turn on selected controllers, unless we are on the legacy hierarchy and the
         * process we fork into is known to drop privileges, and hence shouldn't get access to the controllers.
         *
         * Note that on the unified hierarchy it is safe to delegate controllers to unprivileged services. */

        if (!unit_cgroup_delegate(u))
                return 0;

        if (cg_all_unified() <= 0) {
                ExecContext *e;

                e = unit_get_exec_context(u);
                if (e && !exec_context_maintains_privileges(e))
                        return 0;
        }

        assert_se(c = unit_get_cgroup_context(u));
        return c->delegate_controllers;
}
CGroupMask unit_get_members_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's children require, merged */

        if (u->cgroup_members_mask_valid)
                return u->cgroup_members_mask;

        u->cgroup_members_mask = 0;

        if (u->type == UNIT_SLICE) {
                void *v;
                Unit *member;
                Iterator i;

                HASHMAP_FOREACH_KEY(v, member, u->dependencies[UNIT_BEFORE], i) {

                        if (member == u)
                                continue;

                        if (UNIT_DEREF(member->slice) != u)
                                continue;

                        u->cgroup_members_mask |= unit_get_subtree_mask(member); /* note that this calls ourselves again, for the children */
                }
        }

        u->cgroup_members_mask_valid = true;
        return u->cgroup_members_mask;
}
CGroupMask unit_get_siblings_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's siblings
         * require, i.e. the members mask of the unit's parent slice
         * if there is one. */

        if (UNIT_ISSET(u->slice))
                return unit_get_members_mask(UNIT_DEREF(u->slice));

        return unit_get_subtree_mask(u); /* we are the top-level slice */
}
CGroupMask unit_get_subtree_mask(Unit *u) {

        /* Returns the mask of this subtree, meaning of the group
         * itself and its children. */

        return unit_get_own_mask(u) | unit_get_members_mask(u);
}
CGroupMask unit_get_target_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable
         * for a specific cgroup, i.e. everything it needs itself,
         * plus all that its children need, plus all that its siblings
         * need. This is primarily useful on the legacy cgroup
         * hierarchy, where we need to duplicate each cgroup in each
         * hierarchy that shall be enabled for it. */

        mask = unit_get_own_mask(u) | unit_get_members_mask(u) | unit_get_siblings_mask(u);
        mask &= u->manager->cgroup_supported;

        return mask;
}
CGroupMask unit_get_enable_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable
         * for the children of a specific cgroup. This is primarily
         * useful for the unified cgroup hierarchy, where each cgroup
         * controls which controllers are enabled for its children. */

        mask = unit_get_members_mask(u);
        mask &= u->manager->cgroup_supported;

        return mask;
}
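/* Example of the mask arithmetic: if a service itself needs CPU and MEMORY
 * while a sibling needs PIDS, unit_get_target_mask() yields CPU|MEMORY|PIDS
 * (intersected with what the kernel actually supports), whereas
 * unit_get_enable_mask() of the parent slice covers only what the children
 * require. */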
bool unit_get_needs_bpf(Unit *u) {
        CGroupContext *c;
        Unit *p;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        if (c->ip_accounting ||
            c->ip_address_allow ||
            c->ip_address_deny)
                return true;

        /* If any parent slice has an IP access list defined, it applies too */
        for (p = UNIT_DEREF(u->slice); p; p = UNIT_DEREF(p->slice)) {
                c = unit_get_cgroup_context(p);
                if (!c)
                        return false;

                if (c->ip_address_allow ||
                    c->ip_address_deny)
                        return true;
        }

        return false;
}
/* Recurse from a unit up through its containing slices, propagating
 * mask bits upward. A unit is also a member of itself. */
void unit_update_cgroup_members_masks(Unit *u) {
        CGroupMask m;
        bool more;

        assert(u);

        /* Calculate subtree mask */
        m = unit_get_subtree_mask(u);

        /* See if anything changed from the previous invocation. If
         * not, we're done. */
        if (u->cgroup_subtree_mask_valid && m == u->cgroup_subtree_mask)
                return;

        more =
                u->cgroup_subtree_mask_valid &&
                ((m & ~u->cgroup_subtree_mask) != 0) &&
                ((~m & u->cgroup_subtree_mask) == 0);

        u->cgroup_subtree_mask = m;
        u->cgroup_subtree_mask_valid = true;

        if (UNIT_ISSET(u->slice)) {
                Unit *s = UNIT_DEREF(u->slice);

                if (more)
                        /* There's more set now than before. We
                         * propagate the new mask to the parent's mask
                         * (not caring if it actually was valid or
                         * not). */

                        s->cgroup_members_mask |= m;

                else
                        /* There's less set now than before (or we
                         * don't know), we need to recalculate
                         * everything, so let's invalidate the
                         * parent's members mask */

                        s->cgroup_members_mask_valid = false;

                /* And now make sure that this change also hits our
                 * grandparents */
                unit_update_cgroup_members_masks(s);
        }
}
const char *unit_get_realized_cgroup_path(Unit *u, CGroupMask mask) {

        /* Returns the realized cgroup path of the specified unit where all specified controllers are available. */

        while (u) {

                if (u->cgroup_path &&
                    u->cgroup_realized &&
                    FLAGS_SET(u->cgroup_realized_mask, mask))
                        return u->cgroup_path;

                u = UNIT_DEREF(u->slice);
        }

        return NULL;
}
static const char *migrate_callback(CGroupMask mask, void *userdata) {
        return unit_get_realized_cgroup_path(userdata, mask);
}
char *unit_default_cgroup_path(Unit *u) {
        _cleanup_free_ char *escaped = NULL, *slice = NULL;
        int r;

        assert(u);

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return strdup(u->manager->cgroup_root);

        if (UNIT_ISSET(u->slice) && !unit_has_name(UNIT_DEREF(u->slice), SPECIAL_ROOT_SLICE)) {
                r = cg_slice_to_path(UNIT_DEREF(u->slice)->id, &slice);
                if (r < 0)
                        return NULL;
        }

        escaped = cg_escape(u->id);
        if (!escaped)
                return NULL;

        if (slice)
                return strjoin(u->manager->cgroup_root, "/", slice, "/",
                               escaped);
        else
                return strjoin(u->manager->cgroup_root, "/", escaped);
}
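/* Example: foo.service in bar.slice resolves to
 * "<cgroup-root>/bar.slice/foo.service", while a unit in the root slice
 * resolves to "<cgroup-root>/foo.service". */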
int unit_set_cgroup_path(Unit *u, const char *path) {
        _cleanup_free_ char *p = NULL;
        int r;

        assert(u);

        if (path) {
                p = strdup(path);
                if (!p)
                        return -ENOMEM;
        }

        if (streq_ptr(u->cgroup_path, p))
                return 0;

        if (p) {
                r = hashmap_put(u->manager->cgroup_unit, p, u);
                if (r < 0)
                        return r;
        }

        unit_release_cgroup(u);

        u->cgroup_path = TAKE_PTR(p);

        return 0;
}
int unit_watch_cgroup(Unit *u) {
        _cleanup_free_ char *events = NULL;
        int r;

        assert(u);

        if (!u->cgroup_path)
                return 0;

        if (u->cgroup_inotify_wd >= 0)
                return 0;

        /* Only applies to the unified hierarchy */
        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return log_error_errno(r, "Failed to determine whether the name=systemd hierarchy is unified: %m");
        if (r == 0)
                return 0;

        /* Don't watch the root slice, it's pointless. */
        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        r = hashmap_ensure_allocated(&u->manager->cgroup_inotify_wd_unit, &trivial_hash_ops);
        if (r < 0)
                return log_oom();

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events", &events);
        if (r < 0)
                return log_oom();

        u->cgroup_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, events, IN_MODIFY);
        if (u->cgroup_inotify_wd < 0) {

                if (errno == ENOENT) /* If the directory is already
                                      * gone we don't need to track
                                      * it, so this is not an error */
                        return 0;

                return log_unit_error_errno(u, errno, "Failed to add inotify watch descriptor for control group %s: %m", u->cgroup_path);
        }

        r = hashmap_put(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd), u);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to add inotify watch descriptor to hash map: %m");

        return 0;
}
int unit_pick_cgroup_path(Unit *u) {
        _cleanup_free_ char *path = NULL;
        int r;

        assert(u);

        if (u->cgroup_path)
                return 0;

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EINVAL;

        path = unit_default_cgroup_path(u);
        if (!path)
                return log_oom();

        r = unit_set_cgroup_path(u, path);
        if (r == -EEXIST)
                return log_unit_error_errno(u, r, "Control group %s exists already.", path);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to set unit's control group path to %s: %m", path);

        return 0;
}
static int unit_create_cgroup(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask,
                bool needs_bpf) {

        CGroupContext *c;
        bool created;
        int r;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        /* Figure out our cgroup path */
        r = unit_pick_cgroup_path(u);
        if (r < 0)
                return r;

        /* First, create our own group */
        r = cg_create_everywhere(u->manager->cgroup_supported, target_mask, u->cgroup_path);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to create cgroup %s: %m", u->cgroup_path);
        created = r;

        /* Start watching it */
        (void) unit_watch_cgroup(u);

        /* Preserve enabled controllers in delegated units, adjust others. */
        if (created || !unit_cgroup_delegate(u)) {

                /* Enable all controllers we need */
                r = cg_enable_everywhere(u->manager->cgroup_supported, enable_mask, u->cgroup_path);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to enable controllers on cgroup %s, ignoring: %m",
                                               u->cgroup_path);
        }

        /* Keep track that this is now realized */
        u->cgroup_realized = true;
        u->cgroup_realized_mask = target_mask;
        u->cgroup_enabled_mask = enable_mask;
        u->cgroup_bpf_state = needs_bpf ? UNIT_CGROUP_BPF_ON : UNIT_CGROUP_BPF_OFF;

        if (u->type != UNIT_SLICE && !unit_cgroup_delegate(u)) {

                /* Then, possibly move things over, but not if
                 * subgroups may contain processes, which is the case
                 * for slice and delegation units. */
                r = cg_migrate_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->cgroup_path, migrate_callback, u);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to migrate cgroup processes to %s, ignoring: %m", u->cgroup_path);
        }

        return 0;
}
static int unit_attach_pid_to_cgroup_via_bus(Unit *u, pid_t pid, const char *suffix_path) {
        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
        char *pp;
        int r;

        assert(u);

        if (MANAGER_IS_SYSTEM(u->manager))
                return -EINVAL;

        if (!u->manager->system_bus)
                return -EIO;

        if (!u->cgroup_path)
                return -EINVAL;

        /* Determine this unit's cgroup path relative to our cgroup root */
        pp = path_startswith(u->cgroup_path, u->manager->cgroup_root);
        if (!pp)
                return -EINVAL;

        pp = strjoina("/", pp, suffix_path);
        path_simplify(pp, false);

        r = sd_bus_call_method(u->manager->system_bus,
                               "org.freedesktop.systemd1",
                               "/org/freedesktop/systemd1",
                               "org.freedesktop.systemd1.Manager",
                               "AttachProcessesToUnit",
                               &error, NULL,
                               "ssau",
                               NULL /* empty unit name means client's unit, i.e. us */, pp, 1, (uint32_t) pid);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to attach unit process " PID_FMT " via the bus: %s", pid, bus_error_message(&error, r));

        return 0;
}
int unit_attach_pids_to_cgroup(Unit *u, Set *pids, const char *suffix_path) {
        CGroupMask delegated_mask;
        const char *p;
        Iterator i;
        void *pidp;
        int r, q;

        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EINVAL;

        if (set_isempty(pids))
                return 0;

        r = unit_realize_cgroup(u);
        if (r < 0)
                return r;

        if (isempty(suffix_path))
                p = u->cgroup_path;
        else
                p = strjoina(u->cgroup_path, "/", suffix_path);

        delegated_mask = unit_get_delegate_mask(u);

        r = 0;
        SET_FOREACH(pidp, pids, i) {
                pid_t pid = PTR_TO_PID(pidp);
                CGroupController c;

                /* First, attach the PID to the main cgroup hierarchy */
                q = cg_attach(SYSTEMD_CGROUP_CONTROLLER, p, pid);
                if (q < 0) {
                        log_unit_debug_errno(u, q, "Couldn't move process " PID_FMT " to requested cgroup '%s': %m", pid, p);

                        if (MANAGER_IS_USER(u->manager) && IN_SET(q, -EPERM, -EACCES)) {
                                int z;

                                /* If we are in a user instance, and we can't move the process ourselves due to
                                 * permission problems, let's ask the system instance about it instead. Since it's more
                                 * privileged it might be able to move the process across the leaves of a subtree whose
                                 * top node is not owned by us. */

                                z = unit_attach_pid_to_cgroup_via_bus(u, pid, suffix_path);
                                if (z < 0)
                                        log_unit_debug_errno(u, z, "Couldn't move process " PID_FMT " to requested cgroup '%s' via the system bus either: %m", pid, p);
                                else
                                        continue; /* When the bus thing worked via the bus we are fully done for this PID. */
                        }

                        if (r >= 0)
                                r = q; /* Remember first error */

                        continue;
                }

                q = cg_all_unified();
                if (q < 0)
                        return q;
                if (q > 0)
                        continue;

                /* In the legacy hierarchy, attach the process to the request cgroup if possible, and if not to the
                 * innermost realized one */

                for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++) {
                        CGroupMask bit = CGROUP_CONTROLLER_TO_MASK(c);
                        const char *realized;

                        if (!(u->manager->cgroup_supported & bit))
                                continue;

                        /* If this controller is delegated and realized, honour the caller's request for the cgroup suffix. */
                        if (delegated_mask & u->cgroup_realized_mask & bit) {
                                q = cg_attach(cgroup_controller_to_string(c), p, pid);
                                if (q >= 0)
                                        continue; /* Success! */

                                log_unit_debug_errno(u, q, "Failed to attach PID " PID_FMT " to requested cgroup %s in controller %s, falling back to unit's cgroup: %m",
                                                     pid, p, cgroup_controller_to_string(c));
                        }

                        /* So this controller is either not delegated or realized, or something else weird happened. In
                         * that case let's attach the PID at least to the closest cgroup up the tree that is
                         * realized for this controller. */
                        realized = unit_get_realized_cgroup_path(u, bit);
                        if (!realized)
                                continue; /* Not even realized in the root slice? Then let's not bother */

                        q = cg_attach(cgroup_controller_to_string(c), realized, pid);
                        if (q < 0)
                                log_unit_debug_errno(u, q, "Failed to attach PID " PID_FMT " to realized cgroup %s in controller %s, ignoring: %m",
                                                     pid, realized, cgroup_controller_to_string(c));
                }
        }

        return r;
}
static void cgroup_xattr_apply(Unit *u) {
        char ids[SD_ID128_STRING_MAX];
        int r;

        assert(u);

        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        if (sd_id128_is_null(u->invocation_id))
                return;

        r = cg_set_xattr(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                         "trusted.invocation_id",
                         sd_id128_to_string(u->invocation_id, ids), 32,
                         0);
        if (r < 0)
                log_unit_debug_errno(u, r, "Failed to set invocation ID on control group %s, ignoring: %m", u->cgroup_path);
}
static bool unit_has_mask_realized(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask,
                bool needs_bpf) {

        assert(u);

        return u->cgroup_realized &&
                u->cgroup_realized_mask == target_mask &&
                u->cgroup_enabled_mask == enable_mask &&
                ((needs_bpf && u->cgroup_bpf_state == UNIT_CGROUP_BPF_ON) ||
                 (!needs_bpf && u->cgroup_bpf_state == UNIT_CGROUP_BPF_OFF));
}
static void unit_add_to_cgroup_realize_queue(Unit *u) {
        assert(u);

        if (u->in_cgroup_realize_queue)
                return;

        LIST_PREPEND(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
        u->in_cgroup_realize_queue = true;
}
static void unit_remove_from_cgroup_realize_queue(Unit *u) {
        assert(u);

        if (!u->in_cgroup_realize_queue)
                return;

        LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
        u->in_cgroup_realize_queue = false;
}
/* Check if necessary controllers and attributes for a unit are in place.
 *
 * If so, do nothing.
 * If not, create paths, move processes over, and set attributes.
 *
 * Returns 0 on success and < 0 on failure. */
static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
        CGroupMask target_mask, enable_mask;
        bool needs_bpf, apply_bpf;
        int r;

        assert(u);

        unit_remove_from_cgroup_realize_queue(u);

        target_mask = unit_get_target_mask(u);
        enable_mask = unit_get_enable_mask(u);
        needs_bpf = unit_get_needs_bpf(u);

        if (unit_has_mask_realized(u, target_mask, enable_mask, needs_bpf))
                return 0;

        /* Make sure we apply the BPF filters either when one is configured, or if none is configured but previously
         * the state was anything but off. This way, if a unit with a BPF filter applied is reconfigured to lose it
         * this will trickle down properly to cgroupfs. */
        apply_bpf = needs_bpf || u->cgroup_bpf_state != UNIT_CGROUP_BPF_OFF;

        /* First, realize parents */
        if (UNIT_ISSET(u->slice)) {
                r = unit_realize_cgroup_now(UNIT_DEREF(u->slice), state);
                if (r < 0)
                        return r;
        }

        /* And then do the real work */
        r = unit_create_cgroup(u, target_mask, enable_mask, needs_bpf);
        if (r < 0)
                return r;

        /* Finally, apply the necessary attributes. */
        cgroup_context_apply(u, target_mask, apply_bpf, state);
        cgroup_xattr_apply(u);

        return 0;
}
unsigned manager_dispatch_cgroup_realize_queue(Manager *m) {
        ManagerState state;
        unsigned n = 0;
        Unit *i;
        int r;

        assert(m);

        state = manager_state(m);

        while ((i = m->cgroup_realize_queue)) {
                assert(i->in_cgroup_realize_queue);

                if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(i))) {
                        /* Maybe things changed, and the unit is not actually active anymore? */
                        unit_remove_from_cgroup_realize_queue(i);
                        continue;
                }

                r = unit_realize_cgroup_now(i, state);
                if (r < 0)
                        log_warning_errno(r, "Failed to realize cgroups for queued unit %s, ignoring: %m", i->id);

                n++;
        }

        return n;
}
static void unit_add_siblings_to_cgroup_realize_queue(Unit *u) {
        Unit *slice;

        /* This adds the siblings of the specified unit and the
         * siblings of all parent units to the cgroup queue. (But
         * neither the specified unit itself nor the parents.) */

        while ((slice = UNIT_DEREF(u->slice))) {
                Iterator i;
                Unit *m;
                void *v;

                HASHMAP_FOREACH_KEY(v, m, u->dependencies[UNIT_BEFORE], i) {
                        if (m == u)
                                continue;

                        /* Skip units that have a dependency on the slice
                         * but aren't actually in it. */
                        if (UNIT_DEREF(m->slice) != slice)
                                continue;

                        /* No point in doing cgroup application for units
                         * without active processes. */
                        if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(m)))
                                continue;

                        /* If the unit doesn't need any new controllers
                         * and has current ones realized, it doesn't need
                         * any changes. */
                        if (unit_has_mask_realized(m,
                                                   unit_get_target_mask(m),
                                                   unit_get_enable_mask(m),
                                                   unit_get_needs_bpf(m)))
                                continue;

                        unit_add_to_cgroup_realize_queue(m);
                }

                u = slice;
        }
}
int unit_realize_cgroup(Unit *u) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* So, here's the deal: when realizing the cgroups for this
         * unit, we need to first create all parents, but there's more
         * actually: for the weight-based controllers we also need to
         * make sure that all our siblings (i.e. units that are in the
         * same slice as we are) have cgroups, too. Otherwise, things
         * would become very uneven as each of their processes would
         * get as much resources as all our group together. This call
         * will synchronously create the parent cgroups, but will
         * defer work on the siblings to the next event loop
         * iteration. */

        /* Add all sibling slices to the cgroup queue. */
        unit_add_siblings_to_cgroup_realize_queue(u);

        /* And realize this one now (and apply the values) */
        return unit_realize_cgroup_now(u, manager_state(u->manager));
}
void unit_release_cgroup(Unit *u) {
        assert(u);

        /* Forgets all cgroup details for this cgroup */

        if (u->cgroup_path) {
                (void) hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
                u->cgroup_path = mfree(u->cgroup_path);
        }

        if (u->cgroup_inotify_wd >= 0) {
                if (inotify_rm_watch(u->manager->cgroup_inotify_fd, u->cgroup_inotify_wd) < 0)
                        log_unit_debug_errno(u, errno, "Failed to remove cgroup inotify watch %i for %s, ignoring", u->cgroup_inotify_wd, u->id);

                (void) hashmap_remove(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd));
                u->cgroup_inotify_wd = -1;
        }
}
void unit_prune_cgroup(Unit *u) {
        bool is_root_slice;
        int r;

        assert(u);

        /* Removes the cgroup, if empty and possible, and stops watching it. */

        if (!u->cgroup_path)
                return;

        (void) unit_get_cpu_usage(u, NULL); /* Cache the last CPU usage value before we destroy the cgroup */

        is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);

        r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !is_root_slice);
        if (r < 0) {
                log_unit_debug_errno(u, r, "Failed to destroy cgroup %s, ignoring: %m", u->cgroup_path);
                return;
        }

        if (is_root_slice)
                return;

        unit_release_cgroup(u);

        u->cgroup_realized = false;
        u->cgroup_realized_mask = 0;
        u->cgroup_enabled_mask = 0;
}
int unit_search_main_pid(Unit *u, pid_t *ret) {
        _cleanup_fclose_ FILE *f = NULL;
        pid_t pid = 0, npid, mypid;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENXIO;

        r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, &f);
        if (r < 0)
                return r;

        mypid = getpid_cached();
        while (cg_read_pid(f, &npid) > 0) {
                pid_t ppid;

                if (npid == pid)
                        continue;

                /* Ignore processes that aren't our kids */
                if (get_process_ppid(npid, &ppid) >= 0 && ppid != mypid)
                        continue;

                if (pid != 0)
                        /* Dang, there's more than one daemonized PID
                         * in this group, so we don't know what process
                         * is the main process. */
                        return -ENODATA;

                pid = npid;
        }

        *ret = pid;
        return 0;
}
static int unit_watch_pids_in_path(Unit *u, const char *path) {
        _cleanup_closedir_ DIR *d = NULL;
        _cleanup_fclose_ FILE *f = NULL;
        int ret = 0, r;

        assert(u);
        assert(path);

        r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f);
        if (r < 0)
                ret = r;
        else {
                pid_t pid;

                while ((r = cg_read_pid(f, &pid)) > 0) {
                        r = unit_watch_pid(u, pid);
                        if (r < 0 && ret >= 0)
                                ret = r;
                }

                if (r < 0 && ret >= 0)
                        ret = r;
        }

        r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d);
        if (r < 0) {
                if (ret >= 0)
                        ret = r;
        } else {
                char *fn;

                while ((r = cg_read_subgroup(d, &fn)) > 0) {
                        _cleanup_free_ char *p = NULL;

                        p = strjoin(path, "/", fn);
                        free(fn);

                        if (!p)
                                return -ENOMEM;

                        r = unit_watch_pids_in_path(u, p);
                        if (r < 0 && ret >= 0)
                                ret = r;
                }

                if (r < 0 && ret >= 0)
                        ret = r;
        }

        return ret;
}
int unit_synthesize_cgroup_empty_event(Unit *u) {
        int r;

        assert(u);

        /* Enqueue a synthetic cgroup empty event if this unit doesn't watch any PIDs anymore. This is compatibility
         * support for non-unified systems where notifications aren't reliable, and hence need to take whatever we can
         * get as notification source as soon as we stopped having any useful PIDs to watch for. */

        if (!u->cgroup_path)
                return 0;

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return r;
        if (r > 0) /* On unified we have reliable notifications, and don't need this */
                return 0;

        if (!set_isempty(u->pids))
                return 0;

        unit_add_to_cgroup_empty_queue(u);
        return 0;
}
int unit_watch_all_pids(Unit *u) {
        int r;

        assert(u);

        /* Adds all PIDs from our cgroup to the set of PIDs we
         * watch. This is a fallback logic for cases where we do not
         * get reliable cgroup empty notifications: we try to use
         * SIGCHLD as replacement. */

        if (!u->cgroup_path)
                return -ENOENT;

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return r;
        if (r > 0) /* On unified we can use proper notifications */
                return 0;

        return unit_watch_pids_in_path(u, u->cgroup_path);
}
static int on_cgroup_empty_event(sd_event_source *s, void *userdata) {
        Manager *m = userdata;
        Unit *u;
        int r;

        assert(s);
        assert(m);

        u = m->cgroup_empty_queue;
        if (!u)
                return 0;

        assert(u->in_cgroup_empty_queue);
        u->in_cgroup_empty_queue = false;
        LIST_REMOVE(cgroup_empty_queue, m->cgroup_empty_queue, u);

        if (m->cgroup_empty_queue) {
                /* More stuff queued, let's make sure we remain enabled */
                r = sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);
                if (r < 0)
                        log_debug_errno(r, "Failed to reenable cgroup empty event source, ignoring: %m");
        }

        unit_add_to_gc_queue(u);

        if (UNIT_VTABLE(u)->notify_cgroup_empty)
                UNIT_VTABLE(u)->notify_cgroup_empty(u);

        return 0;
}
void unit_add_to_cgroup_empty_queue(Unit *u) {
        int r;

        assert(u);

        /* Note that there are four different ways in which cgroup empty events reach us:
         *
         * 1. On the unified hierarchy we get an inotify event on the cgroup
         *
         * 2. On the legacy hierarchy, when running in system mode, we get a datagram on the cgroup agent socket
         *
         * 3. On the legacy hierarchy, when running in user mode, we get a D-Bus signal on the system bus
         *
         * 4. On the legacy hierarchy, in service units we start watching all processes of the cgroup for SIGCHLD as
         *    soon as we get one SIGCHLD, to deal with unreliable cgroup notifications.
         *
         * (A sketch of what source #1 looks like at the kernel interface level follows after this function.)
         *
         * Regardless which way we got the notification, we'll verify it here, and then add it to a separate
         * queue. This queue will be dispatched at a lower priority than the SIGCHLD handler, so that we always use
         * SIGCHLD if we can get it first, and only use the cgroup empty notifications if there's no SIGCHLD pending
         * (which might happen if the cgroup doesn't contain processes that are our own child, which is typically the
         * case for scope units). */

        if (u->in_cgroup_empty_queue)
                return;

        /* Let's verify that the cgroup is really empty */
        if (!u->cgroup_path)
                return;

        r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
        if (r < 0) {
                log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
                return;
        }
        if (r == 0)
                return;

        LIST_PREPEND(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
        u->in_cgroup_empty_queue = true;

        /* Trigger the defer event */
        r = sd_event_source_set_enabled(u->manager->cgroup_empty_event_source, SD_EVENT_ONESHOT);
        if (r < 0)
                log_debug_errno(r, "Failed to enable cgroup empty event source: %m");
}
static int on_cgroup_inotify_event(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *m = userdata;

        assert(s);
        assert(fd >= 0);
        assert(m);

        for (;;) {
                union inotify_event_buffer buffer;
                struct inotify_event *e;
                ssize_t l;

                l = read(fd, &buffer, sizeof(buffer));
                if (l < 0) {
                        if (IN_SET(errno, EINTR, EAGAIN))
                                return 0;

                        return log_error_errno(errno, "Failed to read control group inotify events: %m");
                }

                FOREACH_INOTIFY_EVENT(e, buffer, l) {
                        Unit *u;

                        if (e->wd < 0)
                                /* Queue overflow has no watch descriptor */
                                continue;

                        if (e->mask & IN_IGNORED)
                                /* The watch was just removed */
                                continue;

                        u = hashmap_get(m->cgroup_inotify_wd_unit, INT_TO_PTR(e->wd));
                        if (!u) /* Note that inotify might deliver
                                 * events for a watch even after it
                                 * was removed, because it was queued
                                 * before the removal. Let's ignore
                                 * this here safely. */
                                continue;

                        unit_add_to_cgroup_empty_queue(u);
                }
        }
}
int manager_setup_cgroup(Manager *m) {
        _cleanup_free_ char *path = NULL;
        const char *scope_path;
        CGroupController c;
        int r, all_unified;
        char *e;

        assert(m);

        /* 1. Determine hierarchy */
        m->cgroup_root = mfree(m->cgroup_root);
        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root);
        if (r < 0)
                return log_error_errno(r, "Cannot determine cgroup we are running in: %m");

        /* Chop off the init scope, if we are already located in it */
        e = endswith(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);

        /* LEGACY: Also chop off the system slice if we are in
         * it. This is to support live upgrades from older systemd
         * versions where PID 1 was moved there. Also see
         * cg_get_root_path(). */
        if (!e && MANAGER_IS_SYSTEM(m)) {
                e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE);
                if (!e)
                        e = endswith(m->cgroup_root, "/system"); /* even more legacy */
        }
        if (e)
                *e = 0;

        /* And make sure to store away the root value without trailing slash, even for the root dir, so that we can
         * easily prepend it everywhere. */
        delete_trailing_chars(m->cgroup_root, "/");

        /* 2. Show data */
        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
        if (r < 0)
                return log_error_errno(r, "Cannot find cgroup mount point: %m");

        r = cg_unified_flush();
        if (r < 0)
                return log_error_errno(r, "Couldn't determine if we are running in the unified hierarchy: %m");

        all_unified = cg_all_unified();
        if (all_unified < 0)
                return log_error_errno(all_unified, "Couldn't determine whether we are in all unified mode: %m");
        if (all_unified > 0)
                log_debug("Unified cgroup hierarchy is located at %s.", path);
        else {
                r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
                if (r < 0)
                        return log_error_errno(r, "Failed to determine whether systemd's own controller is in unified mode: %m");
                if (r > 0)
                        log_debug("Unified cgroup hierarchy is located at %s. Controllers are on legacy hierarchies.", path);
                else
                        log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER_LEGACY ". File system hierarchy is at %s.", path);
        }

        /* 3. Allocate cgroup empty defer event source */
        m->cgroup_empty_event_source = sd_event_source_unref(m->cgroup_empty_event_source);
        r = sd_event_add_defer(m->event, &m->cgroup_empty_event_source, on_cgroup_empty_event, m);
        if (r < 0)
                return log_error_errno(r, "Failed to create cgroup empty event source: %m");

        r = sd_event_source_set_priority(m->cgroup_empty_event_source, SD_EVENT_PRIORITY_NORMAL-5);
        if (r < 0)
                return log_error_errno(r, "Failed to set priority of cgroup empty event source: %m");

        r = sd_event_source_set_enabled(m->cgroup_empty_event_source, SD_EVENT_OFF);
        if (r < 0)
                return log_error_errno(r, "Failed to disable cgroup empty event source: %m");

        (void) sd_event_source_set_description(m->cgroup_empty_event_source, "cgroup-empty");

        /* 4. Install notifier inotify object, or agent */
        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0) {

                /* In the unified hierarchy we can get cgroup empty notifications via inotify. */

                m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
                safe_close(m->cgroup_inotify_fd);

                m->cgroup_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
                if (m->cgroup_inotify_fd < 0)
                        return log_error_errno(errno, "Failed to create control group inotify object: %m");

                r = sd_event_add_io(m->event, &m->cgroup_inotify_event_source, m->cgroup_inotify_fd, EPOLLIN, on_cgroup_inotify_event, m);
                if (r < 0)
                        return log_error_errno(r, "Failed to watch control group inotify object: %m");

                /* Process cgroup empty notifications early, but after service notifications and SIGCHLD. Also
                 * see handling of cgroup agent notifications, for the classic cgroup hierarchy support. */
                r = sd_event_source_set_priority(m->cgroup_inotify_event_source, SD_EVENT_PRIORITY_NORMAL-4);
                if (r < 0)
                        return log_error_errno(r, "Failed to set priority of inotify event source: %m");

                (void) sd_event_source_set_description(m->cgroup_inotify_event_source, "cgroup-inotify");

        } else if (MANAGER_IS_SYSTEM(m) && m->test_run_flags == 0) {

                /* On the legacy hierarchy we only get notifications via cgroup agents. (Which isn't really reliable,
                 * since it does not generate events when control groups with children run empty.) */

                r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, SYSTEMD_CGROUP_AGENT_PATH);
                if (r < 0)
                        log_warning_errno(r, "Failed to install release agent, ignoring: %m");
                else if (r > 0)
                        log_debug("Installed release agent.");
                else if (r == 0)
                        log_debug("Release agent already installed.");
        }

        /* 5. Make sure we are in the special "init.scope" unit in the root slice. */
        scope_path = strjoina(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
        r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
        if (r >= 0) {
                /* Also, move all other userspace processes remaining in the root cgroup into that scope. */
                r = cg_migrate(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
                if (r < 0)
                        log_warning_errno(r, "Couldn't move remaining userspace processes, ignoring: %m");

                /* 6. And pin it, so that it cannot be unmounted */
                safe_close(m->pin_cgroupfs_fd);
                m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
                if (m->pin_cgroupfs_fd < 0)
                        return log_error_errno(errno, "Failed to open pin file: %m");

        } else if (!m->test_run_flags)
                return log_error_errno(r, "Failed to create %s control group: %m", scope_path);

        /* 7. Always enable hierarchical support if it exists... */
        if (!all_unified && m->test_run_flags == 0)
                (void) cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");

        /* 8. Figure out which controllers are supported, and log about it */
        r = cg_mask_supported(&m->cgroup_supported);
        if (r < 0)
                return log_error_errno(r, "Failed to determine supported controllers: %m");
        for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++)
                log_debug("Controller '%s' supported: %s", cgroup_controller_to_string(c), yes_no(m->cgroup_supported & CGROUP_CONTROLLER_TO_MASK(c)));

        return 0;
}
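/* To make the setup above concrete, a sketch of the state it typically leaves behind for a system instance on
 * a pure unified hierarchy (all values illustrative):
 *
 *     m->cgroup_root      ""                  (the "/init.scope" suffix was chopped off)
 *     path                "/sys/fs/cgroup"
 *     scope_path          "/init.scope"       (PID 1 has been attached here)
 *     m->pin_cgroupfs_fd  read-only directory fd on path, keeping the mount busy
 *
 * On a legacy system the mount point would be "/sys/fs/cgroup/systemd" instead, and empty notifications come
 * from the release agent rather than from inotify. */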
void manager_shutdown_cgroup(Manager *m, bool delete) {
        assert(m);

        /* We can't really delete the group, since we are in it. But
         * let's trim it. */
        if (delete && m->cgroup_root && m->test_run_flags != MANAGER_TEST_RUN_MINIMAL)
                (void) cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);

        m->cgroup_empty_event_source = sd_event_source_unref(m->cgroup_empty_event_source);

        m->cgroup_inotify_wd_unit = hashmap_free(m->cgroup_inotify_wd_unit);

        m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
        m->cgroup_inotify_fd = safe_close(m->cgroup_inotify_fd);

        m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd);

        m->cgroup_root = mfree(m->cgroup_root);
}
Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) {
        char *p;
        Unit *u;

        assert(m);
        assert(cgroup);

        u = hashmap_get(m->cgroup_unit, cgroup);
        if (u)
                return u;

        p = strdupa(cgroup);
        for (;;) {
                char *e;

                e = strrchr(p, '/');
                if (!e || e == p)
                        return hashmap_get(m->cgroup_unit, SPECIAL_ROOT_SLICE);

                *e = 0;

                u = hashmap_get(m->cgroup_unit, p);
                if (u)
                        return u;
        }
}
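/* An example of the upward walk above, assuming a hypothetical layout: asked for
 * "/foo.slice/bar.service/sub", we first try the exact path, then "/foo.slice/bar.service" (which matches the
 * unit, so processes living in unit-internal subcgroups are still attributed to it), then "/foo.slice". Only
 * if the path is exhausted without a match do we hand out the root slice. */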
Unit *manager_get_unit_by_pid_cgroup(Manager *m, pid_t pid) {
        _cleanup_free_ char *cgroup = NULL;

        assert(m);

        if (!pid_is_valid(pid))
                return NULL;

        if (cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup) < 0)
                return NULL;

        return manager_get_unit_by_cgroup(m, cgroup);
}
Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {
        Unit *u, **array;

        assert(m);

        /* Note that a process might be owned by multiple units; we return only one here, which is good enough for most
         * cases, though not strictly correct. We prefer the one reported by cgroup membership, as that's the most
         * relevant one, since children of the process will be assigned to that one, too, before all else. */

        if (!pid_is_valid(pid))
                return NULL;

        if (pid == getpid_cached())
                return hashmap_get(m->units, SPECIAL_INIT_SCOPE);

        u = manager_get_unit_by_pid_cgroup(m, pid);
        if (u)
                return u;

        u = hashmap_get(m->watch_pids, PID_TO_PTR(pid));
        if (u)
                return u;

        array = hashmap_get(m->watch_pids, PID_TO_PTR(-pid));
        if (array)
                return array[0];

        return NULL;
}
int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
        Unit *u;

        assert(m);
        assert(cgroup);

        /* Called on the legacy hierarchy whenever we get an explicit cgroup notification from the cgroup agent process
         * or from the --system instance */

        log_debug("Got cgroup empty notification for: %s", cgroup);

        u = manager_get_unit_by_cgroup(m, cgroup);
        if (!u)
                return 0;

        unit_add_to_cgroup_empty_queue(u);
        return 1;
}
int unit_get_memory_current(Unit *u, uint64_t *ret) {
        _cleanup_free_ char *v = NULL;
        int r;

        assert(u);
        assert(ret);

        if (!UNIT_CGROUP_BOOL(u, memory_accounting))
                return -ENODATA;

        if (!u->cgroup_path)
                return -ENODATA;

        /* The root cgroup doesn't expose this information, let's get it from /proc instead */
        if (unit_has_root_cgroup(u))
                return procfs_memory_get_current(ret);

        if ((u->cgroup_realized_mask & CGROUP_MASK_MEMORY) == 0)
                return -ENODATA;

        r = cg_all_unified();
        if (r < 0)
                return r;
        if (r > 0)
                r = cg_get_attribute("memory", u->cgroup_path, "memory.current", &v);
        else
                r = cg_get_attribute("memory", u->cgroup_path, "memory.usage_in_bytes", &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        return safe_atou64(v, ret);
}
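/* The two attributes read above are the per-hierarchy spellings of the same byte counter; for a unit with
 * cgroup path "/foo.slice/bar.service" (illustrative) that means:
 *
 *     unified: /sys/fs/cgroup/foo.slice/bar.service/memory.current
 *     legacy:  /sys/fs/cgroup/memory/foo.slice/bar.service/memory.usage_in_bytes
 *
 * Both files contain a single decimal byte count, hence the shared safe_atou64() parse at the end. */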
int unit_get_tasks_current(Unit *u, uint64_t *ret) {
        _cleanup_free_ char *v = NULL;
        int r;

        assert(u);
        assert(ret);

        if (!UNIT_CGROUP_BOOL(u, tasks_accounting))
                return -ENODATA;

        if (!u->cgroup_path)
                return -ENODATA;

        /* The root cgroup doesn't expose this information, let's get it from /proc instead */
        if (unit_has_root_cgroup(u))
                return procfs_tasks_get_current(ret);

        if ((u->cgroup_realized_mask & CGROUP_MASK_PIDS) == 0)
                return -ENODATA;

        r = cg_get_attribute("pids", u->cgroup_path, "pids.current", &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        return safe_atou64(v, ret);
}
static int unit_get_cpu_usage_raw(Unit *u, nsec_t *ret) {
        _cleanup_free_ char *v = NULL;
        uint64_t ns;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENODATA;

        /* The root cgroup doesn't expose this information, let's get it from /proc instead */
        if (unit_has_root_cgroup(u))
                return procfs_cpu_get_usage(ret);

        r = cg_all_unified();
        if (r < 0)
                return r;
        if (r > 0) {
                _cleanup_free_ char *val = NULL;
                uint64_t us;

                if ((u->cgroup_realized_mask & CGROUP_MASK_CPU) == 0)
                        return -ENODATA;

                r = cg_get_keyed_attribute("cpu", u->cgroup_path, "cpu.stat", STRV_MAKE("usage_usec"), &val);
                if (IN_SET(r, -ENOENT, -ENXIO))
                        return -ENODATA;
                if (r < 0)
                        return r;

                r = safe_atou64(val, &us);
                if (r < 0)
                        return r;

                ns = us * NSEC_PER_USEC;
        } else {
                if ((u->cgroup_realized_mask & CGROUP_MASK_CPUACCT) == 0)
                        return -ENODATA;

                r = cg_get_attribute("cpuacct", u->cgroup_path, "cpuacct.usage", &v);
                if (r == -ENOENT)
                        return -ENODATA;
                if (r < 0)
                        return r;

                r = safe_atou64(v, &ns);
                if (r < 0)
                        return r;
        }

        *ret = ns;
        return 0;
}
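/* For reference, the two raw sources parsed above. On the unified hierarchy cpu.stat is a keyed file with
 * microsecond granularity, e.g.:
 *
 *     usage_usec 5312000
 *     user_usec 4001000
 *     system_usec 1311000
 *
 * which is why the usage_usec value is multiplied by NSEC_PER_USEC. The legacy cpuacct.usage file already
 * carries a single nanosecond value and is used as-is. */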
int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
        nsec_t ns;
        int r;

        assert(u);

        /* Retrieve the current CPU usage counter. This will subtract the CPU counter taken when the unit was
         * started. If the cgroup has been removed already, returns the last cached value. To cache the value, simply
         * call this function with a NULL return value. */

        if (!UNIT_CGROUP_BOOL(u, cpu_accounting))
                return -ENODATA;

        r = unit_get_cpu_usage_raw(u, &ns);
        if (r == -ENODATA && u->cpu_usage_last != NSEC_INFINITY) {
                /* If we can't get the CPU usage anymore (because the cgroup was already removed, for example), use our
                 * cached value. */

                if (ret)
                        *ret = u->cpu_usage_last;
                return 0;
        }
        if (r < 0)
                return r;

        if (ns > u->cpu_usage_base)
                ns -= u->cpu_usage_base;
        else
                ns = 0;

        u->cpu_usage_last = ns;
        if (ret)
                *ret = ns;

        return 0;
}
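/* A worked example of the base/last bookkeeping above: if the raw counter stood at 2000000000 ns when the
 * unit started (cpu_usage_base) and a raw read now returns 5000000000 ns, we report 3000000000 ns, and
 * cpu_usage_last caches that result so it can still be returned via the -ENODATA branch once the cgroup has
 * been removed. */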
int unit_get_ip_accounting(
                Unit *u,
                CGroupIPAccountingMetric metric,
                uint64_t *ret) {

        uint64_t value;
        int fd, r;

        assert(u);
        assert(metric >= 0);
        assert(metric < _CGROUP_IP_ACCOUNTING_METRIC_MAX);
        assert(ret);

        if (!UNIT_CGROUP_BOOL(u, ip_accounting))
                return -ENODATA;

        fd = IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_INGRESS_PACKETS) ?
                u->ip_accounting_ingress_map_fd :
                u->ip_accounting_egress_map_fd;
        if (fd < 0)
                return -ENODATA;

        if (IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
                r = bpf_firewall_read_accounting(fd, &value, NULL);
        else
                r = bpf_firewall_read_accounting(fd, NULL, &value);
        if (r < 0)
                return r;

        /* Add in additional metrics from a previous runtime. Note that when reexecing/reloading the daemon we compile
         * all BPF programs and maps anew, but serialize the old counters. When deserializing we store them in the
         * ip_accounting_extra[] field, and add them in here transparently. */

        *ret = value + u->ip_accounting_extra[metric];

        return r;
}
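/* An example of the ip_accounting_extra[] carry-over above: if a daemon reload serialized an ingress byte
 * count of 1000 and the freshly compiled BPF map has counted 200 bytes since, the caller sees 1200, so
 * reexecs and reloads are invisible in the returned totals. */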
int unit_reset_cpu_accounting(Unit *u) {
        nsec_t ns;
        int r;

        assert(u);

        u->cpu_usage_last = NSEC_INFINITY;

        r = unit_get_cpu_usage_raw(u, &ns);
        if (r < 0) {
                u->cpu_usage_base = 0;
                return r;
        }

        u->cpu_usage_base = ns;
        return 0;
}
int unit_reset_ip_accounting(Unit *u) {
        int r = 0, q = 0;

        assert(u);

        if (u->ip_accounting_ingress_map_fd >= 0)
                r = bpf_firewall_reset_accounting(u->ip_accounting_ingress_map_fd);

        if (u->ip_accounting_egress_map_fd >= 0)
                q = bpf_firewall_reset_accounting(u->ip_accounting_egress_map_fd);

        zero(u->ip_accounting_extra);

        return r < 0 ? r : q;
}
void unit_invalidate_cgroup(Unit *u, CGroupMask m) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return;

        if (m == 0)
                return;

        /* always invalidate compat pairs together */
        if (m & (CGROUP_MASK_IO | CGROUP_MASK_BLKIO))
                m |= CGROUP_MASK_IO | CGROUP_MASK_BLKIO;

        if (m & (CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT))
                m |= CGROUP_MASK_CPU | CGROUP_MASK_CPUACCT;

        if ((u->cgroup_realized_mask & m) == 0) /* NOP? */
                return;

        u->cgroup_realized_mask &= ~m;
        unit_add_to_cgroup_realize_queue(u);
}
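/* An example of the compat pairing above: unit_invalidate_cgroup(u, CGROUP_MASK_IO) widens the mask to
 * CGROUP_MASK_IO|CGROUP_MASK_BLKIO before the realized-mask test, so a changed IO setting also re-realizes
 * the legacy blkio attributes translated from it, and vice versa. */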
void unit_invalidate_cgroup_bpf(Unit *u) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return;

        if (u->cgroup_bpf_state == UNIT_CGROUP_BPF_INVALIDATED) /* NOP? */
                return;

        u->cgroup_bpf_state = UNIT_CGROUP_BPF_INVALIDATED;
        unit_add_to_cgroup_realize_queue(u);

        /* If we are a slice unit, we also need to compile a new BPF program for all our children, as the IP access
         * list of our children includes our own. */
        if (u->type == UNIT_SLICE) {
                Unit *member;
                Iterator i;
                void *v;

                HASHMAP_FOREACH_KEY(v, member, u->dependencies[UNIT_BEFORE], i) {
                        if (member == u)
                                continue;

                        if (UNIT_DEREF(member->slice) != u)
                                continue;

                        unit_invalidate_cgroup_bpf(member);
                }
        }
}
bool unit_cgroup_delegate(Unit *u) {
        CGroupContext *c;

        assert(u);

        if (!UNIT_VTABLE(u)->can_delegate)
                return false;

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        return c->delegate;
}
void manager_invalidate_startup_units(Manager *m) {
        Iterator i;
        Unit *u;

        assert(m);

        SET_FOREACH(u, m->startup_units, i)
                unit_invalidate_cgroup(u, CGROUP_MASK_CPU|CGROUP_MASK_IO|CGROUP_MASK_BLKIO);
}
static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
        [CGROUP_AUTO] = "auto",
        [CGROUP_CLOSED] = "closed",
        [CGROUP_STRICT] = "strict",
};

DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);
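/* DEFINE_STRING_TABLE_LOOKUP() expands the table above into the usual pair of helpers:
 *
 *     const char* cgroup_device_policy_to_string(CGroupDevicePolicy i);
 *     CGroupDevicePolicy cgroup_device_policy_from_string(const char *s);
 *
 * e.g. cgroup_device_policy_from_string("closed") returns CGROUP_CLOSED, while an unknown string maps to
 * _CGROUP_DEVICE_POLICY_INVALID. */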