/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/

/***
  This file is part of systemd.

  Copyright 2013 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/

#include <fcntl.h>
#include <fnmatch.h>

#include "alloc-util.h"
#include "cgroup-util.h"
#include "cgroup.h"
#include "fd-util.h"
#include "path-util.h"
#include "process-util.h"
#include "special.h"
#include "string-table.h"
#include "string-util.h"

#define CGROUP_CPU_QUOTA_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)

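/* Example: with this 100 ms CFS period, CPUQuota=150% (i.e. cpu_quota_per_sec_usec == 1500000)
 * is written out below as cpu.cfs_quota_us = 1500000 * 100000 / 1000000 = 150000, i.e. 150 ms
 * of CPU time per 100 ms period. */
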
void cgroup_context_init(CGroupContext *c) {
        assert(c);

        /* Initialize everything to the kernel defaults, assuming the
         * structure is preinitialized to 0 */

        c->cpu_shares = CGROUP_CPU_SHARES_INVALID;
        c->startup_cpu_shares = CGROUP_CPU_SHARES_INVALID;
        c->cpu_quota_per_sec_usec = USEC_INFINITY;

        c->memory_limit = (uint64_t) -1;

        c->blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;
        c->startup_blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;

        c->tasks_max = (uint64_t) -1;

        c->netclass_type = CGROUP_NETCLASS_TYPE_NONE;
}

void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
        assert(c);
        assert(a);

        LIST_REMOVE(device_allow, c->device_allow, a);
        free(a->path);
        free(a);
}

void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->blockio_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b) {
        assert(c);
        assert(b);

        LIST_REMOVE(device_bandwidths, c->blockio_device_bandwidths, b);
        free(b->path);
        free(b);
}

void cgroup_context_done(CGroupContext *c) {
        assert(c);

        while (c->blockio_device_weights)
                cgroup_context_free_blockio_device_weight(c, c->blockio_device_weights);

        while (c->blockio_device_bandwidths)
                cgroup_context_free_blockio_device_bandwidth(c, c->blockio_device_bandwidths);

        while (c->device_allow)
                cgroup_context_free_device_allow(c, c->device_allow);
}

void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) {
        CGroupBlockIODeviceBandwidth *b;
        CGroupBlockIODeviceWeight *w;
        CGroupDeviceAllow *a;
        char u[FORMAT_TIMESPAN_MAX];

        assert(c);
        assert(f);

        prefix = strempty(prefix);

        fprintf(f,
                "%sCPUAccounting=%s\n"
                "%sBlockIOAccounting=%s\n"
                "%sMemoryAccounting=%s\n"
                "%sTasksAccounting=%s\n"
                "%sCPUShares=%" PRIu64 "\n"
                "%sStartupCPUShares=%" PRIu64 "\n"
                "%sCPUQuotaPerSecSec=%s\n"
                "%sBlockIOWeight=%" PRIu64 "\n"
                "%sStartupBlockIOWeight=%" PRIu64 "\n"
                "%sMemoryLimit=%" PRIu64 "\n"
                "%sTasksMax=%" PRIu64 "\n"
                "%sDevicePolicy=%s\n"
                "%sDelegate=%s\n",
                prefix, yes_no(c->cpu_accounting),
                prefix, yes_no(c->blockio_accounting),
                prefix, yes_no(c->memory_accounting),
                prefix, yes_no(c->tasks_accounting),
                prefix, c->cpu_shares,
                prefix, c->startup_cpu_shares,
                prefix, format_timespan(u, sizeof(u), c->cpu_quota_per_sec_usec, 1),
                prefix, c->blockio_weight,
                prefix, c->startup_blockio_weight,
                prefix, c->memory_limit,
                prefix, c->tasks_max,
                prefix, cgroup_device_policy_to_string(c->device_policy),
                prefix, yes_no(c->delegate));

        LIST_FOREACH(device_allow, a, c->device_allow)
                fprintf(f,
                        "%sDeviceAllow=%s %s%s%s\n",
                        prefix,
                        a->path,
                        a->r ? "r" : "", a->w ? "w" : "", a->m ? "m" : "");

        LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                fprintf(f,
                        "%sBlockIODeviceWeight=%s %" PRIu64,
                        prefix,
                        w->path,
                        w->weight);

        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                char buf[FORMAT_BYTES_MAX];

                fprintf(f,
                        "%s%s=%s %s\n",
                        prefix,
                        b->read ? "BlockIOReadBandwidth" : "BlockIOWriteBandwidth",
                        b->path,
                        format_bytes(buf, sizeof(buf), b->bandwidth));
        }
}

static int lookup_blkio_device(const char *p, dev_t *dev) {
        struct stat st;
        int r;

        assert(p);
        assert(dev);

        r = stat(p, &st);
        if (r < 0)
                return log_warning_errno(errno, "Couldn't stat device %s: %m", p);

        if (S_ISBLK(st.st_mode))
                *dev = st.st_rdev;
        else if (major(st.st_dev) != 0) {
                /* If this is not a device node then find the block
                 * device this file is stored on */
                *dev = st.st_dev;

                /* If this is a partition, try to get the originating
                 * block device */
                block_get_whole_disk(*dev, dev);
        } else {
                log_warning("%s is not a block device and file system block device cannot be determined or is not local.", p);
                return -ENODEV;
        }

        return 0;
}

static int whitelist_device(const char *path, const char *node, const char *acc) {
        char buf[2+DECIMAL_STR_MAX(dev_t)*2+2+4];
        struct stat st;
        int r;

        assert(path);
        assert(acc);

        if (stat(node, &st) < 0) {
                log_warning("Couldn't stat device %s", node);
                return -errno;
        }

        if (!S_ISCHR(st.st_mode) && !S_ISBLK(st.st_mode)) {
                log_warning("%s is not a device.", node);
                return -ENODEV;
        }

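        /* buf will hold a devices.allow entry of the form "<type> <major>:<minor> <access>",
         * e.g. "c 1:3 rwm" for read/write/mknod access to /dev/null. */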
        sprintf(buf,
                "%c %u:%u %s",
                S_ISCHR(st.st_mode) ? 'c' : 'b',
                major(st.st_rdev), minor(st.st_rdev),
                acc);

        r = cg_set_attribute("devices", path, "devices.allow", buf);
        if (r < 0)
                log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL) ? LOG_DEBUG : LOG_WARNING, r,
                               "Failed to set devices.allow on %s: %m", path);

        return r;
}

static int whitelist_major(const char *path, const char *name, char type, const char *acc) {
        _cleanup_fclose_ FILE *f = NULL;
        char line[LINE_MAX];
        bool good = false;
        int r;

        assert(path);
        assert(acc);
        assert(type == 'b' || type == 'c');

        f = fopen("/proc/devices", "re");
        if (!f)
                return log_warning_errno(errno, "Cannot open /proc/devices to resolve %s (%c): %m", name, type);

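        /* /proc/devices lists majors as "<major> <name>" lines grouped under the
         * "Character devices:" and "Block devices:" headers, e.g. "  1 mem" or "  8 sd". */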
        FOREACH_LINE(line, f, goto fail) {
                char buf[2+DECIMAL_STR_MAX(unsigned)+3+4], *p, *w;
                unsigned maj;

                truncate_nl(line);

                /* Track whether we are inside the section that matches the requested device type */
                if (type == 'c' && streq(line, "Character devices:")) {
                        good = true;
                        continue;
                }
                if (type == 'b' && streq(line, "Block devices:")) {
                        good = true;
                        continue;
                }
                if (isempty(line)) {
                        good = false;
                        continue;
                }
                if (!good)
                        continue;

                /* Split the entry into major number and driver name, and match the name */
                p = strstrip(line);
                w = strpbrk(p, WHITESPACE);
                if (!w)
                        continue;
                *w = 0;

                r = safe_atou(p, &maj);
                if (r < 0)
                        continue;

                w++;
                w += strspn(w, WHITESPACE);
                if (fnmatch(name, w, 0) != 0)
                        continue;

                sprintf(buf, "%c %u:* %s", type, maj, acc);
                r = cg_set_attribute("devices", path, "devices.allow", buf);
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set devices.allow on %s: %m", path);
        }

        return 0;

fail:
        log_warning_errno(errno, "Failed to read /proc/devices: %m");
        return -errno;
}

void cgroup_context_apply(CGroupContext *c, CGroupMask mask, const char *path, uint32_t netclass, ManagerState state) {
        bool is_root;
        int r;

        assert(c);
        assert(path);

        if (mask == 0)
                return;

        /* Some cgroup attributes are not supported on the root cgroup,
         * hence silently ignore */
        is_root = isempty(path) || path_equal(path, "/");
        if (is_root)
                /* Make sure we don't try to display messages with an empty path. */
                path = "/";

        /* We generally ignore errors caused by read-only mounted
         * cgroup trees (assuming we are running in a container then),
         * and missing cgroups, i.e. EROFS and ENOENT. */

        if ((mask & CGROUP_MASK_CPU) && !is_root) {
                char buf[MAX(DECIMAL_STR_MAX(uint64_t), DECIMAL_STR_MAX(usec_t)) + 1];

                sprintf(buf, "%" PRIu64 "\n",
                        IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) && c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID ? c->startup_cpu_shares :
                        c->cpu_shares != CGROUP_CPU_SHARES_INVALID ? c->cpu_shares : CGROUP_CPU_SHARES_DEFAULT);
                r = cg_set_attribute("cpu", path, "cpu.shares", buf);
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set cpu.shares on %s: %m", path);

                sprintf(buf, USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);
                r = cg_set_attribute("cpu", path, "cpu.cfs_period_us", buf);
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set cpu.cfs_period_us on %s: %m", path);

                if (c->cpu_quota_per_sec_usec != USEC_INFINITY) {
                        sprintf(buf, USEC_FMT "\n", c->cpu_quota_per_sec_usec * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC);
                        r = cg_set_attribute("cpu", path, "cpu.cfs_quota_us", buf);
                } else
                        r = cg_set_attribute("cpu", path, "cpu.cfs_quota_us", "-1");
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set cpu.cfs_quota_us on %s: %m", path);
        }

        if (mask & CGROUP_MASK_BLKIO) {
                char buf[MAX(DECIMAL_STR_MAX(uint64_t)+1,
                             DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1)];
                CGroupBlockIODeviceWeight *w;
                CGroupBlockIODeviceBandwidth *b;

                if (!is_root) {
                        sprintf(buf, "%" PRIu64 "\n",
                                IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) && c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ? c->startup_blockio_weight :
                                c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ? c->blockio_weight : CGROUP_BLKIO_WEIGHT_DEFAULT);
                        r = cg_set_attribute("blkio", path, "blkio.weight", buf);
                        if (r < 0)
                                log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
                                               "Failed to set blkio.weight on %s: %m", path);

                        /* FIXME: no way to reset this list */
                        LIST_FOREACH(device_weights, w, c->blockio_device_weights) {
                                dev_t dev;

                                r = lookup_blkio_device(w->path, &dev);
                                if (r < 0)
                                        continue;

                                sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), w->weight);

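                                /* The value written is "<major>:<minor> <weight>", e.g.
                                 * "8:0 500" to assign the whole disk /dev/sda a weight of 500. */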
                                r = cg_set_attribute("blkio", path, "blkio.weight_device", buf);
                                if (r < 0)
                                        log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
                                                       "Failed to set blkio.weight_device on %s: %m", path);
                        }
                }

                /* FIXME: no way to reset this list */
                LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                        const char *a;
                        dev_t dev;

                        r = lookup_blkio_device(b->path, &dev);
                        if (r < 0)
                                continue;

                        a = b->read ? "blkio.throttle.read_bps_device" : "blkio.throttle.write_bps_device";

                        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), b->bandwidth);
                        r = cg_set_attribute("blkio", path, a, buf);
                        if (r < 0)
                                log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
                                               "Failed to set %s on %s: %m", a, path);
                }
        }

        if ((mask & CGROUP_MASK_MEMORY) && !is_root) {
                if (c->memory_limit != (uint64_t) -1) {
                        char buf[DECIMAL_STR_MAX(uint64_t) + 1];

                        sprintf(buf, "%" PRIu64 "\n", c->memory_limit);

                        if (cg_unified() <= 0)
                                r = cg_set_attribute("memory", path, "memory.limit_in_bytes", buf);
                        else
                                r = cg_set_attribute("memory", path, "memory.max", buf);

                } else {
                        if (cg_unified() <= 0)
                                r = cg_set_attribute("memory", path, "memory.limit_in_bytes", "-1");
                        else
                                r = cg_set_attribute("memory", path, "memory.max", "max");
                }

                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set memory.limit_in_bytes/memory.max on %s: %m", path);
        }

        if ((mask & CGROUP_MASK_DEVICES) && !is_root) {
                CGroupDeviceAllow *a;

                /* Changing the devices list of a populated cgroup
                 * might result in EINVAL, hence ignore EINVAL
                 * here. */

                if (c->device_allow || c->device_policy != CGROUP_AUTO)
                        r = cg_set_attribute("devices", path, "devices.deny", "a");
                else
                        r = cg_set_attribute("devices", path, "devices.allow", "a");
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to reset devices.list on %s: %m", path);

                if (c->device_policy == CGROUP_CLOSED ||
                    (c->device_policy == CGROUP_AUTO && c->device_allow)) {
                        static const char auto_devices[] =
                                "/dev/null\0" "rwm\0"
                                "/dev/zero\0" "rwm\0"
                                "/dev/full\0" "rwm\0"
                                "/dev/random\0" "rwm\0"
                                "/dev/urandom\0" "rwm\0"
                                "/dev/tty\0" "rwm\0"
                                "/dev/pts/ptmx\0" "rw\0"; /* /dev/pts/ptmx may not be duplicated, but accessed */

                        const char *x, *y;

                        NULSTR_FOREACH_PAIR(x, y, auto_devices)
                                whitelist_device(path, x, y);

                        whitelist_major(path, "pts", 'c', "rw");
                        whitelist_major(path, "kdbus", 'c', "rw");
                        whitelist_major(path, "kdbus/*", 'c', "rw");
                }

                LIST_FOREACH(device_allow, a, c->device_allow) {
                        char acc[4];
                        unsigned k = 0;

                        if (a->r)
                                acc[k++] = 'r';
                        if (a->w)
                                acc[k++] = 'w';
                        if (a->m)
                                acc[k++] = 'm';

                        if (k == 0)
                                continue;

                        acc[k++] = 0;

                        if (startswith(a->path, "/dev/"))
                                whitelist_device(path, a->path, acc);
                        else if (startswith(a->path, "block-"))
                                whitelist_major(path, a->path + 6, 'b', acc);
                        else if (startswith(a->path, "char-"))
                                whitelist_major(path, a->path + 5, 'c', acc);
                        else
                                log_debug("Ignoring device %s while writing cgroup attribute.", a->path);
                }
        }

        if ((mask & CGROUP_MASK_PIDS) && !is_root) {

                if (c->tasks_max != (uint64_t) -1) {
                        char buf[DECIMAL_STR_MAX(uint64_t) + 2];

                        sprintf(buf, "%" PRIu64 "\n", c->tasks_max);
                        r = cg_set_attribute("pids", path, "pids.max", buf);
                } else
                        r = cg_set_attribute("pids", path, "pids.max", "max");

                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set pids.max on %s: %m", path);
        }

        if (mask & CGROUP_MASK_NET_CLS) {
                char buf[DECIMAL_STR_MAX(uint32_t)];

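                /* net_cls.classid takes the 32-bit class ID as a plain decimal number;
                 * tc and iptables rules can then match traffic of this cgroup on it. */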
                sprintf(buf, "%" PRIu32, netclass);

                r = cg_set_attribute("net_cls", path, "net_cls.classid", buf);
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set net_cls.classid on %s: %m", path);
        }
}

CGroupMask cgroup_context_get_mask(CGroupContext *c) {
        CGroupMask mask = 0;

        /* Figure out which controllers we need */

        if (c->cpu_accounting ||
            c->cpu_shares != CGROUP_CPU_SHARES_INVALID ||
            c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID ||
            c->cpu_quota_per_sec_usec != USEC_INFINITY)
                mask |= CGROUP_MASK_CPUACCT | CGROUP_MASK_CPU;

        if (c->blockio_accounting ||
            c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
            c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
            c->blockio_device_weights ||
            c->blockio_device_bandwidths)
                mask |= CGROUP_MASK_BLKIO;

        if (c->memory_accounting ||
            c->memory_limit != (uint64_t) -1)
                mask |= CGROUP_MASK_MEMORY;

        if (c->device_allow ||
            c->device_policy != CGROUP_AUTO)
                mask |= CGROUP_MASK_DEVICES;

        if (c->tasks_accounting ||
            c->tasks_max != (uint64_t) -1)
                mask |= CGROUP_MASK_PIDS;

        if (c->netclass_type != CGROUP_NETCLASS_TYPE_NONE)
                mask |= CGROUP_MASK_NET_CLS;

        return mask;
}

CGroupMask unit_get_own_mask(Unit *u) {
        CGroupContext *c;

        /* Returns the mask of controllers the unit needs for itself */

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        /* If delegation is turned on, then turn on all cgroups,
         * unless we are on the legacy hierarchy and the process we
         * fork into it is known to drop privileges, and hence
         * shouldn't get access to the controllers.
         *
         * Note that on the unified hierarchy it is safe to delegate
         * controllers to unprivileged services. */

        if (c->delegate) {
                ExecContext *e;

                e = unit_get_exec_context(u);
                if (!e ||
                    exec_context_maintains_privileges(e) ||
                    cg_unified() > 0)
                        return _CGROUP_MASK_ALL;
        }

        return cgroup_context_get_mask(c);
}

CGroupMask unit_get_members_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's children
         * require, merged */

        if (u->cgroup_members_mask_valid)
                return u->cgroup_members_mask;

        u->cgroup_members_mask = 0;

        if (u->type == UNIT_SLICE) {
                Unit *member;
                Iterator i;

                SET_FOREACH(member, u->dependencies[UNIT_BEFORE], i) {

                        if (member == u)
                                continue;

                        if (UNIT_DEREF(member->slice) != u)
                                continue;

                        u->cgroup_members_mask |=
                                unit_get_own_mask(member) |
                                unit_get_members_mask(member);
                }
        }

        u->cgroup_members_mask_valid = true;
        return u->cgroup_members_mask;
}

CGroupMask unit_get_siblings_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's siblings
         * require, i.e. the members mask of the unit's parent slice
         * if there is one. */

        if (UNIT_ISSET(u->slice))
                return unit_get_members_mask(UNIT_DEREF(u->slice));

        return unit_get_own_mask(u) | unit_get_members_mask(u);
}

CGroupMask unit_get_subtree_mask(Unit *u) {

        /* Returns the mask of this subtree, meaning of the group
         * itself and its children. */

        return unit_get_own_mask(u) | unit_get_members_mask(u);
}

CGroupMask unit_get_target_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable
         * for a specific cgroup, i.e. everything it needs itself,
         * plus all that its children need, plus all that its siblings
         * need. This is primarily useful on the legacy cgroup
         * hierarchy, where we need to duplicate each cgroup in each
         * hierarchy that shall be enabled for it. */

        mask = unit_get_own_mask(u) | unit_get_members_mask(u) | unit_get_siblings_mask(u);
        mask &= u->manager->cgroup_supported;

        return mask;
}

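/* For example, a unit that only sets MemoryLimit=, sitting next to siblings that use
 * CPUShares=, ends up with CGROUP_MASK_MEMORY as well as CGROUP_MASK_CPU|CGROUP_MASK_CPUACCT
 * in its target mask (its own needs plus the siblings' needs), intersected with the
 * controllers the kernel actually supports. */
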
CGroupMask unit_get_enable_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable
         * for the children of a specific cgroup. This is primarily
         * useful for the unified cgroup hierarchy, where each cgroup
         * controls which controllers are enabled for its children. */

        mask = unit_get_members_mask(u);
        mask &= u->manager->cgroup_supported;

        return mask;
}

/* Recurse from a unit up through its containing slices, propagating
 * mask bits upward. A unit is also a member of itself. */
void unit_update_cgroup_members_masks(Unit *u) {
        CGroupMask m;
        bool more;

        assert(u);

        /* Calculate subtree mask */
        m = unit_get_subtree_mask(u);

        /* See if anything changed from the previous invocation. If
         * not, we're done. */
        if (u->cgroup_subtree_mask_valid && m == u->cgroup_subtree_mask)
                return;

        more =
                u->cgroup_subtree_mask_valid &&
                ((m & ~u->cgroup_subtree_mask) != 0) &&
                ((~m & u->cgroup_subtree_mask) == 0);

        u->cgroup_subtree_mask = m;
        u->cgroup_subtree_mask_valid = true;

        if (UNIT_ISSET(u->slice)) {
                Unit *s = UNIT_DEREF(u->slice);

                if (more)
                        /* There's more set now than before. We
                         * propagate the new mask to the parent's mask
                         * (not caring if it actually was valid or
                         * not). */
                        s->cgroup_members_mask |= m;
                else
                        /* There's less set now than before (or we
                         * don't know), we need to recalculate
                         * everything, so let's invalidate the
                         * parent's members mask */
                        s->cgroup_members_mask_valid = false;

                /* And now make sure that this change also hits our
                 * grandparents */
                unit_update_cgroup_members_masks(s);
        }
}

static const char *migrate_callback(CGroupMask mask, void *userdata) {
        Unit *u = userdata;

        assert(mask != 0);
        assert(u);

        while (u) {
                if (u->cgroup_path &&
                    u->cgroup_realized &&
                    (u->cgroup_realized_mask & mask) == mask)
                        return u->cgroup_path;

                u = UNIT_DEREF(u->slice);
        }

        return NULL;
}

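/* The default cgroup path is built from the escaped unit name below the slice it is
 * placed in, e.g. a unit "foo.service" in "bar.slice" ends up at
 * "<cgroup_root>/bar.slice/foo.service", while units in the root slice end up directly
 * below <cgroup_root>. */
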
char *unit_default_cgroup_path(Unit *u) {
        _cleanup_free_ char *escaped = NULL, *slice = NULL;
        int r;

        assert(u);

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return strdup(u->manager->cgroup_root);

        if (UNIT_ISSET(u->slice) && !unit_has_name(UNIT_DEREF(u->slice), SPECIAL_ROOT_SLICE)) {
                r = cg_slice_to_path(UNIT_DEREF(u->slice)->id, &slice);
                if (r < 0)
                        return NULL;
        }

        escaped = cg_escape(u->id);
        if (!escaped)
                return NULL;

        if (slice)
                return strjoin(u->manager->cgroup_root, "/", slice, "/", escaped, NULL);
        else
                return strjoin(u->manager->cgroup_root, "/", escaped, NULL);
}

int unit_set_cgroup_path(Unit *u, const char *path) {
        _cleanup_free_ char *p = NULL;
        int r;

        assert(u);

        if (path) {
                p = strdup(path);
                if (!p)
                        return -ENOMEM;
        }

        if (streq_ptr(u->cgroup_path, p))
                return 0;

        if (p) {
                r = hashmap_put(u->manager->cgroup_unit, p, u);
                if (r < 0)
                        return r;
        }

        unit_release_cgroup(u);

        u->cgroup_path = p;
        p = NULL;

        return 0;
}

int unit_watch_cgroup(Unit *u) {
        _cleanup_free_ char *populated = NULL;
        int r;

        assert(u);

        if (!u->cgroup_path)
                return 0;

        if (u->cgroup_inotify_wd >= 0)
                return 0;

        /* Only applies to the unified hierarchy */
        r = cg_unified();
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to detect whether the unified hierarchy is used: %m");
        if (r == 0)
                return 0;

        /* Don't watch the root slice, it's pointless. */
        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        r = hashmap_ensure_allocated(&u->manager->cgroup_inotify_wd_unit, &trivial_hash_ops);
        if (r < 0)
                return log_oom();

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.populated", &populated);
        if (r < 0)
                return log_oom();

->cgroup_inotify_wd
= inotify_add_watch(u
->manager
->cgroup_inotify_fd
, populated
, IN_MODIFY
);
811 if (u
->cgroup_inotify_wd
< 0) {
813 if (errno
== ENOENT
) /* If the directory is already
814 * gone we don't need to track
815 * it, so this is not an error */
818 return log_unit_error_errno(u
, errno
, "Failed to add inotify watch descriptor for control group %s: %m", u
->cgroup_path
);
821 r
= hashmap_put(u
->manager
->cgroup_inotify_wd_unit
, INT_TO_PTR(u
->cgroup_inotify_wd
), u
);
823 return log_unit_error_errno(u
, r
, "Failed to add inotify watch descriptor to hash map: %m");
static int unit_create_cgroup(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask) {

        CGroupContext *c;
        int r;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        if (!u->cgroup_path) {
                _cleanup_free_ char *path = NULL;

                path = unit_default_cgroup_path(u);
                if (!path)
                        return log_oom();

                r = unit_set_cgroup_path(u, path);
                if (r == -EEXIST)
                        return log_unit_error_errno(u, r, "Control group %s exists already.", path);
                if (r < 0)
                        return log_unit_error_errno(u, r, "Failed to set unit's control group path to %s: %m", path);
        }

        /* First, create our own group */
        r = cg_create_everywhere(u->manager->cgroup_supported, target_mask, u->cgroup_path);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to create cgroup %s: %m", u->cgroup_path);

        /* Start watching it */
        (void) unit_watch_cgroup(u);

        /* Enable all controllers we need */
        r = cg_enable_everywhere(u->manager->cgroup_supported, enable_mask, u->cgroup_path);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enable controllers on cgroup %s, ignoring: %m", u->cgroup_path);

        /* Keep track that this is now realized */
        u->cgroup_realized = true;
        u->cgroup_realized_mask = target_mask;

        if (u->type != UNIT_SLICE && !c->delegate) {

                /* Then, possibly move things over, but not if
                 * subgroups may contain processes, which is the case
                 * for slice and delegation units. */
                r = cg_migrate_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->cgroup_path, migrate_callback, u);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to migrate cgroup to %s, ignoring: %m", u->cgroup_path);
        }

        return 0;
}

int unit_attach_pids_to_cgroup(Unit *u) {
        int r;

        assert(u);

        r = unit_realize_cgroup(u);
        if (r < 0)
                return r;

        r = cg_attach_many_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->pids, migrate_callback, u);
        if (r < 0)
                return r;

        return 0;
}

static bool unit_has_mask_realized(Unit *u, CGroupMask target_mask) {
        assert(u);

        return u->cgroup_realized && u->cgroup_realized_mask == target_mask;
}

static int unit_find_free_netclass_cgroup(Unit *u, uint32_t *ret) {
        uint32_t start, i;
        Manager *m;

        assert(u);

        m = u->manager;

        /* Scan for an unused net class ID, starting after the one we handed out last */
        i = start = m->cgroup_netclass_registry_last;

        do {
                i++;

                if (!hashmap_get(m->cgroup_netclass_registry, UINT_TO_PTR(i))) {
                        m->cgroup_netclass_registry_last = i;
                        *ret = i;
                        return 0;
                }

                if (i == UINT32_MAX)
                        i = CGROUP_NETCLASS_FIXED_MAX;

        } while (i != start);

        return -ENOBUFS;
}

int unit_add_to_netclass_cgroup(Unit *u) {
        CGroupContext *cc;
        Unit *first;
        void *key;
        int r;

        assert(u);

        cc = unit_get_cgroup_context(u);
        if (!cc)
                return 0;

        switch (cc->netclass_type) {
        case CGROUP_NETCLASS_TYPE_NONE:
                return 0;

        case CGROUP_NETCLASS_TYPE_FIXED:
                u->cgroup_netclass_id = cc->netclass_id;
                break;

        case CGROUP_NETCLASS_TYPE_AUTO:
                /* Allocate a new ID in case it was requested and not done yet */
                if (u->cgroup_netclass_id == 0) {
                        r = unit_find_free_netclass_cgroup(u, &u->cgroup_netclass_id);
                        if (r < 0)
                                return r;

                        log_debug("Dynamically assigned netclass cgroup id %" PRIu32 " to %s", u->cgroup_netclass_id, u->id);
                }

                break;
        }

        r = hashmap_ensure_allocated(&u->manager->cgroup_netclass_registry, &trivial_hash_ops);
        if (r < 0)
                return r;

        key = UINT32_TO_PTR(u->cgroup_netclass_id);
        first = hashmap_get(u->manager->cgroup_netclass_registry, key);

        if (first) {
                LIST_PREPEND(cgroup_netclass, first, u);
                return hashmap_replace(u->manager->cgroup_netclass_registry, key, u);
        }

        return hashmap_put(u->manager->cgroup_netclass_registry, key, u);
}

int unit_remove_from_netclass_cgroup(Unit *u) {
        Unit *head;
        void *key;

        assert(u);

        key = UINT32_TO_PTR(u->cgroup_netclass_id);

        LIST_FIND_HEAD(cgroup_netclass, u, head);
        LIST_REMOVE(cgroup_netclass, head, u);

        if (head)
                return hashmap_replace(u->manager->cgroup_netclass_registry, key, head);

        hashmap_remove(u->manager->cgroup_netclass_registry, key);

        u->cgroup_netclass_id = 0;

        return 0;
}

/* Check if necessary controllers and attributes for a unit are in place.
 *
 * If so, do nothing.
 * If not, create paths, move processes over, and set attributes.
 *
 * Returns 0 on success and < 0 on failure. */
static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
        CGroupMask target_mask, enable_mask;
        int r;

        assert(u);

        if (u->in_cgroup_queue) {
                LIST_REMOVE(cgroup_queue, u->manager->cgroup_queue, u);
                u->in_cgroup_queue = false;
        }

        target_mask = unit_get_target_mask(u);
        if (unit_has_mask_realized(u, target_mask))
                return 0;

        /* First, realize parents */
        if (UNIT_ISSET(u->slice)) {
                r = unit_realize_cgroup_now(UNIT_DEREF(u->slice), state);
                if (r < 0)
                        return r;
        }

        /* And then do the real work */
        enable_mask = unit_get_enable_mask(u);
        r = unit_create_cgroup(u, target_mask, enable_mask);
        if (r < 0)
                return r;

        /* Finally, apply the necessary attributes. */
        cgroup_context_apply(unit_get_cgroup_context(u), target_mask, u->cgroup_path, u->cgroup_netclass_id, state);

        return 0;
}

static void unit_add_to_cgroup_queue(Unit *u) {

        if (u->in_cgroup_queue)
                return;

        LIST_PREPEND(cgroup_queue, u->manager->cgroup_queue, u);
        u->in_cgroup_queue = true;
}

unsigned manager_dispatch_cgroup_queue(Manager *m) {
        ManagerState state;
        unsigned n = 0;
        Unit *i;
        int r;

        state = manager_state(m);

        while ((i = m->cgroup_queue)) {
                assert(i->in_cgroup_queue);

                r = unit_realize_cgroup_now(i, state);
                if (r < 0)
                        log_warning_errno(r, "Failed to realize cgroups for queued unit %s, ignoring: %m", i->id);

                n++;
        }

        return n;
}

static void unit_queue_siblings(Unit *u) {
        Unit *slice;

        /* This adds the siblings of the specified unit and the
         * siblings of all parent units to the cgroup queue. (But
         * neither the specified unit itself nor the parents.) */

        while ((slice = UNIT_DEREF(u->slice))) {
                Iterator i;
                Unit *m;

                SET_FOREACH(m, slice->dependencies[UNIT_BEFORE], i) {
                        if (m == u)
                                continue;

                        /* Skip units that have a dependency on the slice
                         * but aren't actually in it. */
                        if (UNIT_DEREF(m->slice) != slice)
                                continue;

                        /* No point in doing cgroup application for units
                         * without active processes. */
                        if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(m)))
                                continue;

                        /* If the unit doesn't need any new controllers
                         * and has current ones realized, it doesn't need
                         * any changes. */
                        if (unit_has_mask_realized(m, unit_get_target_mask(m)))
                                continue;

                        unit_add_to_cgroup_queue(m);
                }

                u = slice;
        }
}

int unit_realize_cgroup(Unit *u) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* So, here's the deal: when realizing the cgroups for this
         * unit, we need to first create all parents, but there's more
         * actually: for the weight-based controllers we also need to
         * make sure that all our siblings (i.e. units that are in the
         * same slice as we are) have cgroups, too. Otherwise, things
         * would become very uneven as each of their processes would
         * get as much resources as all our group together. This call
         * will synchronously create the parent cgroups, but will
         * defer work on the siblings to the next event loop
         * iteration. */

        /* Add all sibling slices to the cgroup queue. */
        unit_queue_siblings(u);

        /* And realize this one now (and apply the values) */
        return unit_realize_cgroup_now(u, manager_state(u->manager));
}

void unit_release_cgroup(Unit *u) {
        assert(u);

        /* Forgets all cgroup details for this cgroup */

        if (u->cgroup_path) {
                (void) hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
                u->cgroup_path = mfree(u->cgroup_path);
        }

        if (u->cgroup_inotify_wd >= 0) {
                if (inotify_rm_watch(u->manager->cgroup_inotify_fd, u->cgroup_inotify_wd) < 0)
                        log_unit_debug_errno(u, errno, "Failed to remove cgroup inotify watch %i for %s, ignoring", u->cgroup_inotify_wd, u->id);

                (void) hashmap_remove(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd));
                u->cgroup_inotify_wd = -1;
        }
}

void unit_prune_cgroup(Unit *u) {
        int r;
        bool is_root_slice;

        assert(u);

        /* Removes the cgroup, if empty and possible, and stops watching it. */

        if (!u->cgroup_path)
                return;

        is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);

        r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !is_root_slice);
        if (r < 0) {
                log_debug_errno(r, "Failed to destroy cgroup %s, ignoring: %m", u->cgroup_path);
                return;
        }

        if (is_root_slice)
                return;

        unit_release_cgroup(u);

        u->cgroup_realized = false;
        u->cgroup_realized_mask = 0;
}

int unit_search_main_pid(Unit *u, pid_t *ret) {
        _cleanup_fclose_ FILE *f = NULL;
        pid_t pid = 0, npid, mypid;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENXIO;

        r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, &f);
        if (r < 0)
                return r;

        mypid = getpid();
        while (cg_read_pid(f, &npid) > 0)  {
                pid_t ppid;

                if (npid == pid)
                        continue;

                /* Ignore processes that aren't our kids */
                if (get_parent_of_pid(npid, &ppid) >= 0 && ppid != mypid)
                        continue;

                if (pid != 0)
                        /* Dang, there's more than one daemonized PID
                        in this group, so we don't know what process
                        is the main process. */
                        return -ENODATA;

                pid = npid;
        }

        *ret = pid;
        return 0;
}

static int unit_watch_pids_in_path(Unit *u, const char *path) {
        _cleanup_closedir_ DIR *d = NULL;
        _cleanup_fclose_ FILE *f = NULL;
        int ret = 0, r;

        assert(u);
        assert(path);

        r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f);
        if (r < 0)
                ret = r;
        else {
                pid_t pid;

                while ((r = cg_read_pid(f, &pid)) > 0) {
                        r = unit_watch_pid(u, pid);
                        if (r < 0 && ret >= 0)
                                ret = r;
                }

                if (r < 0 && ret >= 0)
                        ret = r;
        }

        r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d);
        if (r < 0) {
                if (ret >= 0)
                        ret = r;
        } else {
                char *fn;

                while ((r = cg_read_subgroup(d, &fn)) > 0) {
                        _cleanup_free_ char *p = NULL;

                        p = strjoin(path, "/", fn, NULL);
                        free(fn);

                        if (!p)
                                return -ENOMEM;

                        r = unit_watch_pids_in_path(u, p);
                        if (r < 0 && ret >= 0)
                                ret = r;
                }

                if (r < 0 && ret >= 0)
                        ret = r;
        }

        return ret;
}

int unit_watch_all_pids(Unit *u) {
        assert(u);

        /* Adds all PIDs from our cgroup to the set of PIDs we
         * watch. This is a fallback logic for cases where we do not
         * get reliable cgroup empty notifications: we try to use
         * SIGCHLD as replacement. */

        if (!u->cgroup_path)
                return -ENOENT;

        if (cg_unified() > 0) /* On unified we can use proper notifications */
                return 0;

        return unit_watch_pids_in_path(u, u->cgroup_path);
}

int unit_notify_cgroup_empty(Unit *u) {
        int r;

        assert(u);

        if (!u->cgroup_path)
                return 0;

        r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
        if (r <= 0)
                return r;

        unit_add_to_gc_queue(u);

        if (UNIT_VTABLE(u)->notify_cgroup_empty)
                UNIT_VTABLE(u)->notify_cgroup_empty(u);

        return 0;
}

static int on_cgroup_inotify_event(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *m = userdata;

        assert(s);
        assert(fd >= 0);
        assert(m);

        for (;;) {
                union inotify_event_buffer buffer;
                struct inotify_event *e;
                ssize_t l;

                l = read(fd, &buffer, sizeof(buffer));
                if (l < 0) {
                        if (errno == EINTR || errno == EAGAIN)
                                return 0;

                        return log_error_errno(errno, "Failed to read control group inotify events: %m");
                }

                FOREACH_INOTIFY_EVENT(e, buffer, l) {
                        Unit *u;

                        if (e->wd < 0)
                                /* Queue overflow has no watch descriptor */
                                continue;

                        if (e->mask & IN_IGNORED)
                                /* The watch was just removed */
                                continue;

                        u = hashmap_get(m->cgroup_inotify_wd_unit, INT_TO_PTR(e->wd));
                        if (!u) /* Note that inotify might deliver
                                 * events for a watch even after it
                                 * was removed, because it was queued
                                 * before the removal. Let's ignore
                                 * this here safely. */
                                continue;

                        (void) unit_notify_cgroup_empty(u);
                }
        }
}

int manager_setup_cgroup(Manager *m) {
        _cleanup_free_ char *path = NULL;
        CGroupController c;
        int r, unified;
        char *e;

        assert(m);

        /* 1. Determine hierarchy */
        m->cgroup_root = mfree(m->cgroup_root);
        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root);
        if (r < 0)
                return log_error_errno(r, "Cannot determine cgroup we are running in: %m");

        /* Chop off the init scope, if we are already located in it */
        e = endswith(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);

        /* LEGACY: Also chop off the system slice if we are in
         * it. This is to support live upgrades from older systemd
         * versions where PID 1 was moved there. Also see
         * cg_get_root_path(). */
        if (!e && m->running_as == MANAGER_SYSTEM) {
                e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE);
                if (!e)
                        e = endswith(m->cgroup_root, "/system"); /* even more legacy */
        }
        if (e)
                *e = 0;

        /* And make sure to store away the root value without trailing
         * slash, even for the root dir, so that we can easily prepend
         * it everywhere. */
        while ((e = endswith(m->cgroup_root, "/")))
                *e = 0;

        /* 2. Show data */
        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
        if (r < 0)
                return log_error_errno(r, "Cannot find cgroup mount point: %m");

        unified = cg_unified();
        if (unified < 0)
                return log_error_errno(unified, "Couldn't determine if we are running in the unified hierarchy: %m");
        if (unified > 0)
                log_debug("Unified cgroup hierarchy is located at %s.", path);
        else
                log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER ". File system hierarchy is at %s.", path);

        if (!m->test_run) {
                const char *scope_path;

                /* 3. Install agent */
                if (unified) {

                        /* In the unified hierarchy we can get
                         * cgroup empty notifications via inotify. */

                        m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
                        safe_close(m->cgroup_inotify_fd);

                        m->cgroup_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
                        if (m->cgroup_inotify_fd < 0)
                                return log_error_errno(errno, "Failed to create control group inotify object: %m");

                        r = sd_event_add_io(m->event, &m->cgroup_inotify_event_source, m->cgroup_inotify_fd, EPOLLIN, on_cgroup_inotify_event, m);
                        if (r < 0)
                                return log_error_errno(r, "Failed to watch control group inotify object: %m");

                        r = sd_event_source_set_priority(m->cgroup_inotify_event_source, SD_EVENT_PRIORITY_IDLE - 5);
                        if (r < 0)
                                return log_error_errno(r, "Failed to set priority of inotify event source: %m");

                        (void) sd_event_source_set_description(m->cgroup_inotify_event_source, "cgroup-inotify");

                } else if (m->running_as == MANAGER_SYSTEM) {

                        /* On the legacy hierarchy we only get
                         * notifications via cgroup agents. (Which
                         * isn't really reliable, since it does not
                         * generate events when control groups with
                         * children run empty.) */

                        r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, SYSTEMD_CGROUP_AGENT_PATH);
                        if (r < 0)
                                log_warning_errno(r, "Failed to install release agent, ignoring: %m");
                        else if (r > 0)
                                log_debug("Installed release agent.");
                        else
                                log_debug("Release agent already installed.");
                }

                /* 4. Make sure we are in the special "init.scope" unit in the root slice. */
                scope_path = strjoina(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
                r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
                if (r < 0)
                        return log_error_errno(r, "Failed to create %s control group: %m", scope_path);

                /* also, move all other userspace processes remaining
                 * in the root cgroup into that scope. */
                r = cg_migrate(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, SYSTEMD_CGROUP_CONTROLLER, scope_path, false);
                if (r < 0)
                        log_warning_errno(r, "Couldn't move remaining userspace processes, ignoring: %m");

                /* 5. And pin it, so that it cannot be unmounted */
                safe_close(m->pin_cgroupfs_fd);
                m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
                if (m->pin_cgroupfs_fd < 0)
                        return log_error_errno(errno, "Failed to open pin file: %m");

                /* 6. Always enable hierarchical support if it exists... */
                if (!unified)
                        (void) cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");
        }

        /* 7. Figure out which controllers are supported */
        r = cg_mask_supported(&m->cgroup_supported);
        if (r < 0)
                return log_error_errno(r, "Failed to determine supported controllers: %m");

        for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++)
                log_debug("Controller '%s' supported: %s", cgroup_controller_to_string(c), yes_no(m->cgroup_supported & CGROUP_CONTROLLER_TO_MASK(c)));

        return 0;
}

void manager_shutdown_cgroup(Manager *m, bool delete) {
        assert(m);

        /* We can't really delete the group, since we are in it. But
         * let's trim it. */
        if (delete && m->cgroup_root)
                (void) cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);

        m->cgroup_inotify_wd_unit = hashmap_free(m->cgroup_inotify_wd_unit);

        m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
        m->cgroup_inotify_fd = safe_close(m->cgroup_inotify_fd);

        m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd);

        m->cgroup_root = mfree(m->cgroup_root);
}

Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) {
        char *p;
        Unit *u;

        assert(m);
        assert(cgroup);

        u = hashmap_get(m->cgroup_unit, cgroup);
        if (u)
                return u;

        p = strdupa(cgroup);
        for (;;) {
                char *e;

                e = strrchr(p, '/');
                if (!e || e == p)
                        return hashmap_get(m->cgroup_unit, SPECIAL_ROOT_SLICE);

                *e = 0;

                u = hashmap_get(m->cgroup_unit, p);
                if (u)
                        return u;
        }
}

Unit *manager_get_unit_by_pid_cgroup(Manager *m, pid_t pid) {
        _cleanup_free_ char *cgroup = NULL;
        int r;

        assert(m);

        if (pid <= 0)
                return NULL;

        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup);
        if (r < 0)
                return NULL;

        return manager_get_unit_by_cgroup(m, cgroup);
}

Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {
        Unit *u;

        assert(m);

        if (pid <= 0)
                return NULL;

        if (pid == 1)
                return hashmap_get(m->units, SPECIAL_INIT_SCOPE);

        u = hashmap_get(m->watch_pids1, PID_TO_PTR(pid));
        if (u)
                return u;

        u = hashmap_get(m->watch_pids2, PID_TO_PTR(pid));
        if (u)
                return u;

        return manager_get_unit_by_pid_cgroup(m, pid);
}

int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
        Unit *u;

        assert(m);
        assert(cgroup);

        u = manager_get_unit_by_cgroup(m, cgroup);
        if (!u)
                return 0;

        return unit_notify_cgroup_empty(u);
}

int unit_get_memory_current(Unit *u, uint64_t *ret) {
        _cleanup_free_ char *v = NULL;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENODATA;

        if ((u->cgroup_realized_mask & CGROUP_MASK_MEMORY) == 0)
                return -ENODATA;

        if (cg_unified() <= 0)
                r = cg_get_attribute("memory", u->cgroup_path, "memory.usage_in_bytes", &v);
        else
                r = cg_get_attribute("memory", u->cgroup_path, "memory.current", &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        return safe_atou64(v, ret);
}

int unit_get_tasks_current(Unit *u, uint64_t *ret) {
        _cleanup_free_ char *v = NULL;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENODATA;

        if ((u->cgroup_realized_mask & CGROUP_MASK_PIDS) == 0)
                return -ENODATA;

        r = cg_get_attribute("pids", u->cgroup_path, "pids.current", &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        return safe_atou64(v, ret);
}

static int unit_get_cpu_usage_raw(Unit *u, nsec_t *ret) {
        _cleanup_free_ char *v = NULL;
        uint64_t ns;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENODATA;

        if ((u->cgroup_realized_mask & CGROUP_MASK_CPUACCT) == 0)
                return -ENODATA;

        r = cg_get_attribute("cpuacct", u->cgroup_path, "cpuacct.usage", &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        r = safe_atou64(v, &ns);
        if (r < 0)
                return r;

        *ret = ns;
        return 0;
}

int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
        nsec_t ns;
        int r;

        r = unit_get_cpu_usage_raw(u, &ns);
        if (r < 0)
                return r;

        if (ns > u->cpuacct_usage_base)
                ns -= u->cpuacct_usage_base;
        else
                ns = 0;

        *ret = ns;
        return 0;
}

int unit_reset_cpu_usage(Unit *u) {
        nsec_t ns;
        int r;

        assert(u);

        r = unit_get_cpu_usage_raw(u, &ns);
        if (r < 0) {
                u->cpuacct_usage_base = 0;
                return r;
        }

        u->cpuacct_usage_base = ns;
        return 0;
}

bool unit_cgroup_delegate(Unit *u) {
        CGroupContext *c;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        return c->delegate;
}

void unit_invalidate_cgroup(Unit *u, CGroupMask m) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return;

        if (m == 0)
                return;

        if ((u->cgroup_realized_mask & m) == 0)
                return;

        u->cgroup_realized_mask &= ~m;
        unit_add_to_cgroup_queue(u);
}

void manager_invalidate_startup_units(Manager *m) {
        Iterator i;
        Unit *u;

        assert(m);

        SET_FOREACH(u, m->startup_units, i)
                unit_invalidate_cgroup(u, CGROUP_MASK_CPU|CGROUP_MASK_BLKIO);
}

static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
        [CGROUP_AUTO] = "auto",
        [CGROUP_CLOSED] = "closed",
        [CGROUP_STRICT] = "strict",
};

DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);