1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
/***
  This file is part of systemd.

  Copyright 2013 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
25 #include "cgroup-util.h"
28 #include "path-util.h"
29 #include "process-util.h"
31 #include "string-util.h"
33 #define CGROUP_CPU_QUOTA_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)
35 void cgroup_context_init(CGroupContext
*c
) {
38 /* Initialize everything to the kernel defaults, assuming the
39 * structure is preinitialized to 0 */
41 c
->cpu_shares
= CGROUP_CPU_SHARES_INVALID
;
42 c
->startup_cpu_shares
= CGROUP_CPU_SHARES_INVALID
;
43 c
->cpu_quota_per_sec_usec
= USEC_INFINITY
;
45 c
->memory_limit
= (uint64_t) -1;
47 c
->blockio_weight
= CGROUP_BLKIO_WEIGHT_INVALID
;
48 c
->startup_blockio_weight
= CGROUP_BLKIO_WEIGHT_INVALID
;
50 c
->tasks_max
= (uint64_t) -1;
52 c
->netclass_type
= CGROUP_NETCLASS_TYPE_NONE
;
55 void cgroup_context_free_device_allow(CGroupContext
*c
, CGroupDeviceAllow
*a
) {
59 LIST_REMOVE(device_allow
, c
->device_allow
, a
);
64 void cgroup_context_free_blockio_device_weight(CGroupContext
*c
, CGroupBlockIODeviceWeight
*w
) {
68 LIST_REMOVE(device_weights
, c
->blockio_device_weights
, w
);
73 void cgroup_context_free_blockio_device_bandwidth(CGroupContext
*c
, CGroupBlockIODeviceBandwidth
*b
) {
77 LIST_REMOVE(device_bandwidths
, c
->blockio_device_bandwidths
, b
);
82 void cgroup_context_done(CGroupContext
*c
) {
85 while (c
->blockio_device_weights
)
86 cgroup_context_free_blockio_device_weight(c
, c
->blockio_device_weights
);
88 while (c
->blockio_device_bandwidths
)
89 cgroup_context_free_blockio_device_bandwidth(c
, c
->blockio_device_bandwidths
);
91 while (c
->device_allow
)
92 cgroup_context_free_device_allow(c
, c
->device_allow
);
95 void cgroup_context_dump(CGroupContext
*c
, FILE* f
, const char *prefix
) {
96 CGroupBlockIODeviceBandwidth
*b
;
97 CGroupBlockIODeviceWeight
*w
;
99 char u
[FORMAT_TIMESPAN_MAX
];
104 prefix
= strempty(prefix
);
107 "%sCPUAccounting=%s\n"
108 "%sBlockIOAccounting=%s\n"
109 "%sMemoryAccounting=%s\n"
110 "%sTasksAccounting=%s\n"
111 "%sCPUShares=%" PRIu64
"\n"
112 "%sStartupCPUShares=%" PRIu64
"\n"
113 "%sCPUQuotaPerSecSec=%s\n"
114 "%sBlockIOWeight=%" PRIu64
"\n"
115 "%sStartupBlockIOWeight=%" PRIu64
"\n"
116 "%sMemoryLimit=%" PRIu64
"\n"
117 "%sTasksMax=%" PRIu64
"\n"
118 "%sDevicePolicy=%s\n"
120 prefix
, yes_no(c
->cpu_accounting
),
121 prefix
, yes_no(c
->blockio_accounting
),
122 prefix
, yes_no(c
->memory_accounting
),
123 prefix
, yes_no(c
->tasks_accounting
),
124 prefix
, c
->cpu_shares
,
125 prefix
, c
->startup_cpu_shares
,
126 prefix
, format_timespan(u
, sizeof(u
), c
->cpu_quota_per_sec_usec
, 1),
127 prefix
, c
->blockio_weight
,
128 prefix
, c
->startup_blockio_weight
,
129 prefix
, c
->memory_limit
,
130 prefix
, c
->tasks_max
,
131 prefix
, cgroup_device_policy_to_string(c
->device_policy
),
132 prefix
, yes_no(c
->delegate
));
134 LIST_FOREACH(device_allow
, a
, c
->device_allow
)
136 "%sDeviceAllow=%s %s%s%s\n",
139 a
->r
? "r" : "", a
->w
? "w" : "", a
->m
? "m" : "");
141 LIST_FOREACH(device_weights
, w
, c
->blockio_device_weights
)
143 "%sBlockIODeviceWeight=%s %" PRIu64
,
148 LIST_FOREACH(device_bandwidths
, b
, c
->blockio_device_bandwidths
) {
149 char buf
[FORMAT_BYTES_MAX
];
154 b
->read
? "BlockIOReadBandwidth" : "BlockIOWriteBandwidth",
156 format_bytes(buf
, sizeof(buf
), b
->bandwidth
));
160 static int lookup_blkio_device(const char *p
, dev_t
*dev
) {
169 return log_warning_errno(errno
, "Couldn't stat device %s: %m", p
);
171 if (S_ISBLK(st
.st_mode
))
173 else if (major(st
.st_dev
) != 0) {
174 /* If this is not a device node then find the block
175 * device this file is stored on */
178 /* If this is a partition, try to get the originating
180 block_get_whole_disk(*dev
, dev
);
182 log_warning("%s is not a block device and file system block device cannot be determined or is not local.", p
);
189 static int whitelist_device(const char *path
, const char *node
, const char *acc
) {
190 char buf
[2+DECIMAL_STR_MAX(dev_t
)*2+2+4];
197 if (stat(node
, &st
) < 0) {
198 log_warning("Couldn't stat device %s", node
);
202 if (!S_ISCHR(st
.st_mode
) && !S_ISBLK(st
.st_mode
)) {
203 log_warning("%s is not a device.", node
);
209 S_ISCHR(st
.st_mode
) ? 'c' : 'b',
210 major(st
.st_rdev
), minor(st
.st_rdev
),
213 r
= cg_set_attribute("devices", path
, "devices.allow", buf
);
215 log_full_errno(IN_SET(r
, -ENOENT
, -EROFS
, -EINVAL
) ? LOG_DEBUG
: LOG_WARNING
, r
,
216 "Failed to set devices.allow on %s: %m", path
);
221 static int whitelist_major(const char *path
, const char *name
, char type
, const char *acc
) {
222 _cleanup_fclose_
FILE *f
= NULL
;
229 assert(type
== 'b' || type
== 'c');
231 f
= fopen("/proc/devices", "re");
233 return log_warning_errno(errno
, "Cannot open /proc/devices to resolve %s (%c): %m", name
, type
);
235 FOREACH_LINE(line
, f
, goto fail
) {
236 char buf
[2+DECIMAL_STR_MAX(unsigned)+3+4], *p
, *w
;
241 if (type
== 'c' && streq(line
, "Character devices:")) {
246 if (type
== 'b' && streq(line
, "Block devices:")) {
261 w
= strpbrk(p
, WHITESPACE
);
266 r
= safe_atou(p
, &maj
);
273 w
+= strspn(w
, WHITESPACE
);
275 if (fnmatch(name
, w
, 0) != 0)
284 r
= cg_set_attribute("devices", path
, "devices.allow", buf
);
286 log_full_errno(IN_SET(r
, -ENOENT
, -EROFS
, -EINVAL
) ? LOG_DEBUG
: LOG_WARNING
, r
,
287 "Failed to set devices.allow on %s: %m", path
);
293 log_warning_errno(errno
, "Failed to read /proc/devices: %m");
297 void cgroup_context_apply(CGroupContext
*c
, CGroupMask mask
, const char *path
, uint32_t netclass
, ManagerState state
) {
307 /* Some cgroup attributes are not supported on the root cgroup,
308 * hence silently ignore */
309 is_root
= isempty(path
) || path_equal(path
, "/");
311 /* Make sure we don't try to display messages with an empty path. */
314 /* We generally ignore errors caused by read-only mounted
315 * cgroup trees (assuming we are running in a container then),
316 * and missing cgroups, i.e. EROFS and ENOENT. */
318 if ((mask
& CGROUP_MASK_CPU
) && !is_root
) {
319 char buf
[MAX(DECIMAL_STR_MAX(uint64_t), DECIMAL_STR_MAX(usec_t
)) + 1];
321 sprintf(buf
, "%" PRIu64
"\n",
322 IN_SET(state
, MANAGER_STARTING
, MANAGER_INITIALIZING
) && c
->startup_cpu_shares
!= CGROUP_CPU_SHARES_INVALID
? c
->startup_cpu_shares
:
323 c
->cpu_shares
!= CGROUP_CPU_SHARES_INVALID
? c
->cpu_shares
: CGROUP_CPU_SHARES_DEFAULT
);
324 r
= cg_set_attribute("cpu", path
, "cpu.shares", buf
);
326 log_full_errno(IN_SET(r
, -ENOENT
, -EROFS
) ? LOG_DEBUG
: LOG_WARNING
, r
,
327 "Failed to set cpu.shares on %s: %m", path
);
329 sprintf(buf
, USEC_FMT
"\n", CGROUP_CPU_QUOTA_PERIOD_USEC
);
330 r
= cg_set_attribute("cpu", path
, "cpu.cfs_period_us", buf
);
332 log_full_errno(IN_SET(r
, -ENOENT
, -EROFS
) ? LOG_DEBUG
: LOG_WARNING
, r
,
333 "Failed to set cpu.cfs_period_us on %s: %m", path
);
335 if (c
->cpu_quota_per_sec_usec
!= USEC_INFINITY
) {
336 sprintf(buf
, USEC_FMT
"\n", c
->cpu_quota_per_sec_usec
* CGROUP_CPU_QUOTA_PERIOD_USEC
/ USEC_PER_SEC
);
337 r
= cg_set_attribute("cpu", path
, "cpu.cfs_quota_us", buf
);
339 r
= cg_set_attribute("cpu", path
, "cpu.cfs_quota_us", "-1");
341 log_full_errno(IN_SET(r
, -ENOENT
, -EROFS
) ? LOG_DEBUG
: LOG_WARNING
, r
,
342 "Failed to set cpu.cfs_quota_us on %s: %m", path
);
345 if (mask
& CGROUP_MASK_BLKIO
) {
346 char buf
[MAX(DECIMAL_STR_MAX(uint64_t)+1,
347 DECIMAL_STR_MAX(dev_t
)*2+2+DECIMAL_STR_MAX(uint64_t)+1)];
348 CGroupBlockIODeviceWeight
*w
;
349 CGroupBlockIODeviceBandwidth
*b
;
352 sprintf(buf
, "%" PRIu64
"\n",
353 IN_SET(state
, MANAGER_STARTING
, MANAGER_INITIALIZING
) && c
->startup_blockio_weight
!= CGROUP_BLKIO_WEIGHT_INVALID
? c
->startup_blockio_weight
:
354 c
->blockio_weight
!= CGROUP_BLKIO_WEIGHT_INVALID
? c
->blockio_weight
: CGROUP_BLKIO_WEIGHT_DEFAULT
);
355 r
= cg_set_attribute("blkio", path
, "blkio.weight", buf
);
357 log_full_errno(IN_SET(r
, -ENOENT
, -EROFS
) ? LOG_DEBUG
: LOG_WARNING
, r
,
358 "Failed to set blkio.weight on %s: %m", path
);
360 /* FIXME: no way to reset this list */
361 LIST_FOREACH(device_weights
, w
, c
->blockio_device_weights
) {
364 r
= lookup_blkio_device(w
->path
, &dev
);
368 sprintf(buf
, "%u:%u %" PRIu64
"\n", major(dev
), minor(dev
), w
->weight
);
369 r
= cg_set_attribute("blkio", path
, "blkio.weight_device", buf
);
371 log_full_errno(IN_SET(r
, -ENOENT
, -EROFS
) ? LOG_DEBUG
: LOG_WARNING
, r
,
372 "Failed to set blkio.weight_device on %s: %m", path
);
376 /* FIXME: no way to reset this list */
377 LIST_FOREACH(device_bandwidths
, b
, c
->blockio_device_bandwidths
) {
381 r
= lookup_blkio_device(b
->path
, &dev
);
385 a
= b
->read
? "blkio.throttle.read_bps_device" : "blkio.throttle.write_bps_device";
387 sprintf(buf
, "%u:%u %" PRIu64
"\n", major(dev
), minor(dev
), b
->bandwidth
);
388 r
= cg_set_attribute("blkio", path
, a
, buf
);
390 log_full_errno(IN_SET(r
, -ENOENT
, -EROFS
) ? LOG_DEBUG
: LOG_WARNING
, r
,
391 "Failed to set %s on %s: %m", a
, path
);
395 if ((mask
& CGROUP_MASK_MEMORY
) && !is_root
) {
396 if (c
->memory_limit
!= (uint64_t) -1) {
397 char buf
[DECIMAL_STR_MAX(uint64_t) + 1];
399 sprintf(buf
, "%" PRIu64
"\n", c
->memory_limit
);
401 if (cg_unified() <= 0)
402 r
= cg_set_attribute("memory", path
, "memory.limit_in_bytes", buf
);
404 r
= cg_set_attribute("memory", path
, "memory.max", buf
);
407 if (cg_unified() <= 0)
408 r
= cg_set_attribute("memory", path
, "memory.limit_in_bytes", "-1");
410 r
= cg_set_attribute("memory", path
, "memory.max", "max");
414 log_full_errno(IN_SET(r
, -ENOENT
, -EROFS
) ? LOG_DEBUG
: LOG_WARNING
, r
,
415 "Failed to set memory.limit_in_bytes/memory.max on %s: %m", path
);
418 if ((mask
& CGROUP_MASK_DEVICES
) && !is_root
) {
419 CGroupDeviceAllow
*a
;
421 /* Changing the devices list of a populated cgroup
422 * might result in EINVAL, hence ignore EINVAL
425 if (c
->device_allow
|| c
->device_policy
!= CGROUP_AUTO
)
426 r
= cg_set_attribute("devices", path
, "devices.deny", "a");
428 r
= cg_set_attribute("devices", path
, "devices.allow", "a");
430 log_full_errno(IN_SET(r
, -ENOENT
, -EROFS
, -EINVAL
) ? LOG_DEBUG
: LOG_WARNING
, r
,
431 "Failed to reset devices.list on %s: %m", path
);
433 if (c
->device_policy
== CGROUP_CLOSED
||
434 (c
->device_policy
== CGROUP_AUTO
&& c
->device_allow
)) {
435 static const char auto_devices
[] =
436 "/dev/null\0" "rwm\0"
437 "/dev/zero\0" "rwm\0"
438 "/dev/full\0" "rwm\0"
439 "/dev/random\0" "rwm\0"
440 "/dev/urandom\0" "rwm\0"
442 "/dev/pts/ptmx\0" "rw\0"; /* /dev/pts/ptmx may not be duplicated, but accessed */
446 NULSTR_FOREACH_PAIR(x
, y
, auto_devices
)
447 whitelist_device(path
, x
, y
);
449 whitelist_major(path
, "pts", 'c', "rw");
450 whitelist_major(path
, "kdbus", 'c', "rw");
451 whitelist_major(path
, "kdbus/*", 'c', "rw");
454 LIST_FOREACH(device_allow
, a
, c
->device_allow
) {
470 if (startswith(a
->path
, "/dev/"))
471 whitelist_device(path
, a
->path
, acc
);
472 else if (startswith(a
->path
, "block-"))
473 whitelist_major(path
, a
->path
+ 6, 'b', acc
);
474 else if (startswith(a
->path
, "char-"))
475 whitelist_major(path
, a
->path
+ 5, 'c', acc
);
477 log_debug("Ignoring device %s while writing cgroup attribute.", a
->path
);
481 if ((mask
& CGROUP_MASK_PIDS
) && !is_root
) {
483 if (c
->tasks_max
!= (uint64_t) -1) {
484 char buf
[DECIMAL_STR_MAX(uint64_t) + 2];
486 sprintf(buf
, "%" PRIu64
"\n", c
->tasks_max
);
487 r
= cg_set_attribute("pids", path
, "pids.max", buf
);
489 r
= cg_set_attribute("pids", path
, "pids.max", "max");
492 log_full_errno(IN_SET(r
, -ENOENT
, -EROFS
) ? LOG_DEBUG
: LOG_WARNING
, r
,
493 "Failed to set pids.max on %s: %m", path
);
496 if (mask
& CGROUP_MASK_NET_CLS
) {
497 char buf
[DECIMAL_STR_MAX(uint32_t)];
499 sprintf(buf
, "%" PRIu32
, netclass
);
501 r
= cg_set_attribute("net_cls", path
, "net_cls.classid", buf
);
503 log_full_errno(IN_SET(r
, -ENOENT
, -EROFS
) ? LOG_DEBUG
: LOG_WARNING
, r
,
504 "Failed to set net_cls.classid on %s: %m", path
);
508 CGroupMask
cgroup_context_get_mask(CGroupContext
*c
) {
511 /* Figure out which controllers we need */
513 if (c
->cpu_accounting
||
514 c
->cpu_shares
!= CGROUP_CPU_SHARES_INVALID
||
515 c
->startup_cpu_shares
!= CGROUP_CPU_SHARES_INVALID
||
516 c
->cpu_quota_per_sec_usec
!= USEC_INFINITY
)
517 mask
|= CGROUP_MASK_CPUACCT
| CGROUP_MASK_CPU
;
519 if (c
->blockio_accounting
||
520 c
->blockio_weight
!= CGROUP_BLKIO_WEIGHT_INVALID
||
521 c
->startup_blockio_weight
!= CGROUP_BLKIO_WEIGHT_INVALID
||
522 c
->blockio_device_weights
||
523 c
->blockio_device_bandwidths
)
524 mask
|= CGROUP_MASK_BLKIO
;
526 if (c
->memory_accounting
||
527 c
->memory_limit
!= (uint64_t) -1)
528 mask
|= CGROUP_MASK_MEMORY
;
530 if (c
->device_allow
||
531 c
->device_policy
!= CGROUP_AUTO
)
532 mask
|= CGROUP_MASK_DEVICES
;
534 if (c
->tasks_accounting
||
535 c
->tasks_max
!= (uint64_t) -1)
536 mask
|= CGROUP_MASK_PIDS
;
538 if (c
->netclass_type
!= CGROUP_NETCLASS_TYPE_NONE
)
539 mask
|= CGROUP_MASK_NET_CLS
;
544 CGroupMask
unit_get_own_mask(Unit
*u
) {
547 /* Returns the mask of controllers the unit needs for itself */
549 c
= unit_get_cgroup_context(u
);
553 /* If delegation is turned on, then turn on all cgroups,
554 * unless we are on the legacy hierarchy and the process we
555 * fork into it is known to drop privileges, and hence
556 * shouldn't get access to the controllers.
558 * Note that on the unified hierarchy it is safe to delegate
559 * controllers to unprivileged services. */
564 e
= unit_get_exec_context(u
);
566 exec_context_maintains_privileges(e
) ||
568 return _CGROUP_MASK_ALL
;
571 return cgroup_context_get_mask(c
);
574 CGroupMask
unit_get_members_mask(Unit
*u
) {
577 /* Returns the mask of controllers all of the unit's children
580 if (u
->cgroup_members_mask_valid
)
581 return u
->cgroup_members_mask
;
583 u
->cgroup_members_mask
= 0;
585 if (u
->type
== UNIT_SLICE
) {
589 SET_FOREACH(member
, u
->dependencies
[UNIT_BEFORE
], i
) {
594 if (UNIT_DEREF(member
->slice
) != u
)
597 u
->cgroup_members_mask
|=
598 unit_get_own_mask(member
) |
599 unit_get_members_mask(member
);
603 u
->cgroup_members_mask_valid
= true;
604 return u
->cgroup_members_mask
;
607 CGroupMask
unit_get_siblings_mask(Unit
*u
) {
610 /* Returns the mask of controllers all of the unit's siblings
611 * require, i.e. the members mask of the unit's parent slice
612 * if there is one. */
614 if (UNIT_ISSET(u
->slice
))
615 return unit_get_members_mask(UNIT_DEREF(u
->slice
));
617 return unit_get_own_mask(u
) | unit_get_members_mask(u
);
620 CGroupMask
unit_get_subtree_mask(Unit
*u
) {
622 /* Returns the mask of this subtree, meaning of the group
623 * itself and its children. */
625 return unit_get_own_mask(u
) | unit_get_members_mask(u
);
628 CGroupMask
unit_get_target_mask(Unit
*u
) {
631 /* This returns the cgroup mask of all controllers to enable
632 * for a specific cgroup, i.e. everything it needs itself,
633 * plus all that its children need, plus all that its siblings
634 * need. This is primarily useful on the legacy cgroup
635 * hierarchy, where we need to duplicate each cgroup in each
636 * hierarchy that shall be enabled for it. */
638 mask
= unit_get_own_mask(u
) | unit_get_members_mask(u
) | unit_get_siblings_mask(u
);
639 mask
&= u
->manager
->cgroup_supported
;
644 CGroupMask
unit_get_enable_mask(Unit
*u
) {
647 /* This returns the cgroup mask of all controllers to enable
648 * for the children of a specific cgroup. This is primarily
649 * useful for the unified cgroup hierarchy, where each cgroup
650 * controls which controllers are enabled for its children. */
652 mask
= unit_get_members_mask(u
);
653 mask
&= u
->manager
->cgroup_supported
;
658 /* Recurse from a unit up through its containing slices, propagating
659 * mask bits upward. A unit is also member of itself. */
660 void unit_update_cgroup_members_masks(Unit
*u
) {
666 /* Calculate subtree mask */
667 m
= unit_get_subtree_mask(u
);
669 /* See if anything changed from the previous invocation. If
670 * not, we're done. */
671 if (u
->cgroup_subtree_mask_valid
&& m
== u
->cgroup_subtree_mask
)
675 u
->cgroup_subtree_mask_valid
&&
676 ((m
& ~u
->cgroup_subtree_mask
) != 0) &&
677 ((~m
& u
->cgroup_subtree_mask
) == 0);
679 u
->cgroup_subtree_mask
= m
;
680 u
->cgroup_subtree_mask_valid
= true;
682 if (UNIT_ISSET(u
->slice
)) {
683 Unit
*s
= UNIT_DEREF(u
->slice
);
686 /* There's more set now than before. We
687 * propagate the new mask to the parent's mask
688 * (not caring if it actually was valid or
691 s
->cgroup_members_mask
|= m
;
694 /* There's less set now than before (or we
695 * don't know), we need to recalculate
696 * everything, so let's invalidate the
697 * parent's members mask */
699 s
->cgroup_members_mask_valid
= false;
701 /* And now make sure that this change also hits our
703 unit_update_cgroup_members_masks(s
);
707 static const char *migrate_callback(CGroupMask mask
, void *userdata
) {
714 if (u
->cgroup_path
&&
715 u
->cgroup_realized
&&
716 (u
->cgroup_realized_mask
& mask
) == mask
)
717 return u
->cgroup_path
;
719 u
= UNIT_DEREF(u
->slice
);
725 char *unit_default_cgroup_path(Unit
*u
) {
726 _cleanup_free_
char *escaped
= NULL
, *slice
= NULL
;
731 if (unit_has_name(u
, SPECIAL_ROOT_SLICE
))
732 return strdup(u
->manager
->cgroup_root
);
734 if (UNIT_ISSET(u
->slice
) && !unit_has_name(UNIT_DEREF(u
->slice
), SPECIAL_ROOT_SLICE
)) {
735 r
= cg_slice_to_path(UNIT_DEREF(u
->slice
)->id
, &slice
);
740 escaped
= cg_escape(u
->id
);
745 return strjoin(u
->manager
->cgroup_root
, "/", slice
, "/", escaped
, NULL
);
747 return strjoin(u
->manager
->cgroup_root
, "/", escaped
, NULL
);
750 int unit_set_cgroup_path(Unit
*u
, const char *path
) {
751 _cleanup_free_
char *p
= NULL
;
763 if (streq_ptr(u
->cgroup_path
, p
))
767 r
= hashmap_put(u
->manager
->cgroup_unit
, p
, u
);
772 unit_release_cgroup(u
);
780 int unit_watch_cgroup(Unit
*u
) {
781 _cleanup_free_
char *populated
= NULL
;
789 if (u
->cgroup_inotify_wd
>= 0)
792 /* Only applies to the unified hierarchy */
795 return log_unit_error_errno(u
, r
, "Failed detect wether the unified hierarchy is used: %m");
799 /* Don't watch the root slice, it's pointless. */
800 if (unit_has_name(u
, SPECIAL_ROOT_SLICE
))
803 r
= hashmap_ensure_allocated(&u
->manager
->cgroup_inotify_wd_unit
, &trivial_hash_ops
);
807 r
= cg_get_path(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
, "cgroup.populated", &populated
);
811 u
->cgroup_inotify_wd
= inotify_add_watch(u
->manager
->cgroup_inotify_fd
, populated
, IN_MODIFY
);
812 if (u
->cgroup_inotify_wd
< 0) {
814 if (errno
== ENOENT
) /* If the directory is already
815 * gone we don't need to track
816 * it, so this is not an error */
819 return log_unit_error_errno(u
, errno
, "Failed to add inotify watch descriptor for control group %s: %m", u
->cgroup_path
);
822 r
= hashmap_put(u
->manager
->cgroup_inotify_wd_unit
, INT_TO_PTR(u
->cgroup_inotify_wd
), u
);
824 return log_unit_error_errno(u
, r
, "Failed to add inotify watch descriptor to hash map: %m");
829 static int unit_create_cgroup(
831 CGroupMask target_mask
,
832 CGroupMask enable_mask
) {
839 c
= unit_get_cgroup_context(u
);
843 if (!u
->cgroup_path
) {
844 _cleanup_free_
char *path
= NULL
;
846 path
= unit_default_cgroup_path(u
);
850 r
= unit_set_cgroup_path(u
, path
);
852 return log_unit_error_errno(u
, r
, "Control group %s exists already.", path
);
854 return log_unit_error_errno(u
, r
, "Failed to set unit's control group path to %s: %m", path
);
857 /* First, create our own group */
858 r
= cg_create_everywhere(u
->manager
->cgroup_supported
, target_mask
, u
->cgroup_path
);
860 return log_unit_error_errno(u
, r
, "Failed to create cgroup %s: %m", u
->cgroup_path
);
862 /* Start watching it */
863 (void) unit_watch_cgroup(u
);
865 /* Enable all controllers we need */
866 r
= cg_enable_everywhere(u
->manager
->cgroup_supported
, enable_mask
, u
->cgroup_path
);
868 log_unit_warning_errno(u
, r
, "Failed to enable controllers on cgroup %s, ignoring: %m", u
->cgroup_path
);
870 /* Keep track that this is now realized */
871 u
->cgroup_realized
= true;
872 u
->cgroup_realized_mask
= target_mask
;
874 if (u
->type
!= UNIT_SLICE
&& !c
->delegate
) {
876 /* Then, possibly move things over, but not if
877 * subgroups may contain processes, which is the case
878 * for slice and delegation units. */
879 r
= cg_migrate_everywhere(u
->manager
->cgroup_supported
, u
->cgroup_path
, u
->cgroup_path
, migrate_callback
, u
);
881 log_unit_warning_errno(u
, r
, "Failed to migrate cgroup from to %s, ignoring: %m", u
->cgroup_path
);
887 int unit_attach_pids_to_cgroup(Unit
*u
) {
891 r
= unit_realize_cgroup(u
);
895 r
= cg_attach_many_everywhere(u
->manager
->cgroup_supported
, u
->cgroup_path
, u
->pids
, migrate_callback
, u
);
902 static bool unit_has_mask_realized(Unit
*u
, CGroupMask target_mask
) {
905 return u
->cgroup_realized
&& u
->cgroup_realized_mask
== target_mask
;
908 static int unit_find_free_netclass_cgroup(Unit
*u
, uint32_t *ret
) {
917 i
= start
= m
->cgroup_netclass_registry_last
;
922 if (!hashmap_get(m
->cgroup_netclass_registry
, UINT_TO_PTR(i
))) {
923 m
->cgroup_netclass_registry_last
= i
;
929 i
= CGROUP_NETCLASS_FIXED_MAX
;
931 } while (i
!= start
);
936 int unit_add_to_netclass_cgroup(Unit
*u
) {
945 cc
= unit_get_cgroup_context(u
);
949 switch (cc
->netclass_type
) {
950 case CGROUP_NETCLASS_TYPE_NONE
:
953 case CGROUP_NETCLASS_TYPE_FIXED
:
954 u
->cgroup_netclass_id
= cc
->netclass_id
;
957 case CGROUP_NETCLASS_TYPE_AUTO
:
958 /* Allocate a new ID in case it was requested and not done yet */
959 if (u
->cgroup_netclass_id
== 0) {
960 r
= unit_find_free_netclass_cgroup(u
, &u
->cgroup_netclass_id
);
964 log_debug("Dynamically assigned netclass cgroup id %" PRIu32
" to %s", u
->cgroup_netclass_id
, u
->id
);
970 r
= hashmap_ensure_allocated(&u
->manager
->cgroup_netclass_registry
, &trivial_hash_ops
);
974 key
= UINT32_TO_PTR(u
->cgroup_netclass_id
);
975 first
= hashmap_get(u
->manager
->cgroup_netclass_registry
, key
);
978 LIST_PREPEND(cgroup_netclass
, first
, u
);
979 return hashmap_replace(u
->manager
->cgroup_netclass_registry
, key
, u
);
982 return hashmap_put(u
->manager
->cgroup_netclass_registry
, key
, u
);
985 int unit_remove_from_netclass_cgroup(Unit
*u
) {
992 key
= UINT32_TO_PTR(u
->cgroup_netclass_id
);
994 LIST_FIND_HEAD(cgroup_netclass
, u
, head
);
995 LIST_REMOVE(cgroup_netclass
, head
, u
);
998 return hashmap_replace(u
->manager
->cgroup_netclass_registry
, key
, head
);
1000 hashmap_remove(u
->manager
->cgroup_netclass_registry
, key
);
1005 /* Check if necessary controllers and attributes for a unit are in place.
1007 * If so, do nothing.
1008 * If not, create paths, move processes over, and set attributes.
1010 * Returns 0 on success and < 0 on failure. */
1011 static int unit_realize_cgroup_now(Unit
*u
, ManagerState state
) {
1012 CGroupMask target_mask
, enable_mask
;
1017 if (u
->in_cgroup_queue
) {
1018 LIST_REMOVE(cgroup_queue
, u
->manager
->cgroup_queue
, u
);
1019 u
->in_cgroup_queue
= false;
1022 target_mask
= unit_get_target_mask(u
);
1023 if (unit_has_mask_realized(u
, target_mask
))
1026 /* First, realize parents */
1027 if (UNIT_ISSET(u
->slice
)) {
1028 r
= unit_realize_cgroup_now(UNIT_DEREF(u
->slice
), state
);
1033 /* And then do the real work */
1034 enable_mask
= unit_get_enable_mask(u
);
1035 r
= unit_create_cgroup(u
, target_mask
, enable_mask
);
1039 /* Finally, apply the necessary attributes. */
1040 cgroup_context_apply(unit_get_cgroup_context(u
), target_mask
, u
->cgroup_path
, u
->cgroup_netclass_id
, state
);
1045 static void unit_add_to_cgroup_queue(Unit
*u
) {
1047 if (u
->in_cgroup_queue
)
1050 LIST_PREPEND(cgroup_queue
, u
->manager
->cgroup_queue
, u
);
1051 u
->in_cgroup_queue
= true;
1054 unsigned manager_dispatch_cgroup_queue(Manager
*m
) {
1060 state
= manager_state(m
);
1062 while ((i
= m
->cgroup_queue
)) {
1063 assert(i
->in_cgroup_queue
);
1065 r
= unit_realize_cgroup_now(i
, state
);
1067 log_warning_errno(r
, "Failed to realize cgroups for queued unit %s, ignoring: %m", i
->id
);
1075 static void unit_queue_siblings(Unit
*u
) {
1078 /* This adds the siblings of the specified unit and the
1079 * siblings of all parent units to the cgroup queue. (But
1080 * neither the specified unit itself nor the parents.) */
1082 while ((slice
= UNIT_DEREF(u
->slice
))) {
1086 SET_FOREACH(m
, slice
->dependencies
[UNIT_BEFORE
], i
) {
1090 /* Skip units that have a dependency on the slice
1091 * but aren't actually in it. */
1092 if (UNIT_DEREF(m
->slice
) != slice
)
1095 /* No point in doing cgroup application for units
1096 * without active processes. */
1097 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(m
)))
1100 /* If the unit doesn't need any new controllers
1101 * and has current ones realized, it doesn't need
1103 if (unit_has_mask_realized(m
, unit_get_target_mask(m
)))
1106 unit_add_to_cgroup_queue(m
);
1113 int unit_realize_cgroup(Unit
*u
) {
1116 if (!UNIT_HAS_CGROUP_CONTEXT(u
))
1119 /* So, here's the deal: when realizing the cgroups for this
1120 * unit, we need to first create all parents, but there's more
1121 * actually: for the weight-based controllers we also need to
1122 * make sure that all our siblings (i.e. units that are in the
1123 * same slice as we are) have cgroups, too. Otherwise, things
1124 * would become very uneven as each of their processes would
1125 * get as much resources as all our group together. This call
1126 * will synchronously create the parent cgroups, but will
1127 * defer work on the siblings to the next event loop
1130 /* Add all sibling slices to the cgroup queue. */
1131 unit_queue_siblings(u
);
1133 /* And realize this one now (and apply the values) */
1134 return unit_realize_cgroup_now(u
, manager_state(u
->manager
));
1137 void unit_release_cgroup(Unit
*u
) {
1140 /* Forgets all cgroup details for this cgroup */
1142 if (u
->cgroup_path
) {
1143 (void) hashmap_remove(u
->manager
->cgroup_unit
, u
->cgroup_path
);
1144 u
->cgroup_path
= mfree(u
->cgroup_path
);
1147 if (u
->cgroup_inotify_wd
>= 0) {
1148 if (inotify_rm_watch(u
->manager
->cgroup_inotify_fd
, u
->cgroup_inotify_wd
) < 0)
1149 log_unit_debug_errno(u
, errno
, "Failed to remove cgroup inotify watch %i for %s, ignoring", u
->cgroup_inotify_wd
, u
->id
);
1151 (void) hashmap_remove(u
->manager
->cgroup_inotify_wd_unit
, INT_TO_PTR(u
->cgroup_inotify_wd
));
1152 u
->cgroup_inotify_wd
= -1;
1156 void unit_prune_cgroup(Unit
*u
) {
1162 /* Removes the cgroup, if empty and possible, and stops watching it. */
1164 if (!u
->cgroup_path
)
1167 is_root_slice
= unit_has_name(u
, SPECIAL_ROOT_SLICE
);
1169 r
= cg_trim_everywhere(u
->manager
->cgroup_supported
, u
->cgroup_path
, !is_root_slice
);
1171 log_debug_errno(r
, "Failed to destroy cgroup %s, ignoring: %m", u
->cgroup_path
);
1178 unit_release_cgroup(u
);
1180 u
->cgroup_realized
= false;
1181 u
->cgroup_realized_mask
= 0;
1184 int unit_search_main_pid(Unit
*u
, pid_t
*ret
) {
1185 _cleanup_fclose_
FILE *f
= NULL
;
1186 pid_t pid
= 0, npid
, mypid
;
1192 if (!u
->cgroup_path
)
1195 r
= cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
, &f
);
1200 while (cg_read_pid(f
, &npid
) > 0) {
1206 /* Ignore processes that aren't our kids */
1207 if (get_parent_of_pid(npid
, &ppid
) >= 0 && ppid
!= mypid
)
1211 /* Dang, there's more than one daemonized PID
1212 in this group, so we don't know what process
1213 is the main process. */
1224 static int unit_watch_pids_in_path(Unit
*u
, const char *path
) {
1225 _cleanup_closedir_
DIR *d
= NULL
;
1226 _cleanup_fclose_
FILE *f
= NULL
;
1232 r
= cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER
, path
, &f
);
1238 while ((r
= cg_read_pid(f
, &pid
)) > 0) {
1239 r
= unit_watch_pid(u
, pid
);
1240 if (r
< 0 && ret
>= 0)
1244 if (r
< 0 && ret
>= 0)
1248 r
= cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER
, path
, &d
);
1255 while ((r
= cg_read_subgroup(d
, &fn
)) > 0) {
1256 _cleanup_free_
char *p
= NULL
;
1258 p
= strjoin(path
, "/", fn
, NULL
);
1264 r
= unit_watch_pids_in_path(u
, p
);
1265 if (r
< 0 && ret
>= 0)
1269 if (r
< 0 && ret
>= 0)
1276 int unit_watch_all_pids(Unit
*u
) {
1279 /* Adds all PIDs from our cgroup to the set of PIDs we
1280 * watch. This is a fallback logic for cases where we do not
1281 * get reliable cgroup empty notifications: we try to use
1282 * SIGCHLD as replacement. */
1284 if (!u
->cgroup_path
)
1287 if (cg_unified() > 0) /* On unified we can use proper notifications */
1290 return unit_watch_pids_in_path(u
, u
->cgroup_path
);
1293 int unit_notify_cgroup_empty(Unit
*u
) {
1298 if (!u
->cgroup_path
)
1301 r
= cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
);
1305 unit_add_to_gc_queue(u
);
1307 if (UNIT_VTABLE(u
)->notify_cgroup_empty
)
1308 UNIT_VTABLE(u
)->notify_cgroup_empty(u
);
1313 static int on_cgroup_inotify_event(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
1314 Manager
*m
= userdata
;
1321 union inotify_event_buffer buffer
;
1322 struct inotify_event
*e
;
1325 l
= read(fd
, &buffer
, sizeof(buffer
));
1327 if (errno
== EINTR
|| errno
== EAGAIN
)
1330 return log_error_errno(errno
, "Failed to read control group inotify events: %m");
1333 FOREACH_INOTIFY_EVENT(e
, buffer
, l
) {
1337 /* Queue overflow has no watch descriptor */
1340 if (e
->mask
& IN_IGNORED
)
1341 /* The watch was just removed */
1344 u
= hashmap_get(m
->cgroup_inotify_wd_unit
, INT_TO_PTR(e
->wd
));
1345 if (!u
) /* Not that inotify might deliver
1346 * events for a watch even after it
1347 * was removed, because it was queued
1348 * before the removal. Let's ignore
1349 * this here safely. */
1352 (void) unit_notify_cgroup_empty(u
);
1357 int manager_setup_cgroup(Manager
*m
) {
1358 _cleanup_free_
char *path
= NULL
;
1365 /* 1. Determine hierarchy */
1366 m
->cgroup_root
= mfree(m
->cgroup_root
);
1367 r
= cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER
, 0, &m
->cgroup_root
);
1369 return log_error_errno(r
, "Cannot determine cgroup we are running in: %m");
1371 /* Chop off the init scope, if we are already located in it */
1372 e
= endswith(m
->cgroup_root
, "/" SPECIAL_INIT_SCOPE
);
1374 /* LEGACY: Also chop off the system slice if we are in
1375 * it. This is to support live upgrades from older systemd
1376 * versions where PID 1 was moved there. Also see
1377 * cg_get_root_path(). */
1378 if (!e
&& m
->running_as
== MANAGER_SYSTEM
) {
1379 e
= endswith(m
->cgroup_root
, "/" SPECIAL_SYSTEM_SLICE
);
1381 e
= endswith(m
->cgroup_root
, "/system"); /* even more legacy */
1386 /* And make sure to store away the root value without trailing
1387 * slash, even for the root dir, so that we can easily prepend
1389 while ((e
= endswith(m
->cgroup_root
, "/")))
1393 r
= cg_get_path(SYSTEMD_CGROUP_CONTROLLER
, m
->cgroup_root
, NULL
, &path
);
1395 return log_error_errno(r
, "Cannot find cgroup mount point: %m");
1397 unified
= cg_unified();
1399 return log_error_errno(r
, "Couldn't determine if we are running in the unified hierarchy: %m");
1401 log_debug("Unified cgroup hierarchy is located at %s.", path
);
1403 log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER
". File system hierarchy is at %s.", path
);
1406 const char *scope_path
;
1408 /* 3. Install agent */
1411 /* In the unified hierarchy we can can get
1412 * cgroup empty notifications via inotify. */
1414 m
->cgroup_inotify_event_source
= sd_event_source_unref(m
->cgroup_inotify_event_source
);
1415 safe_close(m
->cgroup_inotify_fd
);
1417 m
->cgroup_inotify_fd
= inotify_init1(IN_NONBLOCK
|IN_CLOEXEC
);
1418 if (m
->cgroup_inotify_fd
< 0)
1419 return log_error_errno(errno
, "Failed to create control group inotify object: %m");
1421 r
= sd_event_add_io(m
->event
, &m
->cgroup_inotify_event_source
, m
->cgroup_inotify_fd
, EPOLLIN
, on_cgroup_inotify_event
, m
);
1423 return log_error_errno(r
, "Failed to watch control group inotify object: %m");
1425 r
= sd_event_source_set_priority(m
->cgroup_inotify_event_source
, SD_EVENT_PRIORITY_IDLE
- 5);
1427 return log_error_errno(r
, "Failed to set priority of inotify event source: %m");
1429 (void) sd_event_source_set_description(m
->cgroup_inotify_event_source
, "cgroup-inotify");
1431 } else if (m
->running_as
== MANAGER_SYSTEM
) {
1433 /* On the legacy hierarchy we only get
1434 * notifications via cgroup agents. (Which
1435 * isn't really reliable, since it does not
1436 * generate events when control groups with
1437 * children run empty. */
1439 r
= cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER
, SYSTEMD_CGROUP_AGENT_PATH
);
1441 log_warning_errno(r
, "Failed to install release agent, ignoring: %m");
1443 log_debug("Installed release agent.");
1445 log_debug("Release agent already installed.");
1448 /* 4. Make sure we are in the special "init.scope" unit in the root slice. */
1449 scope_path
= strjoina(m
->cgroup_root
, "/" SPECIAL_INIT_SCOPE
);
1450 r
= cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER
, scope_path
, 0);
1452 return log_error_errno(r
, "Failed to create %s control group: %m", scope_path
);
1454 /* also, move all other userspace processes remaining
1455 * in the root cgroup into that scope. */
1456 r
= cg_migrate(SYSTEMD_CGROUP_CONTROLLER
, m
->cgroup_root
, SYSTEMD_CGROUP_CONTROLLER
, scope_path
, false);
1458 log_warning_errno(r
, "Couldn't move remaining userspace processes, ignoring: %m");
1460 /* 5. And pin it, so that it cannot be unmounted */
1461 safe_close(m
->pin_cgroupfs_fd
);
1462 m
->pin_cgroupfs_fd
= open(path
, O_RDONLY
|O_CLOEXEC
|O_DIRECTORY
|O_NOCTTY
|O_NONBLOCK
);
1463 if (m
->pin_cgroupfs_fd
< 0)
1464 return log_error_errno(errno
, "Failed to open pin file: %m");
1466 /* 6. Always enable hierarchical support if it exists... */
1468 (void) cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");
1471 /* 7. Figure out which controllers are supported */
1472 r
= cg_mask_supported(&m
->cgroup_supported
);
1474 return log_error_errno(r
, "Failed to determine supported controllers: %m");
1476 for (c
= 0; c
< _CGROUP_CONTROLLER_MAX
; c
++)
1477 log_debug("Controller '%s' supported: %s", cgroup_controller_to_string(c
), yes_no(m
->cgroup_supported
& c
));
1482 void manager_shutdown_cgroup(Manager
*m
, bool delete) {
1485 /* We can't really delete the group, since we are in it. But
1487 if (delete && m
->cgroup_root
)
1488 (void) cg_trim(SYSTEMD_CGROUP_CONTROLLER
, m
->cgroup_root
, false);
1490 m
->cgroup_inotify_wd_unit
= hashmap_free(m
->cgroup_inotify_wd_unit
);
1492 m
->cgroup_inotify_event_source
= sd_event_source_unref(m
->cgroup_inotify_event_source
);
1493 m
->cgroup_inotify_fd
= safe_close(m
->cgroup_inotify_fd
);
1495 m
->pin_cgroupfs_fd
= safe_close(m
->pin_cgroupfs_fd
);
1497 m
->cgroup_root
= mfree(m
->cgroup_root
);
1500 Unit
* manager_get_unit_by_cgroup(Manager
*m
, const char *cgroup
) {
1507 u
= hashmap_get(m
->cgroup_unit
, cgroup
);
1511 p
= strdupa(cgroup
);
1515 e
= strrchr(p
, '/');
1517 return hashmap_get(m
->cgroup_unit
, SPECIAL_ROOT_SLICE
);
1521 u
= hashmap_get(m
->cgroup_unit
, p
);
1527 Unit
*manager_get_unit_by_pid_cgroup(Manager
*m
, pid_t pid
) {
1528 _cleanup_free_
char *cgroup
= NULL
;
1536 r
= cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER
, pid
, &cgroup
);
1540 return manager_get_unit_by_cgroup(m
, cgroup
);
1543 Unit
*manager_get_unit_by_pid(Manager
*m
, pid_t pid
) {
1552 return hashmap_get(m
->units
, SPECIAL_INIT_SCOPE
);
1554 u
= hashmap_get(m
->watch_pids1
, PID_TO_PTR(pid
));
1558 u
= hashmap_get(m
->watch_pids2
, PID_TO_PTR(pid
));
1562 return manager_get_unit_by_pid_cgroup(m
, pid
);
1565 int manager_notify_cgroup_empty(Manager
*m
, const char *cgroup
) {
1571 u
= manager_get_unit_by_cgroup(m
, cgroup
);
1575 return unit_notify_cgroup_empty(u
);
1578 int unit_get_memory_current(Unit
*u
, uint64_t *ret
) {
1579 _cleanup_free_
char *v
= NULL
;
1585 if (!u
->cgroup_path
)
1588 if ((u
->cgroup_realized_mask
& CGROUP_MASK_MEMORY
) == 0)
1591 if (cg_unified() <= 0)
1592 r
= cg_get_attribute("memory", u
->cgroup_path
, "memory.usage_in_bytes", &v
);
1594 r
= cg_get_attribute("memory", u
->cgroup_path
, "memory.current", &v
);
1600 return safe_atou64(v
, ret
);
1603 int unit_get_tasks_current(Unit
*u
, uint64_t *ret
) {
1604 _cleanup_free_
char *v
= NULL
;
1610 if (!u
->cgroup_path
)
1613 if ((u
->cgroup_realized_mask
& CGROUP_MASK_PIDS
) == 0)
1616 r
= cg_get_attribute("pids", u
->cgroup_path
, "pids.current", &v
);
1622 return safe_atou64(v
, ret
);
1625 static int unit_get_cpu_usage_raw(Unit
*u
, nsec_t
*ret
) {
1626 _cleanup_free_
char *v
= NULL
;
1633 if (!u
->cgroup_path
)
1636 if ((u
->cgroup_realized_mask
& CGROUP_MASK_CPUACCT
) == 0)
1639 r
= cg_get_attribute("cpuacct", u
->cgroup_path
, "cpuacct.usage", &v
);
1645 r
= safe_atou64(v
, &ns
);
1653 int unit_get_cpu_usage(Unit
*u
, nsec_t
*ret
) {
1657 r
= unit_get_cpu_usage_raw(u
, &ns
);
1661 if (ns
> u
->cpuacct_usage_base
)
1662 ns
-= u
->cpuacct_usage_base
;
1670 int unit_reset_cpu_usage(Unit
*u
) {
1676 r
= unit_get_cpu_usage_raw(u
, &ns
);
1678 u
->cpuacct_usage_base
= 0;
1682 u
->cpuacct_usage_base
= ns
;
1686 bool unit_cgroup_delegate(Unit
*u
) {
1691 c
= unit_get_cgroup_context(u
);
1698 void unit_invalidate_cgroup(Unit
*u
, CGroupMask m
) {
1701 if (!UNIT_HAS_CGROUP_CONTEXT(u
))
1707 if ((u
->cgroup_realized_mask
& m
) == 0)
1710 u
->cgroup_realized_mask
&= ~m
;
1711 unit_add_to_cgroup_queue(u
);
1714 void manager_invalidate_startup_units(Manager
*m
) {
1720 SET_FOREACH(u
, m
->startup_units
, i
)
1721 unit_invalidate_cgroup(u
, CGROUP_MASK_CPU
|CGROUP_MASK_BLKIO
);
1724 static const char* const cgroup_device_policy_table
[_CGROUP_DEVICE_POLICY_MAX
] = {
1725 [CGROUP_AUTO
] = "auto",
1726 [CGROUP_CLOSED
] = "closed",
1727 [CGROUP_STRICT
] = "strict",
1730 DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy
, CGroupDevicePolicy
);