/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/

/***
  This file is part of systemd.

  Copyright 2013 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
#include <fcntl.h>
#include <fnmatch.h>

#include "alloc-util.h"
#include "cgroup-util.h"
#include "cgroup.h"
#include "fd-util.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
#include "special.h"
#include "string-table.h"
#include "string-util.h"

#define CGROUP_CPU_QUOTA_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)
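/* Note: the CFS period is fixed at 100ms here. CPUQuotaPerSecUSec= is
 * scaled to this period when written out in cgroup_context_apply()
 * below: e.g. CPUQuota=20% stores cpu_quota_per_sec_usec == 200000,
 * and with a 100ms period the value written to cpu.cfs_quota_us is
 * 200000 * 100000 / 1000000 == 20000, i.e. 20ms of CPU time per
 * 100ms period. */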
void cgroup_context_init(CGroupContext *c) {
        assert(c);

        /* Initialize everything to the kernel defaults, assuming the
         * structure is preinitialized to 0 */

        c->cpu_shares = CGROUP_CPU_SHARES_INVALID;
        c->startup_cpu_shares = CGROUP_CPU_SHARES_INVALID;
        c->cpu_quota_per_sec_usec = USEC_INFINITY;

        c->memory_limit = (uint64_t) -1;

        c->blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;
        c->startup_blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;

        c->tasks_max = (uint64_t) -1;

        c->netclass_type = CGROUP_NETCLASS_TYPE_NONE;
}
void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
        assert(c);
        assert(a);

        LIST_REMOVE(device_allow, c->device_allow, a);
        free(a->path);
        free(a);
}
void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->blockio_device_weights, w);
        free(w->path);
        free(w);
}
void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b) {
        assert(c);
        assert(b);

        LIST_REMOVE(device_bandwidths, c->blockio_device_bandwidths, b);
        free(b->path);
        free(b);
}
void cgroup_context_done(CGroupContext *c) {
        assert(c);

        while (c->blockio_device_weights)
                cgroup_context_free_blockio_device_weight(c, c->blockio_device_weights);

        while (c->blockio_device_bandwidths)
                cgroup_context_free_blockio_device_bandwidth(c, c->blockio_device_bandwidths);

        while (c->device_allow)
                cgroup_context_free_device_allow(c, c->device_allow);
}
void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) {
        CGroupBlockIODeviceBandwidth *b;
        CGroupBlockIODeviceWeight *w;
        CGroupDeviceAllow *a;
        char u[FORMAT_TIMESPAN_MAX];

        assert(c);
        assert(f);

        prefix = strempty(prefix);

        fprintf(f,
                "%sCPUAccounting=%s\n"
                "%sBlockIOAccounting=%s\n"
                "%sMemoryAccounting=%s\n"
                "%sTasksAccounting=%s\n"
                "%sCPUShares=%" PRIu64 "\n"
                "%sStartupCPUShares=%" PRIu64 "\n"
                "%sCPUQuotaPerSecSec=%s\n"
                "%sBlockIOWeight=%" PRIu64 "\n"
                "%sStartupBlockIOWeight=%" PRIu64 "\n"
                "%sMemoryLimit=%" PRIu64 "\n"
                "%sTasksMax=%" PRIu64 "\n"
                "%sDevicePolicy=%s\n"
                "%sDelegate=%s\n",
                prefix, yes_no(c->cpu_accounting),
                prefix, yes_no(c->blockio_accounting),
                prefix, yes_no(c->memory_accounting),
                prefix, yes_no(c->tasks_accounting),
                prefix, c->cpu_shares,
                prefix, c->startup_cpu_shares,
                prefix, format_timespan(u, sizeof(u), c->cpu_quota_per_sec_usec, 1),
                prefix, c->blockio_weight,
                prefix, c->startup_blockio_weight,
                prefix, c->memory_limit,
                prefix, c->tasks_max,
                prefix, cgroup_device_policy_to_string(c->device_policy),
                prefix, yes_no(c->delegate));

        LIST_FOREACH(device_allow, a, c->device_allow)
                fprintf(f,
                        "%sDeviceAllow=%s %s%s%s\n",
                        prefix,
                        a->path,
                        a->r ? "r" : "", a->w ? "w" : "", a->m ? "m" : "");

        LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                fprintf(f,
                        "%sBlockIODeviceWeight=%s %" PRIu64,
                        prefix,
                        w->path,
                        w->weight);

        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                char buf[FORMAT_BYTES_MAX];

                fprintf(f,
                        "%s%s=%s %s\n",
                        prefix,
                        b->read ? "BlockIOReadBandwidth" : "BlockIOWriteBandwidth",
                        b->path,
                        format_bytes(buf, sizeof(buf), b->bandwidth));
        }
}
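/* For instance, a freshly initialized context (see
 * cgroup_context_init() above) dumps lines such as:
 *
 *         CPUAccounting=no
 *         CPUShares=18446744073709551615
 *         DevicePolicy=auto
 *         Delegate=no
 *
 * since the CGROUP_*_INVALID sentinels are (uint64_t) -1 and are
 * printed verbatim here. */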
static int lookup_blkio_device(const char *p, dev_t *dev) {
        struct stat st;
        int r;

        assert(p);
        assert(dev);

        r = stat(p, &st);
        if (r < 0)
                return log_warning_errno(errno, "Couldn't stat device %s: %m", p);

        if (S_ISBLK(st.st_mode))
                *dev = st.st_rdev;
        else if (major(st.st_dev) != 0) {
                /* If this is not a device node then find the block
                 * device this file is stored on */
                *dev = st.st_dev;

                /* If this is a partition, try to get the originating
                 * block device */
                block_get_whole_disk(*dev, dev);
        } else {
                log_warning("%s is not a block device and file system block device cannot be determined or is not local.", p);
                return -ENODEV;
        }

        return 0;
}
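/* Example: looking up a regular file that lives on /dev/sda2 yields
 * the partition's st_dev, which block_get_whole_disk() then maps to
 * the whole disk (sda), since the blkio attributes written below
 * address devices by the major:minor of the block device. */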
static int whitelist_device(const char *path, const char *node, const char *acc) {
        char buf[2+DECIMAL_STR_MAX(dev_t)*2+2+4];
        struct stat st;
        int r;

        assert(path);
        assert(acc);

        if (stat(node, &st) < 0) {
                log_warning("Couldn't stat device %s", node);
                return -errno;
        }

        if (!S_ISCHR(st.st_mode) && !S_ISBLK(st.st_mode)) {
                log_warning("%s is not a device.", node);
                return -ENODEV;
        }

        sprintf(buf,
                "%c %u:%u %s",
                S_ISCHR(st.st_mode) ? 'c' : 'b',
                major(st.st_rdev), minor(st.st_rdev),
                acc);

        r = cg_set_attribute("devices", path, "devices.allow", buf);
        if (r < 0)
                log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL) ? LOG_DEBUG : LOG_WARNING, r,
                               "Failed to set devices.allow on %s: %m", path);

        return r;
}
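/* The string written above uses the kernel's devices cgroup syntax
 * "<type> <major>:<minor> <access>"; e.g. whitelisting /dev/null
 * (char 1:3) for read, write and mknod results in:
 *
 *         c 1:3 rwm
 */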
static int whitelist_major(const char *path, const char *name, char type, const char *acc) {
        _cleanup_fclose_ FILE *f = NULL;
        char line[LINE_MAX];
        bool good = false;
        int r;

        assert(path);
        assert(acc);
        assert(type == 'b' || type == 'c');

        f = fopen("/proc/devices", "re");
        if (!f)
                return log_warning_errno(errno, "Cannot open /proc/devices to resolve %s (%c): %m", name, type);

        FOREACH_LINE(line, f, goto fail) {
                char buf[2+DECIMAL_STR_MAX(unsigned)+3+4], *p, *w;
                unsigned maj;

                truncate_nl(line);

                if (type == 'c' && streq(line, "Character devices:")) {
                        good = true;
                        continue;
                }

                if (type == 'b' && streq(line, "Block devices:")) {
                        good = true;
                        continue;
                }

                if (isempty(line)) {
                        good = false;
                        continue;
                }

                if (!good)
                        continue;

                p = strstrip(line);

                w = strpbrk(p, WHITESPACE);
                if (!w)
                        continue;
                *w = 0;

                r = safe_atou(p, &maj);
                if (r < 0)
                        continue;
                if (maj <= 0)
                        continue;

                w++;
                w += strspn(w, WHITESPACE);

                if (fnmatch(name, w, 0) != 0)
                        continue;

                sprintf(buf,
                        "%c %u:* %s",
                        type,
                        maj,
                        acc);

                r = cg_set_attribute("devices", path, "devices.allow", buf);
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set devices.allow on %s: %m", path);
        }

        return 0;

fail:
        log_warning_errno(errno, "Failed to read /proc/devices: %m");
        return -errno;
}
void cgroup_context_apply(CGroupContext *c, CGroupMask mask, const char *path, uint32_t netclass, ManagerState state) {
        bool is_root;
        int r;

        assert(c);
        assert(path);

        if (mask == 0)
                return;

        /* Some cgroup attributes are not supported on the root cgroup,
         * hence silently ignore */
        is_root = isempty(path) || path_equal(path, "/");
        if (is_root)
                /* Make sure we don't try to display messages with an empty path. */
                path = "/";

        /* We generally ignore errors caused by read-only mounted
         * cgroup trees (assuming we are running in a container then),
         * and missing cgroups, i.e. EROFS and ENOENT. */

        if ((mask & CGROUP_MASK_CPU) && !is_root) {
                char buf[MAX(DECIMAL_STR_MAX(uint64_t), DECIMAL_STR_MAX(usec_t)) + 1];

                sprintf(buf, "%" PRIu64 "\n",
                        IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) && c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID ? c->startup_cpu_shares :
                        c->cpu_shares != CGROUP_CPU_SHARES_INVALID ? c->cpu_shares : CGROUP_CPU_SHARES_DEFAULT);
                r = cg_set_attribute("cpu", path, "cpu.shares", buf);
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set cpu.shares on %s: %m", path);

                sprintf(buf, USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);
                r = cg_set_attribute("cpu", path, "cpu.cfs_period_us", buf);
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set cpu.cfs_period_us on %s: %m", path);

                if (c->cpu_quota_per_sec_usec != USEC_INFINITY) {
                        sprintf(buf, USEC_FMT "\n", c->cpu_quota_per_sec_usec * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC);
                        r = cg_set_attribute("cpu", path, "cpu.cfs_quota_us", buf);
                } else
                        r = cg_set_attribute("cpu", path, "cpu.cfs_quota_us", "-1");
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set cpu.cfs_quota_us on %s: %m", path);
        }

        if (mask & CGROUP_MASK_BLKIO) {
                char buf[MAX(DECIMAL_STR_MAX(uint64_t)+1,
                             DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1)];
                CGroupBlockIODeviceWeight *w;
                CGroupBlockIODeviceBandwidth *b;

                if (!is_root) {
                        sprintf(buf, "%" PRIu64 "\n",
                                IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) && c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ? c->startup_blockio_weight :
                                c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ? c->blockio_weight : CGROUP_BLKIO_WEIGHT_DEFAULT);
                        r = cg_set_attribute("blkio", path, "blkio.weight", buf);
                        if (r < 0)
                                log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
                                               "Failed to set blkio.weight on %s: %m", path);

                        /* FIXME: no way to reset this list */
                        LIST_FOREACH(device_weights, w, c->blockio_device_weights) {
                                dev_t dev;

                                r = lookup_blkio_device(w->path, &dev);
                                if (r < 0)
                                        continue;

                                sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), w->weight);
                                r = cg_set_attribute("blkio", path, "blkio.weight_device", buf);
                                if (r < 0)
                                        log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
                                                       "Failed to set blkio.weight_device on %s: %m", path);
                        }
                }

                /* FIXME: no way to reset this list */
                LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                        const char *a;
                        dev_t dev;

                        r = lookup_blkio_device(b->path, &dev);
                        if (r < 0)
                                continue;

                        a = b->read ? "blkio.throttle.read_bps_device" : "blkio.throttle.write_bps_device";

                        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), b->bandwidth);
                        r = cg_set_attribute("blkio", path, a, buf);
                        if (r < 0)
                                log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
                                               "Failed to set %s on %s: %m", a, path);
                }
        }

        if ((mask & CGROUP_MASK_MEMORY) && !is_root) {
                if (c->memory_limit != (uint64_t) -1) {
                        char buf[DECIMAL_STR_MAX(uint64_t) + 1];

                        sprintf(buf, "%" PRIu64 "\n", c->memory_limit);

                        if (cg_unified() <= 0)
                                r = cg_set_attribute("memory", path, "memory.limit_in_bytes", buf);
                        else
                                r = cg_set_attribute("memory", path, "memory.max", buf);

                } else {
                        if (cg_unified() <= 0)
                                r = cg_set_attribute("memory", path, "memory.limit_in_bytes", "-1");
                        else
                                r = cg_set_attribute("memory", path, "memory.max", "max");
                }

                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set memory.limit_in_bytes/memory.max on %s: %m", path);
        }

        if ((mask & CGROUP_MASK_DEVICES) && !is_root) {
                CGroupDeviceAllow *a;

                /* Changing the devices list of a populated cgroup
                 * might result in EINVAL, hence ignore EINVAL
                 * here. */

                if (c->device_allow || c->device_policy != CGROUP_AUTO)
                        r = cg_set_attribute("devices", path, "devices.deny", "a");
                else
                        r = cg_set_attribute("devices", path, "devices.allow", "a");
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to reset devices.list on %s: %m", path);

                if (c->device_policy == CGROUP_CLOSED ||
                    (c->device_policy == CGROUP_AUTO && c->device_allow)) {
                        static const char auto_devices[] =
                                "/dev/null\0" "rwm\0"
                                "/dev/zero\0" "rwm\0"
                                "/dev/full\0" "rwm\0"
                                "/dev/random\0" "rwm\0"
                                "/dev/urandom\0" "rwm\0"
                                "/dev/tty\0" "rwm\0"
                                "/dev/pts/ptmx\0" "rw\0"; /* /dev/pts/ptmx may not be duplicated, but accessed */

                        const char *x, *y;

                        NULSTR_FOREACH_PAIR(x, y, auto_devices)
                                whitelist_device(path, x, y);

                        whitelist_major(path, "pts", 'c', "rw");
                        whitelist_major(path, "kdbus", 'c', "rw");
                        whitelist_major(path, "kdbus/*", 'c', "rw");
                }

                LIST_FOREACH(device_allow, a, c->device_allow) {
                        char acc[4];
                        unsigned k = 0;

                        if (a->r)
                                acc[k++] = 'r';
                        if (a->w)
                                acc[k++] = 'w';
                        if (a->m)
                                acc[k++] = 'm';

                        if (k == 0)
                                continue;

                        acc[k++] = 0;

                        if (startswith(a->path, "/dev/"))
                                whitelist_device(path, a->path, acc);
                        else if (startswith(a->path, "block-"))
                                whitelist_major(path, a->path + 6, 'b', acc);
                        else if (startswith(a->path, "char-"))
                                whitelist_major(path, a->path + 5, 'c', acc);
                        else
                                log_debug("Ignoring device %s while writing cgroup attribute.", a->path);
                }
        }

        if ((mask & CGROUP_MASK_PIDS) && !is_root) {

                if (c->tasks_max != (uint64_t) -1) {
                        char buf[DECIMAL_STR_MAX(uint64_t) + 2];

                        sprintf(buf, "%" PRIu64 "\n", c->tasks_max);
                        r = cg_set_attribute("pids", path, "pids.max", buf);
                } else
                        r = cg_set_attribute("pids", path, "pids.max", "max");

                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set pids.max on %s: %m", path);
        }

        if (mask & CGROUP_MASK_NET_CLS) {
                char buf[DECIMAL_STR_MAX(uint32_t)];

                sprintf(buf, "%" PRIu32, netclass);

                r = cg_set_attribute("net_cls", path, "net_cls.classid", buf);
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set net_cls.classid on %s: %m", path);
        }
}
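/* Putting the CPU logic above together: a unit with CPUShares=512 and
 * CPUQuota=20% ends up with "512" in cpu.shares, "100000" in
 * cpu.cfs_period_us and "20000" in cpu.cfs_quota_us. */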
CGroupMask cgroup_context_get_mask(CGroupContext *c) {
        CGroupMask mask = 0;

        /* Figure out which controllers we need */

        if (c->cpu_accounting ||
            c->cpu_shares != CGROUP_CPU_SHARES_INVALID ||
            c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID ||
            c->cpu_quota_per_sec_usec != USEC_INFINITY)
                mask |= CGROUP_MASK_CPUACCT | CGROUP_MASK_CPU;

        if (c->blockio_accounting ||
            c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
            c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
            c->blockio_device_weights ||
            c->blockio_device_bandwidths)
                mask |= CGROUP_MASK_BLKIO;

        if (c->memory_accounting ||
            c->memory_limit != (uint64_t) -1)
                mask |= CGROUP_MASK_MEMORY;

        if (c->device_allow ||
            c->device_policy != CGROUP_AUTO)
                mask |= CGROUP_MASK_DEVICES;

        if (c->tasks_accounting ||
            c->tasks_max != (uint64_t) -1)
                mask |= CGROUP_MASK_PIDS;

        if (c->netclass_type != CGROUP_NETCLASS_TYPE_NONE)
                mask |= CGROUP_MASK_NET_CLS;

        return mask;
}
CGroupMask unit_get_own_mask(Unit *u) {
        CGroupContext *c;

        /* Returns the mask of controllers the unit needs for itself */

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        /* If delegation is turned on, then turn on all cgroups,
         * unless we are on the legacy hierarchy and the process we
         * fork into it is known to drop privileges, and hence
         * shouldn't get access to the controllers.
         *
         * Note that on the unified hierarchy it is safe to delegate
         * controllers to unprivileged services. */

        if (c->delegate) {
                ExecContext *e;

                e = unit_get_exec_context(u);
                if (!e ||
                    exec_context_maintains_privileges(e) ||
                    cg_unified() > 0)
                        return _CGROUP_MASK_ALL;
        }

        return cgroup_context_get_mask(c);
}
CGroupMask unit_get_members_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's children
         * require, merged */

        if (u->cgroup_members_mask_valid)
                return u->cgroup_members_mask;

        u->cgroup_members_mask = 0;

        if (u->type == UNIT_SLICE) {
                Unit *member;
                Iterator i;

                SET_FOREACH(member, u->dependencies[UNIT_BEFORE], i) {

                        if (member == u)
                                continue;

                        if (UNIT_DEREF(member->slice) != u)
                                continue;

                        u->cgroup_members_mask |=
                                unit_get_own_mask(member) |
                                unit_get_members_mask(member);
                }
        }

        u->cgroup_members_mask_valid = true;
        return u->cgroup_members_mask;
}
CGroupMask unit_get_siblings_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's siblings
         * require, i.e. the members mask of the unit's parent slice
         * if there is one. */

        if (UNIT_ISSET(u->slice))
                return unit_get_members_mask(UNIT_DEREF(u->slice));

        return unit_get_own_mask(u) | unit_get_members_mask(u);
}
CGroupMask unit_get_subtree_mask(Unit *u) {

        /* Returns the mask of this subtree, meaning of the group
         * itself and its children. */

        return unit_get_own_mask(u) | unit_get_members_mask(u);
}
CGroupMask unit_get_target_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable
         * for a specific cgroup, i.e. everything it needs itself,
         * plus all that its children need, plus all that its siblings
         * need. This is primarily useful on the legacy cgroup
         * hierarchy, where we need to duplicate each cgroup in each
         * hierarchy that shall be enabled for it. */

        mask = unit_get_own_mask(u) | unit_get_members_mask(u) | unit_get_siblings_mask(u);
        mask &= u->manager->cgroup_supported;

        return mask;
}
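/* Example: for a.service residing in b.slice, the target mask is what
 * a.service needs itself plus what all other members of b.slice need
 * (via the siblings mask), clamped to the controllers the kernel
 * actually supports. */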
CGroupMask unit_get_enable_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable
         * for the children of a specific cgroup. This is primarily
         * useful for the unified cgroup hierarchy, where each cgroup
         * controls which controllers are enabled for its children. */

        mask = unit_get_members_mask(u);
        mask &= u->manager->cgroup_supported;

        return mask;
}
/* Recurse from a unit up through its containing slices, propagating
 * mask bits upward. A unit is also a member of itself. */
void unit_update_cgroup_members_masks(Unit *u) {
        CGroupMask m;
        bool more;

        assert(u);

        /* Calculate subtree mask */
        m = unit_get_subtree_mask(u);

        /* See if anything changed from the previous invocation. If
         * not, we're done. */
        if (u->cgroup_subtree_mask_valid && m == u->cgroup_subtree_mask)
                return;

        more =
                u->cgroup_subtree_mask_valid &&
                ((m & ~u->cgroup_subtree_mask) != 0) &&
                ((~m & u->cgroup_subtree_mask) == 0);

        u->cgroup_subtree_mask = m;
        u->cgroup_subtree_mask_valid = true;

        if (UNIT_ISSET(u->slice)) {
                Unit *s = UNIT_DEREF(u->slice);

                if (more)
                        /* There's more set now than before. We
                         * propagate the new mask to the parent's mask
                         * (not caring if it actually was valid or
                         * not). */

                        s->cgroup_members_mask |= m;

                else
                        /* There's less set now than before (or we
                         * don't know), we need to recalculate
                         * everything, so let's invalidate the
                         * parent's members mask */

                        s->cgroup_members_mask_valid = false;

                /* And now make sure that this change also hits our
                 * grandparents */
                unit_update_cgroup_members_masks(s);
        }
}
static const char *migrate_callback(CGroupMask mask, void *userdata) {
        Unit *u = userdata;

        assert(mask != 0);
        assert(u);

        while (u) {
                if (u->cgroup_path &&
                    u->cgroup_realized &&
                    (u->cgroup_realized_mask & mask) == mask)
                        return u->cgroup_path;

                u = UNIT_DEREF(u->slice);
        }

        return NULL;
}
char *unit_default_cgroup_path(Unit *u) {
        _cleanup_free_ char *escaped = NULL, *slice = NULL;
        int r;

        assert(u);

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return strdup(u->manager->cgroup_root);

        if (UNIT_ISSET(u->slice) && !unit_has_name(UNIT_DEREF(u->slice), SPECIAL_ROOT_SLICE)) {
                r = cg_slice_to_path(UNIT_DEREF(u->slice)->id, &slice);
                if (r < 0)
                        return NULL;
        }

        escaped = cg_escape(u->id);
        if (!escaped)
                return NULL;

        if (slice)
                return strjoin(u->manager->cgroup_root, "/", slice, "/", escaped, NULL);
        else
                return strjoin(u->manager->cgroup_root, "/", escaped, NULL);
}
int unit_set_cgroup_path(Unit *u, const char *path) {
        _cleanup_free_ char *p = NULL;
        int r;

        assert(u);

        if (path) {
                p = strdup(path);
                if (!p)
                        return -ENOMEM;
        }

        if (streq_ptr(u->cgroup_path, p))
                return 0;

        if (p) {
                r = hashmap_put(u->manager->cgroup_unit, p, u);
                if (r < 0)
                        return r;
        }

        unit_release_cgroup(u);

        u->cgroup_path = p;
        p = NULL;

        return 0;
}
int unit_watch_cgroup(Unit *u) {
        _cleanup_free_ char *populated = NULL;
        int r;

        assert(u);

        if (!u->cgroup_path)
                return 0;

        if (u->cgroup_inotify_wd >= 0)
                return 0;

        /* Only applies to the unified hierarchy */
        r = cg_unified();
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to detect whether the unified hierarchy is used: %m");
        if (r == 0)
                return 0;

        /* Don't watch the root slice, it's pointless. */
        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        r = hashmap_ensure_allocated(&u->manager->cgroup_inotify_wd_unit, &trivial_hash_ops);
        if (r < 0)
                return log_oom();

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.populated", &populated);
        if (r < 0)
                return r;

        u->cgroup_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, populated, IN_MODIFY);
        if (u->cgroup_inotify_wd < 0) {

                if (errno == ENOENT) /* If the directory is already
                                      * gone we don't need to track
                                      * it, so this is not an error */
                        return 0;

                return log_unit_error_errno(u, errno, "Failed to add inotify watch descriptor for control group %s: %m", u->cgroup_path);
        }

        r = hashmap_put(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd), u);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to add inotify watch descriptor to hash map: %m");

        return 0;
}
static int unit_create_cgroup(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask) {

        CGroupContext *c;
        int r;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        if (!u->cgroup_path) {
                _cleanup_free_ char *path = NULL;

                path = unit_default_cgroup_path(u);
                if (!path)
                        return log_oom();

                r = unit_set_cgroup_path(u, path);
                if (r == -EEXIST)
                        return log_unit_error_errno(u, r, "Control group %s exists already.", path);
                if (r < 0)
                        return log_unit_error_errno(u, r, "Failed to set unit's control group path to %s: %m", path);
        }

        /* First, create our own group */
        r = cg_create_everywhere(u->manager->cgroup_supported, target_mask, u->cgroup_path);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to create cgroup %s: %m", u->cgroup_path);

        /* Start watching it */
        (void) unit_watch_cgroup(u);

        /* Enable all controllers we need */
        r = cg_enable_everywhere(u->manager->cgroup_supported, enable_mask, u->cgroup_path);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enable controllers on cgroup %s, ignoring: %m", u->cgroup_path);

        /* Keep track that this is now realized */
        u->cgroup_realized = true;
        u->cgroup_realized_mask = target_mask;

        if (u->type != UNIT_SLICE && !c->delegate) {

                /* Then, possibly move things over, but not if
                 * subgroups may contain processes, which is the case
                 * for slice and delegation units. */
                r = cg_migrate_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->cgroup_path, migrate_callback, u);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to migrate cgroup to %s, ignoring: %m", u->cgroup_path);
        }

        return 0;
}
int unit_attach_pids_to_cgroup(Unit *u) {
        int r;

        assert(u);

        r = unit_realize_cgroup(u);
        if (r < 0)
                return r;

        r = cg_attach_many_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->pids, migrate_callback, u);
        if (r < 0)
                return r;

        return 0;
}
static bool unit_has_mask_realized(Unit *u, CGroupMask target_mask) {
        assert(u);

        return u->cgroup_realized && u->cgroup_realized_mask == target_mask;
}
static int unit_find_free_netclass_cgroup(Unit *u, uint32_t *ret) {
        uint32_t start, i;
        Manager *m;

        assert(u);

        m = u->manager;

        i = start = m->cgroup_netclass_registry_last;

        do {
                i++;

                if (!hashmap_get(m->cgroup_netclass_registry, UINT_TO_PTR(i))) {
                        m->cgroup_netclass_registry_last = i;
                        *ret = i;

                        return 0;
                }

                if (i == UINT32_MAX)
                        i = CGROUP_NETCLASS_FIXED_MAX;

        } while (i != start);

        return -ENOBUFS;
}
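/* Dynamic IDs are allocated by walking the 32 bit space sequentially,
 * skipping IDs still present in the registry; on overflow the search
 * wraps to CGROUP_NETCLASS_FIXED_MAX rather than to 0, so it cannot
 * collide with the range reserved for fixed NetClass= assignments. */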
int unit_add_to_netclass_cgroup(Unit *u) {

        CGroupContext *cc;
        Unit *first;
        void *key;
        int r;

        assert(u);

        cc = unit_get_cgroup_context(u);
        if (!cc)
                return 0;

        switch (cc->netclass_type) {
        case CGROUP_NETCLASS_TYPE_NONE:
                return 0;

        case CGROUP_NETCLASS_TYPE_FIXED:
                u->cgroup_netclass_id = cc->netclass_id;
                break;

        case CGROUP_NETCLASS_TYPE_AUTO:
                /* Allocate a new ID in case it was requested and not done yet */
                if (u->cgroup_netclass_id == 0) {
                        r = unit_find_free_netclass_cgroup(u, &u->cgroup_netclass_id);
                        if (r < 0)
                                return r;

                        log_debug("Dynamically assigned netclass cgroup id %" PRIu32 " to %s", u->cgroup_netclass_id, u->id);
                }

                break;
        }

        r = hashmap_ensure_allocated(&u->manager->cgroup_netclass_registry, &trivial_hash_ops);
        if (r < 0)
                return r;

        key = UINT32_TO_PTR(u->cgroup_netclass_id);
        first = hashmap_get(u->manager->cgroup_netclass_registry, key);

        if (first) {
                LIST_PREPEND(cgroup_netclass, first, u);
                return hashmap_replace(u->manager->cgroup_netclass_registry, key, u);
        }

        return hashmap_put(u->manager->cgroup_netclass_registry, key, u);
}
int unit_remove_from_netclass_cgroup(Unit *u) {

        Unit *head;
        void *key;

        assert(u);

        key = UINT32_TO_PTR(u->cgroup_netclass_id);

        LIST_FIND_HEAD(cgroup_netclass, u, head);
        LIST_REMOVE(cgroup_netclass, head, u);

        if (head)
                return hashmap_replace(u->manager->cgroup_netclass_registry, key, head);

        hashmap_remove(u->manager->cgroup_netclass_registry, key);

        return 0;
}
/* Check if necessary controllers and attributes for a unit are in place.
 *
 * If so, do nothing.
 * If not, create paths, move processes over, and set attributes.
 *
 * Returns 0 on success and < 0 on failure. */
static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
        CGroupMask target_mask, enable_mask;
        int r;

        assert(u);

        if (u->in_cgroup_queue) {
                LIST_REMOVE(cgroup_queue, u->manager->cgroup_queue, u);
                u->in_cgroup_queue = false;
        }

        target_mask = unit_get_target_mask(u);
        if (unit_has_mask_realized(u, target_mask))
                return 0;

        /* First, realize parents */
        if (UNIT_ISSET(u->slice)) {
                r = unit_realize_cgroup_now(UNIT_DEREF(u->slice), state);
                if (r < 0)
                        return r;
        }

        /* And then do the real work */
        enable_mask = unit_get_enable_mask(u);
        r = unit_create_cgroup(u, target_mask, enable_mask);
        if (r < 0)
                return r;

        /* Finally, apply the necessary attributes. */
        cgroup_context_apply(unit_get_cgroup_context(u), target_mask, u->cgroup_path, u->cgroup_netclass_id, state);

        return 0;
}
static void unit_add_to_cgroup_queue(Unit *u) {

        if (u->in_cgroup_queue)
                return;

        LIST_PREPEND(cgroup_queue, u->manager->cgroup_queue, u);
        u->in_cgroup_queue = true;
}
unsigned manager_dispatch_cgroup_queue(Manager *m) {
        ManagerState state;
        unsigned n = 0;
        Unit *i;
        int r;

        state = manager_state(m);

        while ((i = m->cgroup_queue)) {
                assert(i->in_cgroup_queue);

                r = unit_realize_cgroup_now(i, state);
                if (r < 0)
                        log_warning_errno(r, "Failed to realize cgroups for queued unit %s, ignoring: %m", i->id);

                n++;
        }

        return n;
}
static void unit_queue_siblings(Unit *u) {
        Unit *slice;

        /* This adds the siblings of the specified unit and the
         * siblings of all parent units to the cgroup queue. (But
         * neither the specified unit itself nor the parents.) */

        while ((slice = UNIT_DEREF(u->slice))) {
                Iterator i;
                Unit *m;

                SET_FOREACH(m, slice->dependencies[UNIT_BEFORE], i) {
                        if (m == u)
                                continue;

                        /* Skip units that have a dependency on the slice
                         * but aren't actually in it. */
                        if (UNIT_DEREF(m->slice) != slice)
                                continue;

                        /* No point in doing cgroup application for units
                         * without active processes. */
                        if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(m)))
                                continue;

                        /* If the unit doesn't need any new controllers
                         * and has current ones realized, it doesn't need
                         * any changes. */
                        if (unit_has_mask_realized(m, unit_get_target_mask(m)))
                                continue;

                        unit_add_to_cgroup_queue(m);
                }

                u = slice;
        }
}
int unit_realize_cgroup(Unit *u) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* So, here's the deal: when realizing the cgroups for this
         * unit, we need to first create all parents, but there's more
         * actually: for the weight-based controllers we also need to
         * make sure that all our siblings (i.e. units that are in the
         * same slice as we are) have cgroups, too. Otherwise, things
         * would become very uneven as each of their processes would
         * get as much resources as all our group together. This call
         * will synchronously create the parent cgroups, but will
         * defer work on the siblings to the next event loop
         * iteration. */

        /* Add all sibling slices to the cgroup queue. */
        unit_queue_siblings(u);

        /* And realize this one now (and apply the values) */
        return unit_realize_cgroup_now(u, manager_state(u->manager));
}
void unit_release_cgroup(Unit *u) {
        assert(u);

        /* Forgets all cgroup details for this cgroup */

        if (u->cgroup_path) {
                (void) hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
                u->cgroup_path = mfree(u->cgroup_path);
        }

        if (u->cgroup_inotify_wd >= 0) {
                if (inotify_rm_watch(u->manager->cgroup_inotify_fd, u->cgroup_inotify_wd) < 0)
                        log_unit_debug_errno(u, errno, "Failed to remove cgroup inotify watch %i for %s, ignoring", u->cgroup_inotify_wd, u->id);

                (void) hashmap_remove(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd));
                u->cgroup_inotify_wd = -1;
        }
}
void unit_prune_cgroup(Unit *u) {
        int r;
        bool is_root_slice;

        assert(u);

        /* Removes the cgroup, if empty and possible, and stops watching it. */

        if (!u->cgroup_path)
                return;

        is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);

        r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !is_root_slice);
        if (r < 0) {
                log_debug_errno(r, "Failed to destroy cgroup %s, ignoring: %m", u->cgroup_path);
                return;
        }

        if (is_root_slice)
                return;

        unit_release_cgroup(u);

        u->cgroup_realized = false;
        u->cgroup_realized_mask = 0;
}
int unit_search_main_pid(Unit *u, pid_t *ret) {
        _cleanup_fclose_ FILE *f = NULL;
        pid_t pid = 0, npid, mypid;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENXIO;

        r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, &f);
        if (r < 0)
                return r;

        mypid = getpid();
        while (cg_read_pid(f, &npid) > 0) {
                pid_t ppid;

                if (npid == pid)
                        continue;

                /* Ignore processes that aren't our kids */
                if (get_parent_of_pid(npid, &ppid) >= 0 && ppid != mypid)
                        continue;

                if (pid != 0)
                        /* Dang, there's more than one daemonized PID
                         * in this group, so we don't know what process
                         * is the main process. */

                        return -ENODATA;

                pid = npid;
        }

        *ret = pid;
        return 0;
}
static int unit_watch_pids_in_path(Unit *u, const char *path) {
        _cleanup_closedir_ DIR *d = NULL;
        _cleanup_fclose_ FILE *f = NULL;
        int ret = 0, r;

        assert(u);
        assert(path);

        r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f);
        if (r < 0)
                ret = r;
        else {
                pid_t pid;

                while ((r = cg_read_pid(f, &pid)) > 0) {
                        r = unit_watch_pid(u, pid);
                        if (r < 0 && ret >= 0)
                                ret = r;
                }

                if (r < 0 && ret >= 0)
                        ret = r;
        }

        r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d);
        if (r < 0) {
                if (ret >= 0)
                        ret = r;
        } else {
                char *fn;

                while ((r = cg_read_subgroup(d, &fn)) > 0) {
                        _cleanup_free_ char *p = NULL;

                        p = strjoin(path, "/", fn, NULL);
                        free(fn);

                        if (!p)
                                return -ENOMEM;

                        r = unit_watch_pids_in_path(u, p);
                        if (r < 0 && ret >= 0)
                                ret = r;
                }

                if (r < 0 && ret >= 0)
                        ret = r;
        }

        return ret;
}
int unit_watch_all_pids(Unit *u) {
        assert(u);

        /* Adds all PIDs from our cgroup to the set of PIDs we
         * watch. This is a fallback logic for cases where we do not
         * get reliable cgroup empty notifications: we try to use
         * SIGCHLD as replacement. */

        if (!u->cgroup_path)
                return -ENOENT;

        if (cg_unified() > 0) /* On unified we can use proper notifications */
                return 0;

        return unit_watch_pids_in_path(u, u->cgroup_path);
}
int unit_notify_cgroup_empty(Unit *u) {
        int r;

        assert(u);

        if (!u->cgroup_path)
                return 0;

        r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
        if (r <= 0)
                return r;

        unit_add_to_gc_queue(u);

        if (UNIT_VTABLE(u)->notify_cgroup_empty)
                UNIT_VTABLE(u)->notify_cgroup_empty(u);

        return 0;
}
static int on_cgroup_inotify_event(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *m = userdata;

        assert(s);
        assert(fd >= 0);
        assert(m);

        for (;;) {
                union inotify_event_buffer buffer;
                struct inotify_event *e;
                ssize_t l;

                l = read(fd, &buffer, sizeof(buffer));
                if (l < 0) {
                        if (errno == EINTR || errno == EAGAIN)
                                return 0;

                        return log_error_errno(errno, "Failed to read control group inotify events: %m");
                }

                FOREACH_INOTIFY_EVENT(e, buffer, l) {
                        Unit *u;

                        if (e->wd < 0)
                                /* Queue overflow has no watch descriptor */
                                continue;

                        if (e->mask & IN_IGNORED)
                                /* The watch was just removed */
                                continue;

                        u = hashmap_get(m->cgroup_inotify_wd_unit, INT_TO_PTR(e->wd));
                        if (!u) /* Note that inotify might deliver
                                 * events for a watch even after it
                                 * was removed, because it was queued
                                 * before the removal. Let's ignore
                                 * this here safely. */
                                continue;

                        (void) unit_notify_cgroup_empty(u);
                }
        }
}
int manager_setup_cgroup(Manager *m) {
        _cleanup_free_ char *path = NULL;
        CGroupController c;
        int r, unified;
        char *e;

        assert(m);

        /* 1. Determine hierarchy */
        m->cgroup_root = mfree(m->cgroup_root);
        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root);
        if (r < 0)
                return log_error_errno(r, "Cannot determine cgroup we are running in: %m");

        /* Chop off the init scope, if we are already located in it */
        e = endswith(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);

        /* LEGACY: Also chop off the system slice if we are in
         * it. This is to support live upgrades from older systemd
         * versions where PID 1 was moved there. Also see
         * cg_get_root_path(). */
        if (!e && m->running_as == MANAGER_SYSTEM) {
                e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE);
                if (!e)
                        e = endswith(m->cgroup_root, "/system"); /* even more legacy */
        }
        if (e)
                *e = 0;

        /* And make sure to store away the root value without trailing
         * slash, even for the root dir, so that we can easily prepend
         * it everywhere. */
        while ((e = endswith(m->cgroup_root, "/")))
                *e = 0;

        /* 2. Show data */
        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
        if (r < 0)
                return log_error_errno(r, "Cannot find cgroup mount point: %m");

        unified = cg_unified();
        if (unified < 0)
                return log_error_errno(unified, "Couldn't determine if we are running in the unified hierarchy: %m");
        if (unified > 0)
                log_debug("Unified cgroup hierarchy is located at %s.", path);
        else
                log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER ". File system hierarchy is at %s.", path);

        if (!m->test_run) {
                const char *scope_path;

                /* 3. Install agent */
                if (unified) {

                        /* In the unified hierarchy we can get
                         * cgroup empty notifications via inotify. */

                        m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
                        safe_close(m->cgroup_inotify_fd);

                        m->cgroup_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
                        if (m->cgroup_inotify_fd < 0)
                                return log_error_errno(errno, "Failed to create control group inotify object: %m");

                        r = sd_event_add_io(m->event, &m->cgroup_inotify_event_source, m->cgroup_inotify_fd, EPOLLIN, on_cgroup_inotify_event, m);
                        if (r < 0)
                                return log_error_errno(r, "Failed to watch control group inotify object: %m");

                        r = sd_event_source_set_priority(m->cgroup_inotify_event_source, SD_EVENT_PRIORITY_IDLE - 5);
                        if (r < 0)
                                return log_error_errno(r, "Failed to set priority of inotify event source: %m");

                        (void) sd_event_source_set_description(m->cgroup_inotify_event_source, "cgroup-inotify");

                } else if (m->running_as == MANAGER_SYSTEM) {

                        /* On the legacy hierarchy we only get
                         * notifications via cgroup agents. (Which
                         * isn't really reliable, since it does not
                         * generate events when control groups with
                         * children run empty.) */

                        r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, SYSTEMD_CGROUP_AGENT_PATH);
                        if (r < 0)
                                log_warning_errno(r, "Failed to install release agent, ignoring: %m");
                        else if (r > 0)
                                log_debug("Installed release agent.");
                        else
                                log_debug("Release agent already installed.");
                }

                /* 4. Make sure we are in the special "init.scope" unit in the root slice. */
                scope_path = strjoina(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
                r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
                if (r < 0)
                        return log_error_errno(r, "Failed to create %s control group: %m", scope_path);

                /* also, move all other userspace processes remaining
                 * in the root cgroup into that scope. */
                r = cg_migrate(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, SYSTEMD_CGROUP_CONTROLLER, scope_path, false);
                if (r < 0)
                        log_warning_errno(r, "Couldn't move remaining userspace processes, ignoring: %m");

                /* 5. And pin it, so that it cannot be unmounted */
                safe_close(m->pin_cgroupfs_fd);
                m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
                if (m->pin_cgroupfs_fd < 0)
                        return log_error_errno(errno, "Failed to open pin file: %m");

                /* 6. Always enable hierarchical support if it exists... */
                if (!unified)
                        (void) cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");
        }

        /* 7. Figure out which controllers are supported */
        r = cg_mask_supported(&m->cgroup_supported);
        if (r < 0)
                return log_error_errno(r, "Failed to determine supported controllers: %m");

        for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++)
                log_debug("Controller '%s' supported: %s", cgroup_controller_to_string(c), yes_no(m->cgroup_supported & CGROUP_CONTROLLER_TO_MASK(c)));

        return 0;
}
void manager_shutdown_cgroup(Manager *m, bool delete) {
        assert(m);

        /* We can't really delete the group, since we are in it. But
         * let's trim it. */
        if (delete && m->cgroup_root)
                (void) cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);

        m->cgroup_inotify_wd_unit = hashmap_free(m->cgroup_inotify_wd_unit);

        m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
        m->cgroup_inotify_fd = safe_close(m->cgroup_inotify_fd);

        m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd);

        m->cgroup_root = mfree(m->cgroup_root);
}
Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) {
        char *p;
        Unit *u;

        assert(m);
        assert(cgroup);

        u = hashmap_get(m->cgroup_unit, cgroup);
        if (u)
                return u;

        p = strdupa(cgroup);
        for (;;) {
                char *e;

                e = strrchr(p, '/');
                if (!e || e == p)
                        return hashmap_get(m->cgroup_unit, SPECIAL_ROOT_SLICE);

                *e = 0;

                u = hashmap_get(m->cgroup_unit, p);
                if (u)
                        return u;
        }
}
Unit *manager_get_unit_by_pid_cgroup(Manager *m, pid_t pid) {
        _cleanup_free_ char *cgroup = NULL;
        int r;

        assert(m);

        if (pid <= 0)
                return NULL;

        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup);
        if (r < 0)
                return NULL;

        return manager_get_unit_by_cgroup(m, cgroup);
}
Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {
        Unit *u;

        assert(m);

        if (pid <= 0)
                return NULL;

        if (pid == 1)
                return hashmap_get(m->units, SPECIAL_INIT_SCOPE);

        u = hashmap_get(m->watch_pids1, PID_TO_PTR(pid));
        if (u)
                return u;

        u = hashmap_get(m->watch_pids2, PID_TO_PTR(pid));
        if (u)
                return u;

        return manager_get_unit_by_pid_cgroup(m, pid);
}
int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
        Unit *u;

        assert(m);
        assert(cgroup);

        u = manager_get_unit_by_cgroup(m, cgroup);
        if (!u)
                return 0;

        return unit_notify_cgroup_empty(u);
}
int unit_get_memory_current(Unit *u, uint64_t *ret) {
        _cleanup_free_ char *v = NULL;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENODATA;

        if ((u->cgroup_realized_mask & CGROUP_MASK_MEMORY) == 0)
                return -ENODATA;

        if (cg_unified() <= 0)
                r = cg_get_attribute("memory", u->cgroup_path, "memory.usage_in_bytes", &v);
        else
                r = cg_get_attribute("memory", u->cgroup_path, "memory.current", &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        return safe_atou64(v, ret);
}
int unit_get_tasks_current(Unit *u, uint64_t *ret) {
        _cleanup_free_ char *v = NULL;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENODATA;

        if ((u->cgroup_realized_mask & CGROUP_MASK_PIDS) == 0)
                return -ENODATA;

        r = cg_get_attribute("pids", u->cgroup_path, "pids.current", &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        return safe_atou64(v, ret);
}
static int unit_get_cpu_usage_raw(Unit *u, nsec_t *ret) {
        _cleanup_free_ char *v = NULL;
        uint64_t ns;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENODATA;

        if ((u->cgroup_realized_mask & CGROUP_MASK_CPUACCT) == 0)
                return -ENODATA;

        r = cg_get_attribute("cpuacct", u->cgroup_path, "cpuacct.usage", &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        r = safe_atou64(v, &ns);
        if (r < 0)
                return r;

        *ret = ns;
        return 0;
}
int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
        nsec_t ns;
        int r;

        r = unit_get_cpu_usage_raw(u, &ns);
        if (r < 0)
                return r;

        if (ns > u->cpuacct_usage_base)
                ns -= u->cpuacct_usage_base;
        else
                ns = 0;

        *ret = ns;
        return 0;
}
int unit_reset_cpu_usage(Unit *u) {
        nsec_t ns;
        int r;

        assert(u);

        r = unit_get_cpu_usage_raw(u, &ns);
        if (r < 0) {
                u->cpuacct_usage_base = 0;
                return r;
        }

        u->cpuacct_usage_base = ns;
        return 0;
}
bool unit_cgroup_delegate(Unit *u) {
        CGroupContext *c;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        return c->delegate;
}
void unit_invalidate_cgroup(Unit *u, CGroupMask m) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return;

        if (m == 0)
                return;

        if ((u->cgroup_realized_mask & m) == 0)
                return;

        u->cgroup_realized_mask &= ~m;
        unit_add_to_cgroup_queue(u);
}
void manager_invalidate_startup_units(Manager *m) {
        Iterator i;
        Unit *u;

        assert(m);

        SET_FOREACH(u, m->startup_units, i)
                unit_invalidate_cgroup(u, CGROUP_MASK_CPU|CGROUP_MASK_BLKIO);
}
static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
        [CGROUP_AUTO] = "auto",
        [CGROUP_CLOSED] = "closed",
        [CGROUP_STRICT] = "strict",
};

DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);