/***
  This file is part of systemd.

  Copyright 2013 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/

#include <fcntl.h>
#include <fnmatch.h>

#include "alloc-util.h"
#include "cgroup-util.h"
#include "cgroup.h"
#include "fd-util.h"
#include "fileio.h"
#include "fs-util.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
#include "special.h"
#include "string-table.h"
#include "string-util.h"

#define CGROUP_CPU_QUOTA_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)

void cgroup_context_init(CGroupContext *c) {
        assert(c);

        /* Initialize everything to the kernel defaults, assuming the
         * structure is preinitialized to 0 */

        c->cpu_shares = CGROUP_CPU_SHARES_INVALID;
        c->startup_cpu_shares = CGROUP_CPU_SHARES_INVALID;
        c->cpu_quota_per_sec_usec = USEC_INFINITY;

        c->memory_limit = (uint64_t) -1;

        c->blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;
        c->startup_blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;

        c->tasks_max = (uint64_t) -1;
}

void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
        assert(c);
        assert(a);

        LIST_REMOVE(device_allow, c->device_allow, a);
        free(a->path);
        free(a);
}

void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->blockio_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b) {
        assert(c);
        assert(b);

        LIST_REMOVE(device_bandwidths, c->blockio_device_bandwidths, b);
        free(b->path);
        free(b);
}

void cgroup_context_done(CGroupContext *c) {
        assert(c);

        while (c->blockio_device_weights)
                cgroup_context_free_blockio_device_weight(c, c->blockio_device_weights);

        while (c->blockio_device_bandwidths)
                cgroup_context_free_blockio_device_bandwidth(c, c->blockio_device_bandwidths);

        while (c->device_allow)
                cgroup_context_free_device_allow(c, c->device_allow);
}

void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) {
        CGroupBlockIODeviceBandwidth *b;
        CGroupBlockIODeviceWeight *w;
        CGroupDeviceAllow *a;
        char u[FORMAT_TIMESPAN_MAX];

        assert(c);
        assert(f);

        prefix = strempty(prefix);

        fprintf(f,
                "%sCPUAccounting=%s\n"
                "%sBlockIOAccounting=%s\n"
                "%sMemoryAccounting=%s\n"
                "%sTasksAccounting=%s\n"
                "%sCPUShares=%" PRIu64 "\n"
                "%sStartupCPUShares=%" PRIu64 "\n"
                "%sCPUQuotaPerSecSec=%s\n"
                "%sBlockIOWeight=%" PRIu64 "\n"
                "%sStartupBlockIOWeight=%" PRIu64 "\n"
                "%sMemoryLimit=%" PRIu64 "\n"
                "%sTasksMax=%" PRIu64 "\n"
                "%sDevicePolicy=%s\n"
                "%sDelegate=%s\n",
                prefix, yes_no(c->cpu_accounting),
                prefix, yes_no(c->blockio_accounting),
                prefix, yes_no(c->memory_accounting),
                prefix, yes_no(c->tasks_accounting),
                prefix, c->cpu_shares,
                prefix, c->startup_cpu_shares,
                prefix, format_timespan(u, sizeof(u), c->cpu_quota_per_sec_usec, 1),
                prefix, c->blockio_weight,
                prefix, c->startup_blockio_weight,
                prefix, c->memory_limit,
                prefix, c->tasks_max,
                prefix, cgroup_device_policy_to_string(c->device_policy),
                prefix, yes_no(c->delegate));

        LIST_FOREACH(device_allow, a, c->device_allow)
                fprintf(f,
                        "%sDeviceAllow=%s %s%s%s\n",
                        prefix,
                        a->path,
                        a->r ? "r" : "", a->w ? "w" : "", a->m ? "m" : "");

        LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                fprintf(f,
                        "%sBlockIODeviceWeight=%s %" PRIu64 "\n",
                        prefix,
                        w->path,
                        w->weight);

        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                char buf[FORMAT_BYTES_MAX];

                fprintf(f,
                        "%s%s=%s %s\n",
                        prefix,
                        b->read ? "BlockIOReadBandwidth" : "BlockIOWriteBandwidth",
                        b->path,
                        format_bytes(buf, sizeof(buf), b->bandwidth));
        }
}

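/* Resolves the path to the block device relevant for blkio settings:
 * block device nodes are used as-is, other files are mapped to the
 * block device of the file system they are stored on, and partitions
 * are mapped to the whole disk they belong to. */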
static int lookup_blkio_device(const char *p, dev_t *dev) {
        struct stat st;
        int r;

        assert(p);
        assert(dev);

        r = stat(p, &st);
        if (r < 0)
                return log_warning_errno(errno, "Couldn't stat device %s: %m", p);

        if (S_ISBLK(st.st_mode))
                *dev = st.st_rdev;
        else if (major(st.st_dev) != 0) {
                /* If this is not a device node then find the block
                 * device this file is stored on */
                *dev = st.st_dev;

                /* If this is a partition, try to get the originating
                 * block device */
                block_get_whole_disk(*dev, dev);
        } else {
                log_warning("%s is not a block device and file system block device cannot be determined or is not local.", p);
                return -ENODEV;
        }

        return 0;
}

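/* Adds a single device node to the cgroup's devices.allow list, using
 * the kernel's "<type> <major>:<minor> <access>" syntax, e.g.
 * "c 1:3 rwm" for /dev/null. */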
static int whitelist_device(const char *path, const char *node, const char *acc) {
        char buf[2+DECIMAL_STR_MAX(dev_t)*2+2+4];
        struct stat st;
        int r;

        assert(path);
        assert(acc);

        if (stat(node, &st) < 0)
                return log_warning_errno(errno, "Couldn't stat device %s: %m", node);

        if (!S_ISCHR(st.st_mode) && !S_ISBLK(st.st_mode)) {
                log_warning("%s is not a device.", node);
                return -ENODEV;
        }

        sprintf(buf,
                "%c %u:%u %s",
                S_ISCHR(st.st_mode) ? 'c' : 'b',
                major(st.st_rdev), minor(st.st_rdev),
                acc);

        r = cg_set_attribute("devices", path, "devices.allow", buf);
        if (r < 0)
                log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                               "Failed to set devices.allow on %s: %m", path);

        return r;
}

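/* Like whitelist_device(), but matches device groups by name against
 * /proc/devices and whitelists every minor of each matching major,
 * i.e. writes "<type> <major>:* <access>" entries. */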
static int whitelist_major(const char *path, const char *name, char type, const char *acc) {
        _cleanup_fclose_ FILE *f = NULL;
        char line[LINE_MAX];
        bool good = false;
        int r;

        assert(path);
        assert(acc);
        assert(type == 'b' || type == 'c');

        f = fopen("/proc/devices", "re");
        if (!f)
                return log_warning_errno(errno, "Cannot open /proc/devices to resolve %s (%c): %m", name, type);

        FOREACH_LINE(line, f, goto fail) {
                char buf[2+DECIMAL_STR_MAX(unsigned)+3+4], *p, *w;
                unsigned maj;

                truncate_nl(line);

                if (type == 'c' && streq(line, "Character devices:")) {
                        good = true;
                        continue;
                }

                if (type == 'b' && streq(line, "Block devices:")) {
                        good = true;
                        continue;
                }

                if (isempty(line)) {
                        good = false;
                        continue;
                }

                if (!good)
                        continue;

                p = strstrip(line);

                w = strpbrk(p, WHITESPACE);
                if (!w)
                        continue;
                *w = 0;

                r = safe_atou(p, &maj);
                if (r < 0)
                        continue;
                if (maj <= 0)
                        continue;

                w++;
                w += strspn(w, WHITESPACE);

                if (fnmatch(name, w, 0) != 0)
                        continue;

                sprintf(buf,
                        "%c %u:* %s",
                        type,
                        maj,
                        acc);

                r = cg_set_attribute("devices", path, "devices.allow", buf);
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set devices.allow on %s: %m", path);
        }

        return 0;

fail:
        return log_warning_errno(errno, "Failed to read /proc/devices: %m");
}

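/* Translates the high-level CGroupContext settings into writes to the
 * low-level attribute files of every controller included in the mask. */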
void cgroup_context_apply(CGroupContext *c, CGroupMask mask, const char *path, ManagerState state) {
        bool is_root;
        int r;

        assert(c);
        assert(path);

        if (mask == 0)
                return;

        /* Some cgroup attributes are not supported on the root cgroup,
         * hence silently ignore */
        is_root = isempty(path) || path_equal(path, "/");
        if (is_root)
                /* Make sure we don't try to display messages with an empty path. */
                path = "/";

        /* We generally ignore errors caused by read-only mounted
         * cgroup trees (assuming we are running in a container then),
         * and missing cgroups, i.e. EROFS and ENOENT. */

        if ((mask & CGROUP_MASK_CPU) && !is_root) {
                char buf[MAX(DECIMAL_STR_MAX(uint64_t), DECIMAL_STR_MAX(usec_t)) + 1];

                sprintf(buf, "%" PRIu64 "\n",
                        IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) && c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID ? c->startup_cpu_shares :
                        c->cpu_shares != CGROUP_CPU_SHARES_INVALID ? c->cpu_shares : CGROUP_CPU_SHARES_DEFAULT);
                r = cg_set_attribute("cpu", path, "cpu.shares", buf);
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set cpu.shares on %s: %m", path);

                sprintf(buf, USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);
                r = cg_set_attribute("cpu", path, "cpu.cfs_period_us", buf);
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set cpu.cfs_period_us on %s: %m", path);

                if (c->cpu_quota_per_sec_usec != USEC_INFINITY) {
                        sprintf(buf, USEC_FMT "\n", c->cpu_quota_per_sec_usec * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC);
                        r = cg_set_attribute("cpu", path, "cpu.cfs_quota_us", buf);
                } else
                        r = cg_set_attribute("cpu", path, "cpu.cfs_quota_us", "-1");
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set cpu.cfs_quota_us on %s: %m", path);
        }

        if (mask & CGROUP_MASK_BLKIO) {
                char buf[MAX(DECIMAL_STR_MAX(uint64_t)+1,
                             DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1)];
                CGroupBlockIODeviceWeight *w;
                CGroupBlockIODeviceBandwidth *b;

                if (!is_root) {
                        sprintf(buf, "%" PRIu64 "\n",
                                IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) && c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ? c->startup_blockio_weight :
                                c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ? c->blockio_weight : CGROUP_BLKIO_WEIGHT_DEFAULT);
                        r = cg_set_attribute("blkio", path, "blkio.weight", buf);
                        if (r < 0)
                                log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                               "Failed to set blkio.weight on %s: %m", path);

                        /* FIXME: no way to reset this list */
                        LIST_FOREACH(device_weights, w, c->blockio_device_weights) {
                                dev_t dev;

                                r = lookup_blkio_device(w->path, &dev);
                                if (r < 0)
                                        continue;

                                sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), w->weight);
                                r = cg_set_attribute("blkio", path, "blkio.weight_device", buf);
                                if (r < 0)
                                        log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                                       "Failed to set blkio.weight_device on %s: %m", path);
                        }
                }

                /* FIXME: no way to reset this list */
                LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                        const char *a;
                        dev_t dev;

                        r = lookup_blkio_device(b->path, &dev);
                        if (r < 0)
                                continue;

                        a = b->read ? "blkio.throttle.read_bps_device" : "blkio.throttle.write_bps_device";

                        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), b->bandwidth);
                        r = cg_set_attribute("blkio", path, a, buf);
                        if (r < 0)
                                log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                               "Failed to set %s on %s: %m", a, path);
                }
        }

        if ((mask & CGROUP_MASK_MEMORY) && !is_root) {
                if (c->memory_limit != (uint64_t) -1) {
                        char buf[DECIMAL_STR_MAX(uint64_t) + 1];

                        sprintf(buf, "%" PRIu64 "\n", c->memory_limit);

                        if (cg_unified() <= 0)
                                r = cg_set_attribute("memory", path, "memory.limit_in_bytes", buf);
                        else
                                r = cg_set_attribute("memory", path, "memory.max", buf);

                } else {
                        if (cg_unified() <= 0)
                                r = cg_set_attribute("memory", path, "memory.limit_in_bytes", "-1");
                        else
                                r = cg_set_attribute("memory", path, "memory.max", "max");
                }

                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set memory.limit_in_bytes/memory.max on %s: %m", path);
        }

        if ((mask & CGROUP_MASK_DEVICES) && !is_root) {
                CGroupDeviceAllow *a;

                /* Changing the devices list of a populated cgroup
                 * might result in EINVAL, hence ignore EINVAL
                 * here. */

                if (c->device_allow || c->device_policy != CGROUP_AUTO)
                        r = cg_set_attribute("devices", path, "devices.deny", "a");
                else
                        r = cg_set_attribute("devices", path, "devices.allow", "a");
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to reset devices.list on %s: %m", path);

                if (c->device_policy == CGROUP_CLOSED ||
                    (c->device_policy == CGROUP_AUTO && c->device_allow)) {
                        static const char auto_devices[] =
                                "/dev/null\0" "rwm\0"
                                "/dev/zero\0" "rwm\0"
                                "/dev/full\0" "rwm\0"
                                "/dev/random\0" "rwm\0"
                                "/dev/urandom\0" "rwm\0"
                                "/dev/tty\0" "rwm\0"
                                "/dev/pts/ptmx\0" "rw\0"; /* /dev/pts/ptmx may not be duplicated, but accessed */

                        const char *x, *y;

                        NULSTR_FOREACH_PAIR(x, y, auto_devices)
                                whitelist_device(path, x, y);

                        whitelist_major(path, "pts", 'c', "rw");
                        whitelist_major(path, "kdbus", 'c', "rw");
                        whitelist_major(path, "kdbus/*", 'c', "rw");
                }

                LIST_FOREACH(device_allow, a, c->device_allow) {
                        char acc[4];
                        unsigned k = 0;

                        if (a->r)
                                acc[k++] = 'r';
                        if (a->w)
                                acc[k++] = 'w';
                        if (a->m)
                                acc[k++] = 'm';

                        if (k == 0)
                                continue;

                        acc[k++] = 0;

                        if (startswith(a->path, "/dev/"))
                                whitelist_device(path, a->path, acc);
                        else if (startswith(a->path, "block-"))
                                whitelist_major(path, a->path + 6, 'b', acc);
                        else if (startswith(a->path, "char-"))
                                whitelist_major(path, a->path + 5, 'c', acc);
                        else
                                log_debug("Ignoring device %s while writing cgroup attribute.", a->path);
                }
        }

        if ((mask & CGROUP_MASK_PIDS) && !is_root) {

                if (c->tasks_max != (uint64_t) -1) {
                        char buf[DECIMAL_STR_MAX(uint64_t) + 2];

                        sprintf(buf, "%" PRIu64 "\n", c->tasks_max);
                        r = cg_set_attribute("pids", path, "pids.max", buf);
                } else
                        r = cg_set_attribute("pids", path, "pids.max", "max");

                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set pids.max on %s: %m", path);
        }
}

CGroupMask cgroup_context_get_mask(CGroupContext *c) {
        CGroupMask mask = 0;

        /* Figure out which controllers we need */

        if (c->cpu_accounting ||
            c->cpu_shares != CGROUP_CPU_SHARES_INVALID ||
            c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID ||
            c->cpu_quota_per_sec_usec != USEC_INFINITY)
                mask |= CGROUP_MASK_CPUACCT | CGROUP_MASK_CPU;

        if (c->blockio_accounting ||
            c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
            c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
            c->blockio_device_weights ||
            c->blockio_device_bandwidths)
                mask |= CGROUP_MASK_BLKIO;

        if (c->memory_accounting ||
            c->memory_limit != (uint64_t) -1)
                mask |= CGROUP_MASK_MEMORY;

        if (c->device_allow ||
            c->device_policy != CGROUP_AUTO)
                mask |= CGROUP_MASK_DEVICES;

        if (c->tasks_accounting ||
            c->tasks_max != (uint64_t) -1)
                mask |= CGROUP_MASK_PIDS;

        return mask;
}

CGroupMask unit_get_own_mask(Unit *u) {
        CGroupContext *c;

        /* Returns the mask of controllers the unit needs for itself */

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        /* If delegation is turned on, then turn on all cgroups,
         * unless we are on the legacy hierarchy and the process we
         * fork into it is known to drop privileges, and hence
         * shouldn't get access to the controllers.
         *
         * Note that on the unified hierarchy it is safe to delegate
         * controllers to unprivileged services. */

        if (c->delegate) {
                ExecContext *e;

                e = unit_get_exec_context(u);
                if (!e ||
                    exec_context_maintains_privileges(e) ||
                    cg_unified() > 0)
                        return _CGROUP_MASK_ALL;
        }

        return cgroup_context_get_mask(c);
}

CGroupMask unit_get_members_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's children
         * require, merged */

        if (u->cgroup_members_mask_valid)
                return u->cgroup_members_mask;

        u->cgroup_members_mask = 0;

        if (u->type == UNIT_SLICE) {
                Unit *member;
                Iterator i;

                SET_FOREACH(member, u->dependencies[UNIT_BEFORE], i) {

                        if (member == u)
                                continue;

                        if (UNIT_DEREF(member->slice) != u)
                                continue;

                        u->cgroup_members_mask |=
                                unit_get_own_mask(member) |
                                unit_get_members_mask(member);
                }
        }

        u->cgroup_members_mask_valid = true;
        return u->cgroup_members_mask;
}

CGroupMask unit_get_siblings_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's siblings
         * require, i.e. the members mask of the unit's parent slice
         * if there is one. */

        if (UNIT_ISSET(u->slice))
                return unit_get_members_mask(UNIT_DEREF(u->slice));

        return unit_get_own_mask(u) | unit_get_members_mask(u);
}

CGroupMask unit_get_subtree_mask(Unit *u) {

        /* Returns the mask of this subtree, meaning of the group
         * itself and its children. */

        return unit_get_own_mask(u) | unit_get_members_mask(u);
}

CGroupMask unit_get_target_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable
         * for a specific cgroup, i.e. everything it needs itself,
         * plus all that its children need, plus all that its siblings
         * need. This is primarily useful on the legacy cgroup
         * hierarchy, where we need to duplicate each cgroup in each
         * hierarchy that shall be enabled for it. */

        mask = unit_get_own_mask(u) | unit_get_members_mask(u) | unit_get_siblings_mask(u);
        mask &= u->manager->cgroup_supported;

        return mask;
}

CGroupMask unit_get_enable_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable
         * for the children of a specific cgroup. This is primarily
         * useful for the unified cgroup hierarchy, where each cgroup
         * controls which controllers are enabled for its children. */

        mask = unit_get_members_mask(u);
        mask &= u->manager->cgroup_supported;

        return mask;
}

/* Recurse from a unit up through its containing slices, propagating
 * mask bits upward. A unit is also a member of itself. */
void unit_update_cgroup_members_masks(Unit *u) {
        CGroupMask m;
        bool more;

        assert(u);

        /* Calculate subtree mask */
        m = unit_get_subtree_mask(u);

        /* See if anything changed from the previous invocation. If
         * not, we're done. */
        if (u->cgroup_subtree_mask_valid && m == u->cgroup_subtree_mask)
                return;

        more =
                u->cgroup_subtree_mask_valid &&
                ((m & ~u->cgroup_subtree_mask) != 0) &&
                ((~m & u->cgroup_subtree_mask) == 0);

        u->cgroup_subtree_mask = m;
        u->cgroup_subtree_mask_valid = true;

        if (UNIT_ISSET(u->slice)) {
                Unit *s = UNIT_DEREF(u->slice);

                if (more)
                        /* There's more set now than before. We
                         * propagate the new mask to the parent's mask
                         * (not caring if it actually was valid or
                         * not). */

                        s->cgroup_members_mask |= m;

                else
                        /* There's less set now than before (or we
                         * don't know), we need to recalculate
                         * everything, so let's invalidate the
                         * parent's members mask */

                        s->cgroup_members_mask_valid = false;

                /* And now make sure that this change also hits our
                 * grandparents */
                unit_update_cgroup_members_masks(s);
        }
}

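/* Callback for cg_migrate_everywhere(): walks up the slice chain and
 * returns the path of the closest realized cgroup that covers the
 * requested controller mask. */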
static const char *migrate_callback(CGroupMask mask, void *userdata) {
        Unit *u = userdata;

        assert(mask != 0);
        assert(u);

        while (u) {
                if (u->cgroup_path &&
                    u->cgroup_realized &&
                    (u->cgroup_realized_mask & mask) == mask)
                        return u->cgroup_path;

                u = UNIT_DEREF(u->slice);
        }

        return NULL;
}

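/* Derives the default cgroup path of a unit from the manager's cgroup
 * root, the containing slice and the escaped unit name. Slice names
 * encode nesting with dashes, so e.g. a unit in "foo-bar.slice" ends
 * up below ".../foo.slice/foo-bar.slice/". */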
char *unit_default_cgroup_path(Unit *u) {
        _cleanup_free_ char *escaped = NULL, *slice = NULL;
        int r;

        assert(u);

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return strdup(u->manager->cgroup_root);

        if (UNIT_ISSET(u->slice) && !unit_has_name(UNIT_DEREF(u->slice), SPECIAL_ROOT_SLICE)) {
                r = cg_slice_to_path(UNIT_DEREF(u->slice)->id, &slice);
                if (r < 0)
                        return NULL;
        }

        escaped = cg_escape(u->id);
        if (!escaped)
                return NULL;

        if (slice)
                return strjoin(u->manager->cgroup_root, "/", slice, "/", escaped, NULL);
        else
                return strjoin(u->manager->cgroup_root, "/", escaped, NULL);
}

int unit_set_cgroup_path(Unit *u, const char *path) {
        _cleanup_free_ char *p = NULL;
        int r;

        assert(u);

        if (path) {
                p = strdup(path);
                if (!p)
                        return -ENOMEM;
        } else
                p = NULL;

        if (streq_ptr(u->cgroup_path, p))
                return 0;

        if (p) {
                r = hashmap_put(u->manager->cgroup_unit, p, u);
                if (r < 0)
                        return r;
        }

        unit_release_cgroup(u);

        u->cgroup_path = p;
        p = NULL;

        return 1;
}

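/* Installs an inotify watch on the unit's "cgroup.events" file, so
 * that we get notified when the cgroup runs empty. This only applies
 * to the unified hierarchy; on the legacy hierarchy the release agent
 * is used instead. */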
int unit_watch_cgroup(Unit *u) {
        _cleanup_free_ char *events = NULL;
        int r;

        assert(u);

        if (!u->cgroup_path)
                return 0;

        if (u->cgroup_inotify_wd >= 0)
                return 0;

        /* Only applies to the unified hierarchy */
        r = cg_unified();
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to detect whether the unified hierarchy is used: %m");
        if (r == 0)
                return 0;

        /* Don't watch the root slice, it's pointless. */
        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        r = hashmap_ensure_allocated(&u->manager->cgroup_inotify_wd_unit, &trivial_hash_ops);
        if (r < 0)
                return log_oom();

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events", &events);
        if (r < 0)
                return log_oom();

        u->cgroup_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, events, IN_MODIFY);
        if (u->cgroup_inotify_wd < 0) {

                if (errno == ENOENT) /* If the directory is already
                                      * gone we don't need to track
                                      * it, so this is not an error */
                        return 0;

                return log_unit_error_errno(u, errno, "Failed to add inotify watch descriptor for control group %s: %m", u->cgroup_path);
        }

        r = hashmap_put(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd), u);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to add inotify watch descriptor to hash map: %m");

        return 0;
}

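/* Realizes the unit's cgroup: creates it in all hierarchies covered by
 * target_mask, enables the controllers in enable_mask for its children
 * and migrates any of the unit's processes into it. */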
static int unit_create_cgroup(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask) {

        CGroupContext *c;
        int r;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        if (!u->cgroup_path) {
                _cleanup_free_ char *path = NULL;

                path = unit_default_cgroup_path(u);
                if (!path)
                        return log_oom();

                r = unit_set_cgroup_path(u, path);
                if (r == -EEXIST)
                        return log_unit_error_errno(u, r, "Control group %s exists already.", path);
                if (r < 0)
                        return log_unit_error_errno(u, r, "Failed to set unit's control group path to %s: %m", path);
        }

        /* First, create our own group */
        r = cg_create_everywhere(u->manager->cgroup_supported, target_mask, u->cgroup_path);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to create cgroup %s: %m", u->cgroup_path);

        /* Start watching it */
        (void) unit_watch_cgroup(u);

        /* Enable all controllers we need */
        r = cg_enable_everywhere(u->manager->cgroup_supported, enable_mask, u->cgroup_path);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enable controllers on cgroup %s, ignoring: %m", u->cgroup_path);

        /* Keep track that this is now realized */
        u->cgroup_realized = true;
        u->cgroup_realized_mask = target_mask;
        u->cgroup_enabled_mask = enable_mask;

        if (u->type != UNIT_SLICE && !c->delegate) {

                /* Then, possibly move things over, but not if
                 * subgroups may contain processes, which is the case
                 * for slice and delegation units. */
                r = cg_migrate_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->cgroup_path, migrate_callback, u);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to migrate cgroup to %s, ignoring: %m", u->cgroup_path);
        }

        return 0;
}

int unit_attach_pids_to_cgroup(Unit *u) {
        int r;

        assert(u);

        r = unit_realize_cgroup(u);
        if (r < 0)
                return r;

        r = cg_attach_many_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->pids, migrate_callback, u);
        if (r < 0)
                return r;

        return 0;
}

static bool unit_has_mask_realized(Unit *u, CGroupMask target_mask, CGroupMask enable_mask) {
        assert(u);

        return u->cgroup_realized && u->cgroup_realized_mask == target_mask && u->cgroup_enabled_mask == enable_mask;
}

/* Check if necessary controllers and attributes for a unit are in place.
 *
 * If so, do nothing.
 * If not, create paths, move processes over, and set attributes.
 *
 * Returns 0 on success and < 0 on failure. */
static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
        CGroupMask target_mask, enable_mask;
        int r;

        assert(u);

        if (u->in_cgroup_queue) {
                LIST_REMOVE(cgroup_queue, u->manager->cgroup_queue, u);
                u->in_cgroup_queue = false;
        }

        target_mask = unit_get_target_mask(u);
        enable_mask = unit_get_enable_mask(u);

        if (unit_has_mask_realized(u, target_mask, enable_mask))
                return 0;

        /* First, realize parents */
        if (UNIT_ISSET(u->slice)) {
                r = unit_realize_cgroup_now(UNIT_DEREF(u->slice), state);
                if (r < 0)
                        return r;
        }

        /* And then do the real work */
        r = unit_create_cgroup(u, target_mask, enable_mask);
        if (r < 0)
                return r;

        /* Finally, apply the necessary attributes. */
        cgroup_context_apply(unit_get_cgroup_context(u), target_mask, u->cgroup_path, state);

        return 0;
}

static void unit_add_to_cgroup_queue(Unit *u) {

        if (u->in_cgroup_queue)
                return;

        LIST_PREPEND(cgroup_queue, u->manager->cgroup_queue, u);
        u->in_cgroup_queue = true;
}

unsigned manager_dispatch_cgroup_queue(Manager *m) {
        ManagerState state;
        unsigned n = 0;
        Unit *i;
        int r;

        state = manager_state(m);

        while ((i = m->cgroup_queue)) {
                assert(i->in_cgroup_queue);

                r = unit_realize_cgroup_now(i, state);
                if (r < 0)
                        log_warning_errno(r, "Failed to realize cgroups for queued unit %s, ignoring: %m", i->id);

                n++;
        }

        return n;
}

static void unit_queue_siblings(Unit *u) {
        Unit *slice;

        /* This adds the siblings of the specified unit and the
         * siblings of all parent units to the cgroup queue. (But
         * neither the specified unit itself nor the parents.) */

        while ((slice = UNIT_DEREF(u->slice))) {
                Iterator i;
                Unit *m;

                SET_FOREACH(m, slice->dependencies[UNIT_BEFORE], i) {
                        if (m == u)
                                continue;

                        /* Skip units that have a dependency on the slice
                         * but aren't actually in it. */
                        if (UNIT_DEREF(m->slice) != slice)
                                continue;

                        /* No point in doing cgroup application for units
                         * without active processes. */
                        if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(m)))
                                continue;

                        /* If the unit doesn't need any new controllers
                         * and has current ones realized, it doesn't need
                         * any changes. */
                        if (unit_has_mask_realized(m, unit_get_target_mask(m), unit_get_enable_mask(m)))
                                continue;

                        unit_add_to_cgroup_queue(m);
                }

                u = slice;
        }
}

int unit_realize_cgroup(Unit *u) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* So, here's the deal: when realizing the cgroups for this
         * unit, we need to first create all parents, but there's more
         * actually: for the weight-based controllers we also need to
         * make sure that all our siblings (i.e. units that are in the
         * same slice as we are) have cgroups, too. Otherwise, things
         * would become very uneven as each of their processes would
         * get as much resources as all our group together. This call
         * will synchronously create the parent cgroups, but will
         * defer work on the siblings to the next event loop
         * iteration. */

        /* Add all sibling slices to the cgroup queue. */
        unit_queue_siblings(u);

        /* And realize this one now (and apply the values) */
        return unit_realize_cgroup_now(u, manager_state(u->manager));
}

void unit_release_cgroup(Unit *u) {
        assert(u);

        /* Forgets all cgroup details for this cgroup */

        if (u->cgroup_path) {
                (void) hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
                u->cgroup_path = mfree(u->cgroup_path);
        }

        if (u->cgroup_inotify_wd >= 0) {
                if (inotify_rm_watch(u->manager->cgroup_inotify_fd, u->cgroup_inotify_wd) < 0)
                        log_unit_debug_errno(u, errno, "Failed to remove cgroup inotify watch %i for %s, ignoring: %m", u->cgroup_inotify_wd, u->id);

                (void) hashmap_remove(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd));
                u->cgroup_inotify_wd = -1;
        }
}

void unit_prune_cgroup(Unit *u) {
        int r;
        bool is_root_slice;

        assert(u);

        /* Removes the cgroup, if empty and possible, and stops watching it. */

        if (!u->cgroup_path)
                return;

        is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);

        r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !is_root_slice);
        if (r < 0) {
                log_debug_errno(r, "Failed to destroy cgroup %s, ignoring: %m", u->cgroup_path);
                return;
        }

        if (is_root_slice)
                return;

        unit_release_cgroup(u);

        u->cgroup_realized = false;
        u->cgroup_realized_mask = 0;
        u->cgroup_enabled_mask = 0;
}

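/* Guesses the main PID of a unit by scanning its cgroup for processes
 * that are direct children of the manager; fails with -ENODATA if more
 * than one such process is found. */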
int unit_search_main_pid(Unit *u, pid_t *ret) {
        _cleanup_fclose_ FILE *f = NULL;
        pid_t pid = 0, npid, mypid;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENXIO;

        r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, &f);
        if (r < 0)
                return r;

        mypid = getpid();
        while (cg_read_pid(f, &npid) > 0) {
                pid_t ppid;

                if (npid == pid)
                        continue;

                /* Ignore processes that aren't our kids */
                if (get_process_ppid(npid, &ppid) >= 0 && ppid != mypid)
                        continue;

                if (pid != 0)
                        /* Dang, there's more than one daemonized PID
                         * in this group, so we don't know what process
                         * is the main process. */
                        return -ENODATA;

                pid = npid;
        }

        *ret = pid;
        return 0;
}

static int unit_watch_pids_in_path(Unit *u, const char *path) {
        _cleanup_closedir_ DIR *d = NULL;
        _cleanup_fclose_ FILE *f = NULL;
        int ret = 0, r;

        assert(u);
        assert(path);

        r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f);
        if (r < 0)
                ret = r;
        else {
                pid_t pid;

                while ((r = cg_read_pid(f, &pid)) > 0) {
                        r = unit_watch_pid(u, pid);
                        if (r < 0 && ret >= 0)
                                ret = r;
                }

                if (r < 0 && ret >= 0)
                        ret = r;
        }

        r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d);
        if (r < 0) {
                if (ret >= 0)
                        ret = r;
        } else {
                char *fn;

                while ((r = cg_read_subgroup(d, &fn)) > 0) {
                        _cleanup_free_ char *p = NULL;

                        p = strjoin(path, "/", fn, NULL);
                        free(fn);

                        if (!p)
                                return -ENOMEM;

                        r = unit_watch_pids_in_path(u, p);
                        if (r < 0 && ret >= 0)
                                ret = r;
                }

                if (r < 0 && ret >= 0)
                        ret = r;
        }

        return ret;
}

int unit_watch_all_pids(Unit *u) {
        assert(u);

        /* Adds all PIDs from our cgroup to the set of PIDs we
         * watch. This is a fallback logic for cases where we do not
         * get reliable cgroup empty notifications: we try to use
         * SIGCHLD as replacement. */

        if (!u->cgroup_path)
                return -ENOENT;

        if (cg_unified() > 0) /* On unified we can use proper notifications */
                return 0;

        return unit_watch_pids_in_path(u, u->cgroup_path);
}

int unit_notify_cgroup_empty(Unit *u) {
        int r;

        assert(u);

        if (!u->cgroup_path)
                return 0;

        r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
        if (r <= 0)
                return r;

        unit_add_to_gc_queue(u);

        if (UNIT_VTABLE(u)->notify_cgroup_empty)
                UNIT_VTABLE(u)->notify_cgroup_empty(u);

        return 0;
}

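/* Event loop callback for the cgroup inotify fd (unified hierarchy
 * only): drains all queued inotify events and forwards possible
 * "cgroup empty" notifications to the units owning the watches. */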
static int on_cgroup_inotify_event(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *m = userdata;

        assert(s);
        assert(fd >= 0);
        assert(m);

        for (;;) {
                union inotify_event_buffer buffer;
                struct inotify_event *e;
                ssize_t l;

                l = read(fd, &buffer, sizeof(buffer));
                if (l < 0) {
                        if (errno == EINTR || errno == EAGAIN)
                                return 0;

                        return log_error_errno(errno, "Failed to read control group inotify events: %m");
                }

                FOREACH_INOTIFY_EVENT(e, buffer, l) {
                        Unit *u;

                        if (e->wd < 0)
                                /* Queue overflow has no watch descriptor */
                                continue;

                        if (e->mask & IN_IGNORED)
                                /* The watch was just removed */
                                continue;

                        u = hashmap_get(m->cgroup_inotify_wd_unit, INT_TO_PTR(e->wd));
                        if (!u) /* Note that inotify might deliver
                                 * events for a watch even after it
                                 * was removed, because it was queued
                                 * before the removal. Let's ignore
                                 * this here safely. */
                                continue;

                        (void) unit_notify_cgroup_empty(u);
                }
        }
}

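/* One-time cgroup setup for the manager: determines the cgroup root we
 * run in, sets up empty-cgroup notifications (inotify on the unified
 * hierarchy, the release agent on the legacy one), moves us into the
 * special "init.scope" unit, pins the hierarchy so it cannot be
 * unmounted, and probes which controllers are supported. */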
int manager_setup_cgroup(Manager *m) {
        _cleanup_free_ char *path = NULL;
        CGroupController c;
        int r, unified;
        char *e;

        assert(m);

        /* 1. Determine hierarchy */
        m->cgroup_root = mfree(m->cgroup_root);
        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root);
        if (r < 0)
                return log_error_errno(r, "Cannot determine cgroup we are running in: %m");

        /* Chop off the init scope, if we are already located in it */
        e = endswith(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);

        /* LEGACY: Also chop off the system slice if we are in
         * it. This is to support live upgrades from older systemd
         * versions where PID 1 was moved there. Also see
         * cg_get_root_path(). */
        if (!e && MANAGER_IS_SYSTEM(m)) {
                e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE);
                if (!e)
                        e = endswith(m->cgroup_root, "/system"); /* even more legacy */
        }
        if (e)
                *e = 0;

        /* And make sure to store away the root value without trailing
         * slash, even for the root dir, so that we can easily prepend
         * it everywhere. */
        while ((e = endswith(m->cgroup_root, "/")))
                *e = 0;

        /* 2. Show data */
        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
        if (r < 0)
                return log_error_errno(r, "Cannot find cgroup mount point: %m");

        unified = cg_unified();
        if (unified < 0)
                return log_error_errno(unified, "Couldn't determine if we are running in the unified hierarchy: %m");
        if (unified > 0)
                log_debug("Unified cgroup hierarchy is located at %s.", path);
        else
                log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER ". File system hierarchy is at %s.", path);

        if (!m->test_run) {
                const char *scope_path;

                /* 3. Install agent */
                if (unified) {

                        /* In the unified hierarchy we can get cgroup
                         * empty notifications via inotify. */

                        m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
                        safe_close(m->cgroup_inotify_fd);

                        m->cgroup_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
                        if (m->cgroup_inotify_fd < 0)
                                return log_error_errno(errno, "Failed to create control group inotify object: %m");

                        r = sd_event_add_io(m->event, &m->cgroup_inotify_event_source, m->cgroup_inotify_fd, EPOLLIN, on_cgroup_inotify_event, m);
                        if (r < 0)
                                return log_error_errno(r, "Failed to watch control group inotify object: %m");

                        /* Process cgroup empty notifications early, but after service notifications and SIGCHLD. Also
                         * see handling of cgroup agent notifications, for the classic cgroup hierarchy support. */
                        r = sd_event_source_set_priority(m->cgroup_inotify_event_source, SD_EVENT_PRIORITY_NORMAL-5);
                        if (r < 0)
                                return log_error_errno(r, "Failed to set priority of inotify event source: %m");

                        (void) sd_event_source_set_description(m->cgroup_inotify_event_source, "cgroup-inotify");

                } else if (MANAGER_IS_SYSTEM(m)) {

                        /* On the legacy hierarchy we only get
                         * notifications via cgroup agents. (Which
                         * isn't really reliable, since it does not
                         * generate events when control groups with
                         * children run empty.) */

                        r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, SYSTEMD_CGROUP_AGENT_PATH);
                        if (r < 0)
                                log_warning_errno(r, "Failed to install release agent, ignoring: %m");
                        else if (r > 0)
                                log_debug("Installed release agent.");
                        else if (r == 0)
                                log_debug("Release agent already installed.");
                }

                /* 4. Make sure we are in the special "init.scope" unit in the root slice. */
                scope_path = strjoina(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
                r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
                if (r < 0)
                        return log_error_errno(r, "Failed to create %s control group: %m", scope_path);

                /* Also, move all other userspace processes remaining
                 * in the root cgroup into that scope. */
                r = cg_migrate(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, SYSTEMD_CGROUP_CONTROLLER, scope_path, false);
                if (r < 0)
                        log_warning_errno(r, "Couldn't move remaining userspace processes, ignoring: %m");

                /* 5. And pin it, so that it cannot be unmounted */
                safe_close(m->pin_cgroupfs_fd);
                m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
                if (m->pin_cgroupfs_fd < 0)
                        return log_error_errno(errno, "Failed to open pin file: %m");

                /* 6. Always enable hierarchical support if it exists... */
                if (!unified)
                        (void) cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");
        }

        /* 7. Figure out which controllers are supported */
        r = cg_mask_supported(&m->cgroup_supported);
        if (r < 0)
                return log_error_errno(r, "Failed to determine supported controllers: %m");

        for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++)
                log_debug("Controller '%s' supported: %s", cgroup_controller_to_string(c), yes_no(m->cgroup_supported & CGROUP_CONTROLLER_TO_MASK(c)));

        return 0;
}

void manager_shutdown_cgroup(Manager *m, bool delete) {
        assert(m);

        /* We can't really delete the group, since we are in it. But
         * let's trim it. */
        if (delete && m->cgroup_root)
                (void) cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);

        m->cgroup_inotify_wd_unit = hashmap_free(m->cgroup_inotify_wd_unit);

        m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
        m->cgroup_inotify_fd = safe_close(m->cgroup_inotify_fd);

        m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd);

        m->cgroup_root = mfree(m->cgroup_root);
}

Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) {
        char *p;
        Unit *u;

        assert(m);
        assert(cgroup);

        u = hashmap_get(m->cgroup_unit, cgroup);
        if (u)
                return u;

        p = strdupa(cgroup);
        for (;;) {
                char *e;

                e = strrchr(p, '/');
                if (!e || e == p)
                        return hashmap_get(m->cgroup_unit, SPECIAL_ROOT_SLICE);

                *e = 0;

                u = hashmap_get(m->cgroup_unit, p);
                if (u)
                        return u;
        }
}

Unit *manager_get_unit_by_pid_cgroup(Manager *m, pid_t pid) {
        _cleanup_free_ char *cgroup = NULL;
        int r;

        assert(m);

        if (pid <= 0)
                return NULL;

        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup);
        if (r < 0)
                return NULL;

        return manager_get_unit_by_cgroup(m, cgroup);
}

Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {
        Unit *u;

        assert(m);

        if (pid <= 0)
                return NULL;

        if (pid == 1)
                return hashmap_get(m->units, SPECIAL_INIT_SCOPE);

        u = hashmap_get(m->watch_pids1, PID_TO_PTR(pid));
        if (u)
                return u;

        u = hashmap_get(m->watch_pids2, PID_TO_PTR(pid));
        if (u)
                return u;

        return manager_get_unit_by_pid_cgroup(m, pid);
}

int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
        Unit *u;

        assert(m);
        assert(cgroup);

        log_debug("Got cgroup empty notification for: %s", cgroup);

        u = manager_get_unit_by_cgroup(m, cgroup);
        if (!u)
                return 0;

        return unit_notify_cgroup_empty(u);
}

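/* The following helpers query current resource usage directly from the
 * kernel counters: memory.current/memory.usage_in_bytes, pids.current
 * and cpuacct.usage, depending on the hierarchy in use. */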
int unit_get_memory_current(Unit *u, uint64_t *ret) {
        _cleanup_free_ char *v = NULL;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENODATA;

        if ((u->cgroup_realized_mask & CGROUP_MASK_MEMORY) == 0)
                return -ENODATA;

        if (cg_unified() <= 0)
                r = cg_get_attribute("memory", u->cgroup_path, "memory.usage_in_bytes", &v);
        else
                r = cg_get_attribute("memory", u->cgroup_path, "memory.current", &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        return safe_atou64(v, ret);
}

int unit_get_tasks_current(Unit *u, uint64_t *ret) {
        _cleanup_free_ char *v = NULL;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENODATA;

        if ((u->cgroup_realized_mask & CGROUP_MASK_PIDS) == 0)
                return -ENODATA;

        r = cg_get_attribute("pids", u->cgroup_path, "pids.current", &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        return safe_atou64(v, ret);
}

static int unit_get_cpu_usage_raw(Unit *u, nsec_t *ret) {
        _cleanup_free_ char *v = NULL;
        uint64_t ns;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENODATA;

        if ((u->cgroup_realized_mask & CGROUP_MASK_CPUACCT) == 0)
                return -ENODATA;

        r = cg_get_attribute("cpuacct", u->cgroup_path, "cpuacct.usage", &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        r = safe_atou64(v, &ns);
        if (r < 0)
                return r;

        *ret = ns;
        return 0;
}

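/* Returns the CPU time consumed since the last unit_reset_cpu_usage()
 * call: the kernel counter is monotonic, hence resetting is emulated
 * by storing the raw value as a base and subtracting it later. */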
int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
        nsec_t ns;
        int r;

        r = unit_get_cpu_usage_raw(u, &ns);
        if (r < 0)
                return r;

        if (ns > u->cpuacct_usage_base)
                ns -= u->cpuacct_usage_base;
        else
                ns = 0;

        *ret = ns;
        return 0;
}

int unit_reset_cpu_usage(Unit *u) {
        nsec_t ns;
        int r;

        assert(u);

        r = unit_get_cpu_usage_raw(u, &ns);
        if (r < 0) {
                u->cpuacct_usage_base = 0;
                return r;
        }

        u->cpuacct_usage_base = ns;
        return 0;
}

bool unit_cgroup_delegate(Unit *u) {
        CGroupContext *c;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        return c->delegate;
}

void unit_invalidate_cgroup(Unit *u, CGroupMask m) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return;

        if (m == 0)
                return;

        if ((u->cgroup_realized_mask & m) == 0)
                return;

        u->cgroup_realized_mask &= ~m;
        unit_add_to_cgroup_queue(u);
}

void manager_invalidate_startup_units(Manager *m) {
        Iterator i;
        Unit *u;

        assert(m);

        SET_FOREACH(u, m->startup_units, i)
                unit_invalidate_cgroup(u, CGROUP_MASK_CPU|CGROUP_MASK_BLKIO);
}

static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
        [CGROUP_AUTO] = "auto",
        [CGROUP_CLOSED] = "closed",
        [CGROUP_STRICT] = "strict",
};

DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);