/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/

/***
  This file is part of systemd.

  Copyright 2013 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/

#include <fcntl.h>

#include "path-util.h"
#include "special.h"
#include "cgroup-util.h"
#include "cgroup.h"

void cgroup_context_init(CGroupContext *c) {
        assert(c);

        /* Initialize everything to the kernel defaults, assuming the
         * structure is preinitialized to 0 */

        c->cpu_shares = 1024;
        c->memory_limit = (uint64_t) -1;
        c->blockio_weight = 1000;
}

void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
        assert(c);
        assert(a);

        LIST_REMOVE(device_allow, c->device_allow, a);
        free(a->path);
        free(a);
}

void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->blockio_device_weights, w);
        free(w->path);
        free(w);
}

void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b) {
        assert(c);
        assert(b);

        LIST_REMOVE(device_bandwidths, c->blockio_device_bandwidths, b);
        free(b->path);
        free(b);
}

void cgroup_context_done(CGroupContext *c) {
        assert(c);

        while (c->blockio_device_weights)
                cgroup_context_free_blockio_device_weight(c, c->blockio_device_weights);

        while (c->blockio_device_bandwidths)
                cgroup_context_free_blockio_device_bandwidth(c, c->blockio_device_bandwidths);

        while (c->device_allow)
                cgroup_context_free_device_allow(c, c->device_allow);
}

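/* Dump the context's settings to f, one "Key=value" line per setting,
 * each prepended with the caller-supplied prefix (which may be NULL). */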
void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) {
        CGroupBlockIODeviceBandwidth *b;
        CGroupBlockIODeviceWeight *w;
        CGroupDeviceAllow *a;

        assert(c);
        assert(f);

        prefix = strempty(prefix);

        fprintf(f,
                "%sCPUAccounting=%s\n"
                "%sBlockIOAccounting=%s\n"
                "%sMemoryAccounting=%s\n"
                "%sCPUShares=%lu\n"
                "%sBlockIOWeight=%lu\n"
                "%sMemoryLimit=%" PRIu64 "\n"
                "%sDevicePolicy=%s\n",
                prefix, yes_no(c->cpu_accounting),
                prefix, yes_no(c->blockio_accounting),
                prefix, yes_no(c->memory_accounting),
                prefix, c->cpu_shares,
                prefix, c->blockio_weight,
                prefix, c->memory_limit,
                prefix, cgroup_device_policy_to_string(c->device_policy));

        LIST_FOREACH(device_allow, a, c->device_allow)
                fprintf(f,
                        "%sDeviceAllow=%s %s%s%s\n",
                        prefix,
                        a->path,
                        a->r ? "r" : "", a->w ? "w" : "", a->m ? "m" : "");

        LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                fprintf(f,
115 "%sBlockIODeviceWeight=%s %lu",
                        prefix,
                        w->path,
                        w->weight);

        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                char buf[FORMAT_BYTES_MAX];

                fprintf(f,
                        "%s%s=%s %s\n",
                        prefix,
                        b->read ? "BlockIOReadBandwidth" : "BlockIOWriteBandwidth",
                        b->path,
                        format_bytes(buf, sizeof(buf), b->bandwidth));
        }
}

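/* Resolve a path from the unit configuration to the block device
 * (major:minor) the blkio attributes should be applied to. The path
 * may refer to the device node itself, or to any file, in which case
 * the (whole-disk) block device backing the file system it lives on
 * is used. */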
static int lookup_blkio_device(const char *p, dev_t *dev) {
        struct stat st;
        int r;

        assert(p);
        assert(dev);

        r = stat(p, &st);
        if (r < 0) {
                log_warning("Couldn't stat device %s: %m", p);
                return -errno;
        }

        if (S_ISBLK(st.st_mode))
                *dev = st.st_rdev;
        else if (major(st.st_dev) != 0) {
                /* If this is not a device node then find the block
                 * device this file is stored on */
                *dev = st.st_dev;

                /* If this is a partition, try to get the originating
                 * block device */
                block_get_whole_disk(*dev, dev);
        } else {
                log_warning("%s is not a block device and file system block device cannot be determined or is not local.", p);
                return -ENODEV;
        }

        return 0;
}

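/* Add a single device to the devices.allow list of the cgroup at
 * path. node is the device node to stat, acc the access string to
 * grant ("r", "w", "m" or any combination thereof). */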
static int whitelist_device(const char *path, const char *node, const char *acc) {
        char buf[2+DECIMAL_STR_MAX(dev_t)*2+2+4];
        struct stat st;
        int r;

        assert(path);
        assert(acc);

        if (stat(node, &st) < 0) {
                log_warning("Couldn't stat device %s: %m", node);
                return -errno;
        }

        if (!S_ISCHR(st.st_mode) && !S_ISBLK(st.st_mode)) {
                log_warning("%s is not a device.", node);
                return -ENODEV;
        }

        sprintf(buf,
                "%c %u:%u %s",
                S_ISCHR(st.st_mode) ? 'c' : 'b',
                major(st.st_rdev), minor(st.st_rdev),
                acc);

        r = cg_set_attribute("devices", path, "devices.allow", buf);
        if (r < 0)
                log_warning("Failed to set devices.allow on %s: %s", path, strerror(-r));

        return r;
}

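/* Translate the CGroupContext settings into writes to the kernel's
 * per-controller attribute files below path. Only the controllers
 * included in mask are touched. Failures are logged but otherwise
 * ignored, so that as much of the configuration as possible still
 * gets applied. */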
void cgroup_context_apply(CGroupContext *c, CGroupControllerMask mask, const char *path) {
        int r;

        assert(c);
        assert(path);

        if (mask == 0)
                return;

        if (mask & CGROUP_CPU) {
                char buf[DECIMAL_STR_MAX(unsigned long) + 1];

                sprintf(buf, "%lu\n", c->cpu_shares);
                r = cg_set_attribute("cpu", path, "cpu.shares", buf);
                if (r < 0)
                        log_warning("Failed to set cpu.shares on %s: %s", path, strerror(-r));
        }

        if (mask & CGROUP_BLKIO) {
                char buf[MAX3(DECIMAL_STR_MAX(unsigned long)+1,
                              DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(unsigned long)*1,
                              DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1)];
                CGroupBlockIODeviceWeight *w;
                CGroupBlockIODeviceBandwidth *b;

                sprintf(buf, "%lu\n", c->blockio_weight);
                r = cg_set_attribute("blkio", path, "blkio.weight", buf);
                if (r < 0)
                        log_warning("Failed to set blkio.weight on %s: %s", path, strerror(-r));

                /* FIXME: no way to reset this list */
                LIST_FOREACH(device_weights, w, c->blockio_device_weights) {
                        dev_t dev;

                        r = lookup_blkio_device(w->path, &dev);
                        if (r < 0)
                                continue;

                        sprintf(buf, "%u:%u %lu", major(dev), minor(dev), w->weight);
                        r = cg_set_attribute("blkio", path, "blkio.weight_device", buf);
                        if (r < 0)
                                log_error("Failed to set blkio.weight_device on %s: %s", path, strerror(-r));
                }

                /* FIXME: no way to reset this list */
                LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                        const char *a;
                        dev_t dev;

                        r = lookup_blkio_device(b->path, &dev);
                        if (r < 0)
                                continue;

                        a = b->read ? "blkio.throttle.read_bps_device" : "blkio.throttle.write_bps_device";

                        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), b->bandwidth);
                        r = cg_set_attribute("blkio", path, a, buf);
                        if (r < 0)
                                log_error("Failed to set %s on %s: %s", a, path, strerror(-r));
                }
        }

        if (mask & CGROUP_MEMORY) {
                if (c->memory_limit != (uint64_t) -1) {
                        char buf[DECIMAL_STR_MAX(uint64_t) + 1];

                        sprintf(buf, "%" PRIu64 "\n", c->memory_limit);
                        r = cg_set_attribute("memory", path, "memory.limit_in_bytes", buf);
                } else
                        r = cg_set_attribute("memory", path, "memory.limit_in_bytes", "-1");

                if (r < 0)
                        log_error("Failed to set memory.limit_in_bytes on %s: %s", path, strerror(-r));
        }

        if (mask & CGROUP_DEVICE) {
                CGroupDeviceAllow *a;

                if (c->device_allow || c->device_policy != CGROUP_AUTO)
                        r = cg_set_attribute("devices", path, "devices.deny", "a");
                else
                        r = cg_set_attribute("devices", path, "devices.allow", "a");
                if (r < 0)
                        log_error("Failed to reset devices.list on %s: %s", path, strerror(-r));

                if (c->device_policy == CGROUP_CLOSED ||
                    (c->device_policy == CGROUP_AUTO && c->device_allow)) {
                        static const char auto_devices[] =
                                "/dev/null\0" "rw\0"
                                "/dev/zero\0" "rw\0"
                                "/dev/full\0" "rw\0"
                                "/dev/random\0" "rw\0"
                                "/dev/urandom\0" "rw\0";

                        const char *x, *y;

                        NULSTR_FOREACH_PAIR(x, y, auto_devices)
                                whitelist_device(path, x, y);
                }

                LIST_FOREACH(device_allow, a, c->device_allow) {
                        char acc[4];
                        unsigned k = 0;

                        if (a->r)
                                acc[k++] = 'r';
                        if (a->w)
                                acc[k++] = 'w';
                        if (a->m)
                                acc[k++] = 'm';

                        if (k == 0)
                                continue;

                        acc[k++] = 0;
                        whitelist_device(path, a->path, acc);
                }
        }
}

CGroupControllerMask cgroup_context_get_mask(CGroupContext *c) {
        CGroupControllerMask mask = 0;

        /* Figure out which controllers we need */

        if (c->cpu_accounting || c->cpu_shares != 1024)
                mask |= CGROUP_CPUACCT | CGROUP_CPU;

        if (c->blockio_accounting ||
            c->blockio_weight != 1000 ||
            c->blockio_device_weights ||
            c->blockio_device_bandwidths)
                mask |= CGROUP_BLKIO;

        if (c->memory_accounting ||
            c->memory_limit != (uint64_t) -1)
                mask |= CGROUP_MEMORY;

        if (c->device_allow || c->device_policy != CGROUP_AUTO)
                mask |= CGROUP_DEVICE;

        return mask;
}

static CGroupControllerMask unit_get_cgroup_mask(Unit *u) {
        CGroupContext *c;

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        return cgroup_context_get_mask(c);
}

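/* Compute the union of the controller masks of all units that are
 * members of u, recursing into their members in turn. Member units
 * are expected to be ordered after their slice, which is why
 * iterating over u's UNIT_BEFORE dependencies and filtering on
 * m->slice finds exactly the member units. */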
static CGroupControllerMask unit_get_members_mask(Unit *u) {
        CGroupControllerMask mask = 0;
        Unit *m;
        Iterator i;

        assert(u);

        SET_FOREACH(m, u->dependencies[UNIT_BEFORE], i) {

                if (UNIT_DEREF(m->slice) != u)
                        continue;

                mask |= unit_get_cgroup_mask(m) | unit_get_members_mask(m);
        }

        return mask;
}

static CGroupControllerMask unit_get_siblings_mask(Unit *u) {
        assert(u);

        if (!UNIT_ISSET(u->slice))
                return 0;

        /* Sibling propagation is only relevant for weight-based
         * controllers, so let's mask out everything else */
        return unit_get_members_mask(UNIT_DEREF(u->slice)) &
                (CGROUP_CPU|CGROUP_BLKIO|CGROUP_CPUACCT);
}

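/* Create (or update) the cgroup of u in every hierarchy listed in
 * mask, register the path in the manager's cgroup-to-unit hashmap and
 * migrate any processes over from a previously used cgroup path. */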
static int unit_create_cgroups(Unit *u, CGroupControllerMask mask) {
        char *path = NULL;
        int r;
        bool is_in_hash = false;

        assert(u);

        path = unit_default_cgroup_path(u);
        if (!path)
                return -ENOMEM;

        r = hashmap_put(u->manager->cgroup_unit, path, u);
        if (r == 0)
                is_in_hash = true;

        if (r < 0) {
                log_error("cgroup %s exists already: %s", path, strerror(-r));
                free(path);
                return r;
        }

        /* First, create our own group */
        r = cg_create_everywhere(u->manager->cgroup_supported, mask, path);
        if (r < 0)
                log_error("Failed to create cgroup %s: %s", path, strerror(-r));

        /* Then, possibly move things over */
        if (u->cgroup_path) {
                r = cg_migrate_everywhere(u->manager->cgroup_supported, u->cgroup_path, path);
                if (r < 0)
                        log_error("Failed to migrate cgroup %s: %s", path, strerror(-r));
        }

        if (!is_in_hash) {
                /* And remember the new data */
                free(u->cgroup_path);
                u->cgroup_path = path;
        }

        u->cgroup_realized = true;
        u->cgroup_mask = mask;

        return 0;
}

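/* Realize the cgroup of this unit synchronously: take it off the
 * cgroup queue, work out which controllers it needs (its own, its
 * members' and its siblings'), realize all parent slices first and
 * then create its own group. Returns early if the unit is already
 * realized with the right controller mask. */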
static int unit_realize_cgroup_now(Unit *u) {
        CGroupControllerMask mask;

        assert(u);

        if (u->in_cgroup_queue) {
                LIST_REMOVE(cgroup_queue, u->manager->cgroup_queue, u);
                u->in_cgroup_queue = false;
        }

        mask = unit_get_cgroup_mask(u) | unit_get_members_mask(u) | unit_get_siblings_mask(u);
        mask &= u->manager->cgroup_supported;

        if (u->cgroup_realized &&
            u->cgroup_mask == mask)
                return 0;

        /* First, realize parents */
        if (UNIT_ISSET(u->slice))
                unit_realize_cgroup_now(UNIT_DEREF(u->slice));

        /* And then do the real work */
        return unit_create_cgroups(u, mask);
}

static void unit_add_to_cgroup_queue(Unit *u) {

        if (u->in_cgroup_queue)
                return;

        LIST_PREPEND(cgroup_queue, u->manager->cgroup_queue, u);
        u->in_cgroup_queue = true;
}

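/* Drain the cgroup queue: realize every queued unit and, on success,
 * apply its cgroup context. Returns the number of units processed. */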
unsigned manager_dispatch_cgroup_queue(Manager *m) {
        Unit *i;
        unsigned n = 0;

        while ((i = m->cgroup_queue)) {
                assert(i->in_cgroup_queue);

                if (unit_realize_cgroup_now(i) >= 0)
                        cgroup_context_apply(unit_get_cgroup_context(i), i->cgroup_mask, i->cgroup_path);

                n++;
        }

        return n;
}

static void unit_queue_siblings(Unit *u) {
        Unit *slice;

        /* This adds the siblings of the specified unit and the
         * siblings of all parent units to the cgroup queue. (But
         * neither the specified unit itself nor the parents.) */

        while ((slice = UNIT_DEREF(u->slice))) {
                Iterator i;
                Unit *m;

                SET_FOREACH(m, slice->dependencies[UNIT_BEFORE], i) {
                        if (m == u)
                                continue;

                        if (UNIT_DEREF(m->slice) != slice)
                                continue;

                        unit_add_to_cgroup_queue(m);
                }

                u = slice;
        }
}

int unit_realize_cgroup(Unit *u) {
        CGroupContext *c;
        int r;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        /* So, here's the deal: when realizing the cgroups for this
         * unit, we need to create all parents first, but there's
         * actually more to it: for the weight-based controllers we
         * also need to make sure that all our siblings (i.e. units
         * that are in the same slice as we are) have cgroups, too.
         * Otherwise things would become very uneven, as each of their
         * processes would get as many resources as our whole group
         * together. This call will synchronously create the parent
         * cgroups, but will defer work on the siblings to the next
         * event loop iteration. */

        /* Add all sibling slices to the cgroup queue. */
        unit_queue_siblings(u);

        /* And realize this one now */
        r = unit_realize_cgroup_now(u);

        /* And apply the values */
        if (r >= 0)
                cgroup_context_apply(c, u->cgroup_mask, u->cgroup_path);

        return r;
}

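/* Trim the unit's cgroup in all hierarchies, drop it from the
 * manager's cgroup-to-unit hashmap and reset the unit's realization
 * state. */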
void unit_destroy_cgroup(Unit *u) {
        int r;

        assert(u);

        if (!u->cgroup_path)
                return;

        r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !unit_has_name(u, SPECIAL_ROOT_SLICE));
        if (r < 0)
                log_debug("Failed to destroy cgroup %s: %s", u->cgroup_path, strerror(-r));

        hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);

        free(u->cgroup_path);
        u->cgroup_path = NULL;
        u->cgroup_realized = false;
        u->cgroup_mask = 0;
}

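/* Try to determine the main PID of this unit by scanning its cgroup:
 * processes that are not our own children are skipped, and a PID is
 * returned only if exactly one candidate remains; otherwise 0 is
 * returned. */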
pid_t unit_search_main_pid(Unit *u) {
        _cleanup_fclose_ FILE *f = NULL;
        pid_t pid = 0, npid, mypid;

        assert(u);

        if (!u->cgroup_path)
                return 0;

        if (cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, &f) < 0)
                return 0;

        mypid = getpid();
        while (cg_read_pid(f, &npid) > 0) {
                pid_t ppid;

                if (npid == pid)
                        continue;

                /* Ignore processes that aren't our kids */
                if (get_parent_of_pid(npid, &ppid) >= 0 && ppid != mypid)
                        continue;

                if (pid != 0) {
                        /* Dang, there's more than one daemonized PID
                         * in this group, so we don't know what process
                         * is the main process. */
                        pid = 0;
                        break;
                }

                pid = npid;
        }

        return pid;
}

int manager_setup_cgroup(Manager *m) {
        _cleanup_free_ char *path = NULL;
        char *e;
        int r;

        assert(m);

        /* 0. Be nice to Ingo Molnar #628004 */
        if (path_is_mount_point("/sys/fs/cgroup/systemd", false) <= 0) {
                log_warning("No control group support available, not creating root group.");
                return 0;
        }

        /* 1. Determine hierarchy */
        free(m->cgroup_root);
        m->cgroup_root = NULL;

        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root);
        if (r < 0) {
                log_error("Cannot determine cgroup we are running in: %s", strerror(-r));
                return r;
        }

        /* LEGACY: Already in /system.slice? If so, let's cut this
         * off. This is to support live upgrades from older systemd
         * versions where PID 1 was moved there. */
        if (m->running_as == SYSTEMD_SYSTEM) {
                e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE);
                if (!e)
                        e = endswith(m->cgroup_root, "/system");
                if (e)
                        *e = 0;
        }

        /* And make sure to store away the root value without trailing
         * slash, even for the root dir, so that we can easily prepend
         * it everywhere. */
        if (streq(m->cgroup_root, "/"))
                m->cgroup_root[0] = 0;

        /* 2. Show data */
        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
        if (r < 0) {
                log_error("Cannot find cgroup mount point: %s", strerror(-r));
                return r;
        }

        log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER ". File system hierarchy is at %s.", path);

        /* 3. Install agent */
        if (m->running_as == SYSTEMD_SYSTEM) {
                r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, SYSTEMD_CGROUP_AGENT_PATH);
                if (r < 0)
                        log_warning("Failed to install release agent, ignoring: %s", strerror(-r));
                else if (r > 0)
                        log_debug("Installed release agent.");
                else
                        log_debug("Release agent already installed.");
        }

        /* 4. Make sure we are in the root cgroup */
        r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, 0);
        if (r < 0) {
                log_error("Failed to create root cgroup hierarchy: %s", strerror(-r));
                return r;
        }

        /* 5. And pin it, so that it cannot be unmounted */
        if (m->pin_cgroupfs_fd >= 0)
                close_nointr_nofail(m->pin_cgroupfs_fd);

        m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
        if (m->pin_cgroupfs_fd < 0) {
                log_error("Failed to open pin file: %m");
                return -errno;
        }

        /* 6. Figure out which controllers are supported */
        m->cgroup_supported = cg_mask_supported();

        /* 7. Always enable hierarchical support if it exists... */
        cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");

        return 0;
}

void manager_shutdown_cgroup(Manager *m, bool delete) {
        assert(m);

        /* We can't really delete the group, since we are in it. But
         * let's trim it. */
        if (delete && m->cgroup_root)
                cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);

        if (m->pin_cgroupfs_fd >= 0) {
                close_nointr_nofail(m->pin_cgroupfs_fd);
                m->pin_cgroupfs_fd = -1;
        }

        free(m->cgroup_root);
        m->cgroup_root = NULL;
}

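/* Map a cgroup path to the unit that owns it. If no unit is
 * registered for the path itself, walk up the hierarchy one path
 * component at a time until a match is found or the root is
 * reached. */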
Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) {
        char *p;
        Unit *u;

        assert(m);
        assert(cgroup);

        u = hashmap_get(m->cgroup_unit, cgroup);
        if (u)
                return u;

        p = strdupa(cgroup);
        for (;;) {
                char *e;

                e = strrchr(p, '/');
                if (e == p || !e)
                        return NULL;

                *e = 0;

                u = hashmap_get(m->cgroup_unit, p);
                if (u)
                        return u;
        }
}

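/* Map a PID to the unit whose cgroup it currently lives in. */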
Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {
        _cleanup_free_ char *cgroup = NULL;
        int r;

        assert(m);

        if (pid <= 1)
                return NULL;

        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup);
        if (r < 0)
                return NULL;

        return manager_get_unit_by_cgroup(m, cgroup);
}

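/* Called when an empty notification for the given cgroup path comes
 * in (typically via the cgroups release agent): if the owning unit's
 * cgroup really is empty, forward the notification to the unit's
 * type-specific handler and queue the unit for garbage collection. */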
int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
        Unit *u;
        int r;

        assert(m);
        assert(cgroup);

        u = manager_get_unit_by_cgroup(m, cgroup);
        if (u) {
                r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, true);
                if (r > 0) {
                        if (UNIT_VTABLE(u)->notify_cgroup_empty)
                                UNIT_VTABLE(u)->notify_cgroup_empty(u);

                        unit_add_to_gc_queue(u);
                }
        }

        return 0;
}

static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
        [CGROUP_AUTO] = "auto",
        [CGROUP_CLOSED] = "closed",
        [CGROUP_STRICT] = "strict",
};

DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);