/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/

/***
  This file is part of systemd.

  Copyright 2015 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/

#include <sys/mount.h>

#include "alloc-util.h"
#include "cgroup-util.h"
#include "fd-util.h"
#include "fileio.h"
#include "mkdir.h"
#include "nspawn-cgroup.h"
#include "string-util.h"
#include "strv.h"
#include "util.h"

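/* Chown the container's cgroup directory in the hierarchy systemd manages, plus the attribute files its init will
 * need to write, to the container's root UID (uid_shift), so that a user-namespaced container can manage its own
 * cgroup subtree. */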
int chown_cgroup(pid_t pid, uid_t uid_shift) {
        _cleanup_free_ char *path = NULL, *fs = NULL;
        _cleanup_close_ int fd = -1;
        const char *fn;
        int r;

        r = cg_pid_get_path(NULL, pid, &path);
        if (r < 0)
                return log_error_errno(r, "Failed to get container cgroup path: %m");

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, path, NULL, &fs);
        if (r < 0)
                return log_error_errno(r, "Failed to get file system path for container cgroup: %m");

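        /* Open the cgroup directory itself, so that the attribute files below can be chowned relative to it via
         * fchownat(). */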
        fd = open(fs, O_RDONLY|O_CLOEXEC|O_DIRECTORY);
        if (fd < 0)
                return log_error_errno(errno, "Failed to open %s: %m", fs);

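        /* The list below mixes cgroup v1 and v2 attribute files; whichever do not exist on this hierarchy are
         * skipped, with ENOENT logged at debug level only. */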
        FOREACH_STRING(fn,
                       ".",
                       "tasks",
                       "notify_on_release",
                       "cgroup.procs",
                       "cgroup.clone_children",
                       "cgroup.controllers",
                       "cgroup.subtree_control",
                       "cgroup.populated")
                if (fchownat(fd, fn, uid_shift, uid_shift, 0) < 0)
                        log_full_errno(errno == ENOENT ? LOG_DEBUG : LOG_WARNING, errno,
                                       "Failed to chown() cgroup file %s, ignoring: %m", fn);

        return 0;
}

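/* Mirror the container's cgroup membership from the hierarchy the host uses into the other hierarchy layout, so
 * that host and container agree on where the container lives even when they use different cgroup setups. */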
int sync_cgroup(pid_t pid, bool unified_requested) {
        _cleanup_free_ char *cgroup = NULL;
        char tree[] = "/tmp/unifiedXXXXXX", pid_string[DECIMAL_STR_MAX(pid) + 1];
        bool undo_mount = false;
        const char *fn;
        int unified, r;

        unified = cg_unified();
        if (unified < 0)
                return log_error_errno(unified, "Failed to determine whether the unified hierarchy is used: %m");

        if ((unified > 0) == unified_requested)
                return 0;

        /* When the host uses the legacy cgroup setup, but the
         * container shall use the unified hierarchy, let's make sure
         * we copy the path from the name=systemd hierarchy into the
         * unified hierarchy. Similarly for the reverse situation. */

        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup);
        if (r < 0)
                return log_error_errno(r, "Failed to get control group of " PID_FMT ": %m", pid);

        /* In order to access the unified hierarchy we need to mount it */
        if (!mkdtemp(tree))
                return log_error_errno(errno, "Failed to generate temporary mount point for unified hierarchy: %m");

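        /* If the host is on the unified hierarchy, the container wants the legacy layout, hence mount the
         * name=systemd hierarchy; otherwise mount the unified hierarchy (via the __DEVEL__sane_behavior option). */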
        if (unified)
                r = mount("cgroup", tree, "cgroup", MS_NOSUID|MS_NOEXEC|MS_NODEV, "none,name=systemd,xattr");
        else
                r = mount("cgroup", tree, "cgroup", MS_NOSUID|MS_NOEXEC|MS_NODEV, "__DEVEL__sane_behavior");
        if (r < 0) {
                r = log_error_errno(errno, "Failed to mount unified hierarchy: %m");
                goto finish;
        }

        undo_mount = true;

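        /* Create the same cgroup path in the hierarchy we just mounted and move the container's PID into it. */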
        fn = strjoina(tree, cgroup, "/cgroup.procs");
        (void) mkdir_parents(fn, 0755);

        sprintf(pid_string, PID_FMT, pid);
        r = write_string_file(fn, pid_string, 0);
        if (r < 0)
                log_error_errno(r, "Failed to move process: %m");

finish:
        if (undo_mount)
                (void) umount(tree);

        (void) rmdir(tree);
        return r;
}

int create_subcgroup(pid_t pid, bool unified_requested) {
        _cleanup_free_ char *cgroup = NULL;
        const char *child;
        int unified, r;
        CGroupMask supported;

        /* In the unified hierarchy inner nodes may only contain
         * subgroups, but not processes. Hence, if we are running in the
         * unified hierarchy and the container does the same, and we
         * did not create a scope unit for the container, move us and
         * the container into two separate subcgroups. */

        if (!unified_requested)
                return 0;

        unified = cg_unified();
        if (unified < 0)
                return log_error_errno(unified, "Failed to determine whether the unified hierarchy is used: %m");
        if (unified == 0)
                return 0;

        r = cg_mask_supported(&supported);
        if (r < 0)
                return log_error_errno(r, "Failed to determine supported controllers: %m");

        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
        if (r < 0)
                return log_error_errno(r, "Failed to get our control group: %m");

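        /* Put the container's PID 1 into a "payload" subgroup and ourselves into a sibling "supervisor" subgroup,
         * so that no processes are left in the (now inner) cgroup we were started in. */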
        child = strjoina(cgroup, "/payload");
        r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, child, pid);
        if (r < 0)
                return log_error_errno(r, "Failed to create %s subcgroup: %m", child);

        child = strjoina(cgroup, "/supervisor");
        r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, child, 0);
        if (r < 0)
                return log_error_errno(r, "Failed to create %s subcgroup: %m", child);

        /* Try to enable as many controllers as possible for the new payload. */
        (void) cg_enable_everywhere(supported, supported, cgroup);
        return 0;
}