/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/

/***
  This file is part of systemd.

  Copyright 2015 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/

#include <sys/mount.h>

#include "cgroup-util.h"
#include "fileio.h"
#include "mkdir.h"
#include "string-util.h"
#include "strv.h"
#include "util.h"
#include "nspawn-cgroup.h"

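/* Hand the container's own cgroup over to the container's root user (as mapped via uid_shift),
 * so that the systemd instance running inside can manage its own subtree. Attribute files of
 * both the legacy and the unified hierarchy are chowned; files missing on the hierarchy in use
 * are skipped with a debug log only. */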
int chown_cgroup(pid_t pid, uid_t uid_shift) {
        _cleanup_free_ char *path = NULL, *fs = NULL;
        _cleanup_close_ int fd = -1;
        const char *fn;
        int r;

        r = cg_pid_get_path(NULL, pid, &path);
        if (r < 0)
                return log_error_errno(r, "Failed to get container cgroup path: %m");

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, path, NULL, &fs);
        if (r < 0)
                return log_error_errno(r, "Failed to get file system path for container cgroup: %m");

        fd = open(fs, O_RDONLY|O_CLOEXEC|O_DIRECTORY);
        if (fd < 0)
                return log_error_errno(errno, "Failed to open %s: %m", fs);

        FOREACH_STRING(fn,
                       ".",
                       "tasks",
                       "notify_on_release",
                       "cgroup.procs",
                       "cgroup.clone_children",
                       "cgroup.controllers",
                       "cgroup.subtree_control",
                       "cgroup.populated")
                if (fchownat(fd, fn, uid_shift, uid_shift, 0) < 0)
                        log_full_errno(errno == ENOENT ? LOG_DEBUG : LOG_WARNING, errno,
                                       "Failed to chown() cgroup file %s, ignoring: %m", fn);

        return 0;
}

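/* If the host and the container use different cgroup setups (legacy vs. unified), copy the
 * container's cgroup path from the hierarchy the host manages into the one the container will
 * use, by temporarily mounting the other hierarchy and writing the PID into its cgroup.procs. */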
int sync_cgroup(pid_t pid, bool unified_requested) {
        _cleanup_free_ char *cgroup = NULL;
        char tree[] = "/tmp/unifiedXXXXXX", pid_string[DECIMAL_STR_MAX(pid) + 1];
        bool undo_mount = false;
        const char *fn;
        int unified, r;

        unified = cg_unified();
        if (unified < 0)
                return log_error_errno(unified, "Failed to determine whether the unified hierarchy is used: %m");

        if ((unified > 0) == unified_requested)
                return 0;

        /* When the host uses the legacy cgroup setup, but the
         * container shall use the unified hierarchy, let's make sure
         * we copy the path from the name=systemd hierarchy into the
         * unified hierarchy. Similarly for the reverse situation. */

        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup);
        if (r < 0)
                return log_error_errno(r, "Failed to get control group of " PID_FMT ": %m", pid);

        /* In order to access the hierarchy the host does not use, we need to mount it first */
        if (!mkdtemp(tree))
                return log_error_errno(errno, "Failed to generate temporary mount point for unified hierarchy: %m");

        if (unified)
                r = mount("cgroup", tree, "cgroup", MS_NOSUID|MS_NOEXEC|MS_NODEV, "none,name=systemd,xattr");
        else
                r = mount("cgroup", tree, "cgroup", MS_NOSUID|MS_NOEXEC|MS_NODEV, "__DEVEL__sane_behavior");
        if (r < 0) {
                r = log_error_errno(errno, "Failed to mount unified hierarchy: %m");
                goto finish;
        }

        undo_mount = true;

        fn = strjoina(tree, cgroup, "/cgroup.procs");
        (void) mkdir_parents(fn, 0755);

        sprintf(pid_string, PID_FMT, pid);
        r = write_string_file(fn, pid_string, 0);
        if (r < 0)
                log_error_errno(r, "Failed to move process: %m");

finish:
        if (undo_mount)
                (void) umount(tree);

        (void) rmdir(tree);
        return r;
}

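/* On the unified hierarchy, processes may not live in an inner node that also has child groups.
 * If no scope unit was created for the container, split our cgroup into a "supervisor" subgroup
 * for nspawn itself and a "payload" subgroup for the container. */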
int create_subcgroup(pid_t pid, bool unified_requested) {
        _cleanup_free_ char *cgroup = NULL;
        const char *child;
        int unified, r;
        CGroupMask supported;

        /* In the unified hierarchy inner nodes may only contain
         * subgroups, but not processes. Hence, if we are running in
         * the unified hierarchy, the container does the same, and we
         * did not create a scope unit for the container, move us and
         * the container into two separate subcgroups. */

        if (!unified_requested)
                return 0;

        unified = cg_unified();
        if (unified < 0)
                return log_error_errno(unified, "Failed to determine whether the unified hierarchy is used: %m");
        if (unified == 0)
                return 0;

        r = cg_mask_supported(&supported);
        if (r < 0)
                return log_error_errno(r, "Failed to determine supported controllers: %m");

        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
        if (r < 0)
                return log_error_errno(r, "Failed to get our control group: %m");

        child = strjoina(cgroup, "/payload");
        r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, child, pid);
        if (r < 0)
                return log_error_errno(r, "Failed to create %s subcgroup: %m", child);

        child = strjoina(cgroup, "/supervisor");
        r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, child, 0);
        if (r < 0)
                return log_error_errno(r, "Failed to create %s subcgroup: %m", child);

        /* Try to enable as many controllers as possible for the new payload. */
        (void) cg_enable_everywhere(supported, supported, cgroup);
        return 0;
}