/* SPDX-License-Identifier: LGPL-2.1-or-later */

#include <string.h>

#include "alloc-util.h"
#include "errno-util.h"
#include "fd-util.h"
#include "format-util.h"
#include "namespace-util.h"
#include "path-util.h"
#include "process-util.h"
#include "sort-util.h"
#include "stat-util.h"
#include "uid-range.h"
#include "user-util.h"

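/* Helpers for maintaining a sorted, coalesced set of UID (or GID) ranges, and for reading
 * /proc/<pid>/uid_map and /proc/<pid>/gid_map. Illustrative usage (a sketch, not code from this file):
 *
 *         _cleanup_(uid_range_freep) UIDRange *r = NULL;
 *         int x;
 *
 *         x = uid_range_add_str(&r, "1000-1999");
 *         if (x < 0)
 *                 return x;
 *
 *         assert(uid_range_covers(r, 1500, 10));
 */
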
UIDRange *uid_range_free(UIDRange *range) {
        if (!range)
                return NULL;

        free(range->entries);
        return mfree(range);
}

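/* Returns true if the two entries overlap or are directly adjacent, i.e. if they can be merged into a
 * single contiguous entry. */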
static bool uid_range_entry_intersect(const UIDRangeEntry *a, const UIDRangeEntry *b) {
        assert(a);
        assert(b);

        return a->start <= b->start + b->nr && a->start + a->nr >= b->start;
}

static int uid_range_entry_compare(const UIDRangeEntry *a, const UIDRangeEntry *b) {
        int r;

        assert(a);
        assert(b);

        r = CMP(a->start, b->start);
        if (r != 0)
                return r;

        return CMP(a->nr, b->nr);
}

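/* Sorts the entries and merges overlapping or adjacent ones in place, so that the array ends up as a
 * minimal list of disjoint ranges in ascending order. */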
static void uid_range_coalesce(UIDRange *range) {
        assert(range);

        if (range->n_entries <= 0)
                return;

        typesafe_qsort(range->entries, range->n_entries, uid_range_entry_compare);

        for (size_t i = 0; i < range->n_entries; i++) {
                UIDRangeEntry *x = range->entries + i;

                for (size_t j = i + 1; j < range->n_entries; j++) {
                        UIDRangeEntry *y = range->entries + j;
                        uid_t begin, end;

                        if (!uid_range_entry_intersect(x, y))
                                break;

                        begin = MIN(x->start, y->start);
                        end = MAX(x->start + x->nr, y->start + y->nr);

                        x->start = begin;
                        x->nr = end - begin;

                        if (range->n_entries > j + 1)
                                memmove(y, y + 1, sizeof(UIDRangeEntry) * (range->n_entries - j - 1));

                        range->n_entries--;
                        j--;
                }
        }
}

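/* Adds the range [start, start + nr) to *range, allocating the UIDRange object if *range is still NULL.
 * If 'coalesce' is true the entries are sorted and merged right away; callers adding many entries can
 * pass false and coalesce once at the end, as uid_range_load_userns() does. */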
int uid_range_add_internal(UIDRange **range, uid_t start, uid_t nr, bool coalesce) {
        _cleanup_(uid_range_freep) UIDRange *range_new = NULL;
        UIDRange *p;

        assert(range);

        if (nr <= 0)
                return 0;

        if (start > UINT32_MAX - nr) /* overflow check */
                return -ERANGE;

        if (*range)
                p = *range;
        else {
                range_new = new0(UIDRange, 1);
                if (!range_new)
                        return -ENOMEM;

                p = range_new;
        }

        if (!GREEDY_REALLOC(p->entries, p->n_entries + 1))
                return -ENOMEM;

        p->entries[p->n_entries++] = (UIDRangeEntry) {
                .start = start,
                .nr = nr,
        };

        if (coalesce)
                uid_range_coalesce(p);

        TAKE_PTR(range_new);
        *range = p;

        return 0;
}

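/* Parses 's' as a UID range via parse_uid_range() and adds the result, coalescing immediately. */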
int uid_range_add_str(UIDRange **range, const char *s) {
        uid_t start, end;
        int r;

        assert(range);
        assert(s);

        r = parse_uid_range(s, &start, &end);
        if (r < 0)
                return r;

        return uid_range_add_internal(range, start, end - start + 1, /* coalesce = */ true);
}

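/* Finds the highest UID that is strictly lower than *uid and contained in the (sorted, coalesced) range
 * set. On success, *uid is updated to that value and 1 is returned; -EBUSY is returned if there is none. */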
int uid_range_next_lower(const UIDRange *range, uid_t *uid) {
        uid_t closest = UID_INVALID, candidate;

        assert(range);
        assert(uid);

        if (*uid == 0)
                return -EBUSY;

        candidate = *uid - 1;

        for (size_t i = 0; i < range->n_entries; i++) {
                uid_t begin, end;

                begin = range->entries[i].start;
                end = range->entries[i].start + range->entries[i].nr - 1;

                if (candidate >= begin && candidate <= end) {
                        *uid = candidate;
                        return 1;
                }

                if (end < candidate)
                        closest = end;
        }

        if (closest == UID_INVALID)
                return -EBUSY;

        *uid = closest;
        return 1;
}

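/* Returns true if every UID in [start, start + nr) is contained in a single entry of the range set. */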
bool uid_range_covers(const UIDRange *range, uid_t start, uid_t nr) {
        if (nr == 0) /* empty range? always covered... */
                return true;

        if (start > UINT32_MAX - nr) /* range overflows? definitely not covered... */
                return false;

        if (!range)
                return false;

        FOREACH_ARRAY(i, range->entries, range->n_entries)
                if (start >= i->start &&
                    start + nr <= i->start + i->nr)
                        return true;

        return false;
}

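/* Reads one "base shift count" line from a uid_map/gid_map style file. Returns 0 on success, -ENOMSG (or
 * a read error) on EOF, and -EBADMSG if the line cannot be parsed or the count is zero. */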
int uid_map_read_one(FILE *f, uid_t *ret_base, uid_t *ret_shift, uid_t *ret_range) {
        uid_t uid_base, uid_shift, uid_range;
        int r;

        assert(f);

        errno = 0;
        r = fscanf(f, UID_FMT " " UID_FMT " " UID_FMT "\n", &uid_base, &uid_shift, &uid_range);
        if (r == EOF)
                return errno_or_else(ENOMSG);
        assert(r >= 0);
        if (r != 3)
                return -EBADMSG;
        if (uid_range <= 0)
                return -EBADMSG;

        if (ret_base)
                *ret_base = uid_base;
        if (ret_shift)
                *ret_shift = uid_shift;
        if (ret_range)
                *ret_range = uid_range;

        return 0;
}

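/* Returns the total number of UIDs covered by all entries of the range set. */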
unsigned uid_range_size(const UIDRange *range) {
        if (!range)
                return 0;

        unsigned n = 0;

        FOREACH_ARRAY(e, range->entries, range->n_entries)
                n += e->nr;

        return n;
}

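/* Returns true if the range set is NULL or contains no UIDs at all. */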
bool uid_range_is_empty(const UIDRange *range) {

        if (!range)
                return true;

        FOREACH_ARRAY(e, range->entries, range->n_entries)
                if (e->nr > 0)
                        return false;

        return true;
}

int uid_range_load_userns(const char *path, UIDRangeUsernsMode mode, UIDRange **ret) {
        _cleanup_(uid_range_freep) UIDRange *range = NULL;
        _cleanup_fclose_ FILE *f = NULL;
        int r;

        /* If 'path' is NULL, loads the UID range of the user namespace we run in. Otherwise, loads the
         * data from the specified file (which can be either uid_map or gid_map, in case the caller needs
         * to deal with GID maps).
         *
         * To simplify things, this will modify the passed array in case of later failure. */

        assert(mode >= 0);
        assert(mode < _UID_RANGE_USERNS_MODE_MAX);
        assert(ret);

        if (!path)
                path = IN_SET(mode, UID_RANGE_USERNS_INSIDE, UID_RANGE_USERNS_OUTSIDE) ? "/proc/self/uid_map" : "/proc/self/gid_map";

        f = fopen(path, "re");
        if (!f) {
                r = -errno;

                if (r == -ENOENT && path_startswith(path, "/proc/"))
                        return proc_mounted() > 0 ? -EOPNOTSUPP : -ENOSYS;

                return r;
        }

        range = new0(UIDRange, 1);
        if (!range)
                return -ENOMEM;

        for (;;) {
                uid_t uid_base, uid_shift, uid_range;

                r = uid_map_read_one(f, &uid_base, &uid_shift, &uid_range);
                if (r == -ENOMSG)
                        break;
                if (r < 0)
                        return r;

                r = uid_range_add_internal(
                                &range,
                                IN_SET(mode, UID_RANGE_USERNS_INSIDE, GID_RANGE_USERNS_INSIDE) ? uid_base : uid_shift,
                                uid_range,
                                /* coalesce = */ false);
                if (r < 0)
                        return r;
        }

        uid_range_coalesce(range);

        *ret = TAKE_PTR(range);
        return 0;
}

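/* Like uid_range_load_userns(), but for a user namespace referenced by fd: pins a helper process inside
 * the namespace via userns_enter_and_pin(), then reads its uid_map/gid_map through /proc/<pid>/. */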
int uid_range_load_userns_by_fd(int userns_fd, UIDRangeUsernsMode mode, UIDRange **ret) {
        _cleanup_(sigkill_waitp) pid_t pid = 0;
        int r;

        assert(userns_fd >= 0);
        assert(mode >= 0);
        assert(mode < _UID_RANGE_USERNS_MODE_MAX);
        assert(ret);

        r = userns_enter_and_pin(userns_fd, &pid);
        if (r < 0)
                return r;

        const char *p = procfs_file_alloca(
                        pid,
                        IN_SET(mode, UID_RANGE_USERNS_INSIDE, UID_RANGE_USERNS_OUTSIDE) ? "uid_map" : "gid_map");

        return uid_range_load_userns(p, mode, ret);
}

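/* Checks whether the interval [start, start + nr) overlaps (or at least touches the lower edge of) any
 * entry in the range set. The length is clamped so that start + nr does not wrap around. */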
bool uid_range_overlaps(const UIDRange *range, uid_t start, uid_t nr) {

        if (!range)
                return false;

        /* Avoid overflow */
        if (start > UINT32_MAX - nr)
                nr = UINT32_MAX - start;

        if (nr == 0)
                return false;

        FOREACH_ARRAY(entry, range->entries, range->n_entries)
                if (start < entry->start + entry->nr &&
                    start + nr >= entry->start)
                        return true;

        return false;
}

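/* Returns true if the two range sets contain exactly the same entries, in the same order. */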
bool uid_range_equal(const UIDRange *a, const UIDRange *b) {
        if (a == b)
                return true;

        if (!a || !b)
                return false;

        if (a->n_entries != b->n_entries)
                return false;

        for (size_t i = 0; i < a->n_entries; i++) {
                if (a->entries[i].start != b->entries[i].start)
                        return false;
                if (a->entries[i].nr != b->entries[i].nr)
                        return false;
        }

        return true;
}

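/* Scans the uid_map (or gid_map) of the given process for the entry that maps UID/GID 0 inside the
 * namespace, and returns the corresponding base UID/GID on the outside. */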
int uid_map_search_root(pid_t pid, UIDRangeUsernsMode mode, uid_t *ret) {
        int r;

        assert(pid_is_valid(pid));
        assert(IN_SET(mode, UID_RANGE_USERNS_OUTSIDE, GID_RANGE_USERNS_OUTSIDE));

        const char *p = procfs_file_alloca(pid, mode == UID_RANGE_USERNS_OUTSIDE ? "uid_map" : "gid_map");
        _cleanup_fclose_ FILE *f = fopen(p, "re");
        if (!f) {
                if (errno != ENOENT)
                        return -errno;

                r = proc_mounted();
                if (r < 0)
                        return -ENOENT; /* original error, if we can't determine /proc/ state */

                return r ? -ENOPKG : -ENOSYS;
        }

        for (;;) {
                uid_t uid_base = UID_INVALID, uid_shift = UID_INVALID;

                r = uid_map_read_one(f, &uid_base, &uid_shift, /* ret_range= */ NULL);
                if (r < 0)
                        return r;

                if (uid_base == 0) {
                        if (ret)
                                *ret = uid_shift;
                        return 0;
                }
        }
}