1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
5 #include "alloc-util.h"
6 #include "errno-util.h"
8 #include "format-util.h"
9 #include "namespace-util.h"
10 #include "path-util.h"
11 #include "process-util.h"
12 #include "sort-util.h"
13 #include "stat-util.h"
14 #include "uid-range.h"
15 #include "user-util.h"
/* NOTE(review): this extract is line-mangled — statements wrap mid-token, interior lines
 * (body, closing brace) are elided, and leading numerals (e.g. "17") are fused source
 * line numbers, not code. Bytes preserved as-is; comments only. */
/* Destructor for a UIDRange. Returns UIDRange* — presumably NULL after freeing, matching
 * the uid_range_freep() cleanup pattern used elsewhere in this file — TODO confirm,
 * body not visible in this extract. */
17 UIDRange
*uid_range_free(UIDRange
*range
) {
/* NOTE(review): line-mangled extract; asserts and the closing brace are elided. */
/* True when entry [a->start, a->start + a->nr] touches or overlaps
 * [b->start, b->start + b->nr]. Both comparisons are inclusive (<=, >=), so
 * exactly-adjacent entries also count as "intersecting" — which is what allows
 * uid_range_coalesce() below to merge contiguous ranges into one entry. */
25 static bool uid_range_entry_intersect(const UIDRangeEntry
*a
, const UIDRangeEntry
*b
) {
29 return a
->start
<= b
->start
+ b
->nr
&& a
->start
+ a
->nr
>= b
->start
;
/* NOTE(review): line-mangled extract; the declaration of r, the early return on
 * r != 0, and the closing brace are elided. */
/* qsort-style comparator: orders entries by start first, then by nr as tie-breaker. */
32 static int uid_range_entry_compare(const UIDRangeEntry
*a
, const UIDRangeEntry
*b
) {
38 r
= CMP(a
->start
, b
->start
);
/* Tie-break on range length when starts are equal (nonzero-r early return elided). */
42 return CMP(a
->nr
, b
->nr
);
/* NOTE(review): line-mangled extract; the early-return body, the continue on
 * non-intersection, the writes of begin/end back into *x, the j adjustment after
 * removal, and closing braces are elided — documented from what is visible. */
/* Normalizes a UIDRange in place: sorts the entries, then repeatedly merges any pair of
 * entries that intersect (or touch — see uid_range_entry_intersect()) into their union,
 * compacting the array with memmove() when an entry is swallowed. */
45 static void uid_range_coalesce(UIDRange
*range
) {
/* Nothing to do for an empty entry list (early return elided from extract). */
48 if (range
->n_entries
<= 0)
/* Sort by (start, nr) so overlap candidates are adjacent. */
51 typesafe_qsort(range
->entries
, range
->n_entries
, uid_range_entry_compare
);
53 for (size_t i
= 0; i
< range
->n_entries
; i
++) {
54 UIDRangeEntry
*x
= range
->entries
+ i
;
56 for (size_t j
= i
+ 1; j
< range
->n_entries
; j
++) {
57 UIDRangeEntry
*y
= range
->entries
+ j
;
/* Skip non-intersecting pairs (branch body elided from extract). */
60 if (!uid_range_entry_intersect(x
, y
))
/* Compute the union [begin, end) of the two entries; presumably written back
 * into *x afterwards — that assignment is elided from this extract. */
63 begin
= MIN(x
->start
, y
->start
);
64 end
= MAX(x
->start
+ x
->nr
, y
->start
+ y
->nr
);
/* Remove entry j by shifting the tail of the array down one slot;
 * memmove (not memcpy) because source and destination overlap. */
69 if (range
->n_entries
> j
+ 1)
70 memmove(y
, y
+ 1, sizeof(UIDRangeEntry
) * (range
->n_entries
- j
- 1));
/* NOTE(review): line-mangled extract; error returns, the choice of p (presumably
 * either *range or the freshly allocated range_new — confirm), the entry initializer
 * fields, and the success/ownership-transfer path are elided. */
/* Adds the range [start, start + nr) to *range, allocating the UIDRange lazily on first
 * use; when `coalesce` is set the entry list is re-normalized afterwards. Returns a
 * negative errno-style code on failure (exact codes not visible in this extract). */
78 int uid_range_add_internal(UIDRange
**range
, uid_t start
, uid_t nr
, bool coalesce
) {
79 _cleanup_(uid_range_freep
) UIDRange
*range_new
= NULL
;
/* Reject start + nr wrapping past the 32-bit UID space (error return elided). */
87 if (start
> UINT32_MAX
- nr
) /* overflow check */
/* Lazy allocation when *range is not yet set (surrounding condition elided). */
93 range_new
= new0(UIDRange
, 1);
/* Grow the entries array by one slot (ENOMEM return elided). */
100 if (!GREEDY_REALLOC(p
->entries
, p
->n_entries
+ 1))
/* Append the new entry; initializer field list elided from this extract. */
103 p
->entries
[p
->n_entries
++] = (UIDRangeEntry
) {
109 uid_range_coalesce(p
);
/* NOTE(review): line-mangled extract; declarations of start/end/r and the error check
 * after parse_uid_range() are elided. */
/* Parses a textual UID range from s (via parse_uid_range(), which presumably yields an
 * inclusive [start, end] pair — confirm against its definition) and adds it to *range.
 * `end - start + 1` converts the inclusive pair into a count for uid_range_add_internal(). */
117 int uid_range_add_str(UIDRange
**range
, const char *s
) {
124 r
= parse_uid_range(s
, &start
, &end
);
128 return uid_range_add_internal(range
, start
, end
- start
+ 1, /* coalesce = */ true);
/* NOTE(review): line-mangled extract; asserts, the guard before decrementing (the
 * candidate = *uid - 1 would wrap if *uid == 0 — presumably guarded, confirm), the
 * in-entry branch body, the closest-tracking logic, the error return and the final
 * store through *uid are all elided. */
/* Searches the range for the next covered UID strictly below *uid, presumably storing
 * it back through *uid on success — TODO confirm, result path not visible. */
131 int uid_range_next_lower(const UIDRange
*range
, uid_t
*uid
) {
132 uid_t closest
= UID_INVALID
, candidate
;
/* Start looking just below the current value. */
140 candidate
= *uid
- 1;
142 for (size_t i
= 0; i
< range
->n_entries
; i
++) {
/* begin/end are the inclusive bounds of entry i. */
145 begin
= range
->entries
[i
].start
;
146 end
= range
->entries
[i
].start
+ range
->entries
[i
].nr
- 1;
/* candidate falls inside this entry (branch body elided from extract). */
148 if (candidate
>= begin
&& candidate
<= end
) {
/* No entry lies below *uid — failure path (return elided from extract). */
157 if (closest
== UID_INVALID
)
/* NOTE(review): line-mangled extract; the return statements for each branch and the
 * final `return false` are elided. */
/* True when the whole range [start, start + nr) fits inside a single entry of `range`.
 * An empty request (nr == 0) is trivially covered; a request overflowing the 32-bit
 * UID space is never covered. */
164 bool uid_range_covers(const UIDRange
*range
, uid_t start
, uid_t nr
) {
165 if (nr
== 0) /* empty range? always covered... */
168 if (start
> UINT32_MAX
- nr
) /* range overflows? definitely not covered... */
/* Containment must be within ONE entry; spanning two adjacent entries would only
 * work if the range was coalesced first — presumably guaranteed by callers, confirm. */
174 FOREACH_ARRAY(i
, range
->entries
, range
->n_entries
)
175 if (start
>= i
->start
&&
176 start
+ nr
<= i
->start
+ i
->nr
)
/* NOTE(review): line-mangled extract; asserts, the r != 3 / EOF checks around fscanf,
 * and the guards around the output stores are elided. */
/* Reads one "base shift range" triplet (the /proc/<pid>/uid_map | gid_map line format)
 * from f using fscanf() with UID_FMT. The three ret pointers appear to be optional —
 * a caller below passes ret_range=NULL — so each store is presumably guarded by a
 * NULL check (elided here); TODO confirm. */
182 int uid_map_read_one(FILE *f
, uid_t
*ret_base
, uid_t
*ret_shift
, uid_t
*ret_range
) {
183 uid_t uid_base
, uid_shift
, uid_range
;
189 r
= fscanf(f
, UID_FMT
" " UID_FMT
" " UID_FMT
"\n", &uid_base
, &uid_shift
, &uid_range
);
/* Scan failure: propagate errno if set, otherwise ENOMSG (the enclosing condition,
 * presumably on a short fscanf match count, is elided from this extract). */
191 return errno_or_else(ENOMSG
);
199 *ret_base
= uid_base
;
201 *ret_shift
= uid_shift
;
203 *ret_range
= uid_range
;
/* NOTE(review): line-mangled extract; the accumulator declaration, the per-entry
 * accumulation statement and the return are elided. */
/* Returns the total number of UIDs in the range — presumably the sum of e->nr across
 * all entries, accumulated in the loop below; TODO confirm, loop body not visible. */
208 unsigned uid_range_size(const UIDRange
*range
) {
214 FOREACH_ARRAY(e
, range
->entries
, range
->n_entries
)
/* NOTE(review): line-mangled extract; the per-entry check inside the loop and the
 * return statements are elided. */
/* True when the range contains no UIDs at all — presumably any entry with nr > 0 makes
 * it non-empty; TODO confirm, loop body not visible. */
220 bool uid_range_is_empty(const UIDRange
*range
) {
225 FOREACH_ARRAY(e
, range
->entries
, range
->n_entries
)
/* NOTE(review): line-mangled extract; asserts, the fopen error handling around r,
 * the read loop construct, EOF/error handling after uid_map_read_one(), parts of the
 * embedded comment (original lines 239-240), and the success return are elided. */
/* Loads a UIDRange from a uid_map/gid_map-format file. If path is NULL, the calling
 * process's own /proc/self/uid_map or gid_map is used, selected by mode. Each map line
 * contributes either its first column (base — the INSIDE view) or its second column
 * (shift — the OUTSIDE view) depending on mode. Entries are added without per-line
 * coalescing, then normalized once at the end; ownership is handed to *ret via TAKE_PTR. */
232 int uid_range_load_userns(const char *path
, UIDRangeUsernsMode mode
, UIDRange
**ret
) {
233 _cleanup_(uid_range_freep
) UIDRange
*range
= NULL
;
234 _cleanup_fclose_
FILE *f
= NULL
;
237 /* If 'path' is NULL loads the UID range of the userns namespace we run. Otherwise load the data from
238 * the specified file (which can be either uid_map or gid_map, in case caller needs to deal with GID
241 * To simplify things this will modify the passed array in case of later failure. */
244 assert(mode
< _UID_RANGE_USERNS_MODE_MAX
);
/* Default path selection: uid_map for UID modes, gid_map for GID modes. */
248 path
= IN_SET(mode
, UID_RANGE_USERNS_INSIDE
, UID_RANGE_USERNS_OUTSIDE
) ? "/proc/self/uid_map" : "/proc/self/gid_map";
250 f
= fopen(path
, "re");
/* A missing map file under /proc/ means either user namespaces are unsupported
 * (-EOPNOTSUPP, /proc is mounted) or /proc itself is absent (-ENOSYS). */
254 if (r
== -ENOENT
&& path_startswith(path
, "/proc/"))
255 return proc_mounted() > 0 ? -EOPNOTSUPP
: -ENOSYS
;
260 range
= new0(UIDRange
, 1);
/* Per-line read loop (loop construct and EOF/error handling elided from extract). */
265 uid_t uid_base
, uid_shift
, uid_range
;
267 r
= uid_map_read_one(f
, &uid_base
, &uid_shift
, &uid_range
);
/* Pick the inside (base) or outside (shift) column; the other arguments to this
 * call — presumably &range and uid_range as the nr — are elided from the extract. */
273 r
= uid_range_add_internal(
275 IN_SET(mode
, UID_RANGE_USERNS_INSIDE
, GID_RANGE_USERNS_INSIDE
) ? uid_base
: uid_shift
,
277 /* coalesce = */ false);
/* Normalize once, after all lines are in — cheaper than coalescing per line. */
282 uid_range_coalesce(range
);
284 *ret
= TAKE_PTR(range
);
/* NOTE(review): line-mangled extract; the declaration of r, the error check after
 * userns_enter_and_pin(), and part of the procfs_file_alloca() argument list
 * (presumably the pinned pid as first argument — confirm) are elided. */
/* Loads the UID/GID range of the user namespace referenced by userns_fd: pins a child
 * process inside that namespace via userns_enter_and_pin(), then reads that child's
 * /proc/<pid>/uid_map or gid_map through uid_range_load_userns(). The _cleanup_
 * sigkill_waitp handler kills and reaps the pinned child on every exit path. */
288 int uid_range_load_userns_by_fd(int userns_fd
, UIDRangeUsernsMode mode
, UIDRange
**ret
) {
289 _cleanup_(sigkill_waitp
) pid_t pid
= 0;
292 assert(userns_fd
>= 0);
294 assert(mode
< _UID_RANGE_USERNS_MODE_MAX
);
297 r
= userns_enter_and_pin(userns_fd
, &pid
);
301 const char *p
= procfs_file_alloca(
303 IN_SET(mode
, UID_RANGE_USERNS_INSIDE
, UID_RANGE_USERNS_OUTSIDE
) ? "uid_map" : "gid_map");
305 return uid_range_load_userns(p
, mode
, ret
);
/* NOTE(review): line-mangled extract; the NULL/empty guard, the `return true` in the
 * loop and the final `return false` are elided. */
/* True when [start, start + nr) overlaps any entry of the range; nr is clamped first so
 * start + nr cannot wrap past the 32-bit UID space. */
/* NOTE(review): the two bound checks are asymmetric — `start < entry->start + entry->nr`
 * is strict while `start + nr >= entry->start` is inclusive. For half-open intervals the
 * second would normally be strict too (`>`); as written, a range ending exactly where an
 * entry begins reports overlap. Possibly intentional (conservative), possibly an
 * off-by-one — verify against upstream/callers before changing. */
308 bool uid_range_overlaps(const UIDRange
*range
, uid_t start
, uid_t nr
) {
314 if (start
> UINT32_MAX
- nr
)
315 nr
= UINT32_MAX
- start
;
320 FOREACH_ARRAY(entry
, range
->entries
, range
->n_entries
)
321 if (start
< entry
->start
+ entry
->nr
&&
322 start
+ nr
>= entry
->start
)
/* NOTE(review): line-mangled extract; the preamble (presumably pointer-equality /
 * NULL handling), the `return false` bodies and the final `return true` are elided. */
/* Structural equality: same entry count and identical (start, nr) pairs at each index.
 * This compares entry-by-entry, so it presumably relies on both ranges being normalized
 * (sorted + coalesced) — otherwise equal UID sets could compare unequal; confirm that
 * all producers run uid_range_coalesce(). */
328 bool uid_range_equal(const UIDRange
*a
, const UIDRange
*b
) {
335 if (a
->n_entries
!= b
->n_entries
)
338 for (size_t i
= 0; i
< a
->n_entries
; i
++) {
339 if (a
->entries
[i
].start
!= b
->entries
[i
].start
)
341 if (a
->entries
[i
].nr
!= b
->entries
[i
].nr
)
/* NOTE(review): line-mangled extract AND truncated — this function continues past the
 * end of the visible source; the fopen error handling, the loop/return logic after
 * uid_map_read_one(), and the store through *ret are not visible. */
/* Looks up, in pid's uid_map or gid_map (selected by the OUTSIDE-view mode), the
 * host-side mapping — presumably the entry whose inside base covers root (UID 0),
 * returning its outside shift via *ret; TODO confirm, tail of function not visible. */
348 int uid_map_search_root(pid_t pid
, UIDRangeUsernsMode mode
, uid_t
*ret
) {
351 assert(pid_is_valid(pid
));
352 assert(IN_SET(mode
, UID_RANGE_USERNS_OUTSIDE
, GID_RANGE_USERNS_OUTSIDE
));
354 const char *p
= procfs_file_alloca(pid
, mode
== UID_RANGE_USERNS_OUTSIDE
? "uid_map" : "gid_map");
355 _cleanup_fclose_
FILE *f
= fopen(p
, "re");
/* Propagate the original ENOENT when the /proc/ state cannot be determined. */
362 return -ENOENT
; /* original error, if we can't determine /proc/ state */
/* Distinguish "process is gone / map unavailable" (-ENOPKG) from "no /proc at all"
 * (-ENOSYS); the condition feeding r here is elided from this extract. */
364 return r
? -ENOPKG
: -ENOSYS
;
368 uid_t uid_base
= UID_INVALID
, uid_shift
= UID_INVALID
;
/* ret_range deliberately NULL: only base and shift are needed here. */
370 r
= uid_map_read_one(f
, &uid_base
, &uid_shift
, /* ret_range= */ NULL
);