// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 * Manage the dynamic fd arrays in the process files_struct.
 */
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/close_range.h>
unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
unsigned int sysctl_nr_open_max =
	__const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;
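
/*
 * Worked example for the cap above: on a 64-bit box ~(size_t)0/sizeof(void *)
 * is far larger than INT_MAX, so __const_min() picks INT_MAX (2147483647) and
 * the "& -BITS_PER_LONG" rounds that down to a multiple of 64, i.e. 2147483584.
 * On 32-bit the size_t term wins: 0x3fffffff rounded down to a multiple of 32
 * gives 1073741792.
 */
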
static void __free_fdtable(struct fdtable *fdt)
{
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}
#define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))
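
/*
 * Worked example (64-bit): a 1024-slot table needs BITS_TO_LONGS(1024) = 16
 * words of open_fds bitmap, so BITBIT_NR(1024) = BITS_TO_LONGS(16) = 1 and
 * BITBIT_SIZE(1024) is a single 8-byte word - one "completely full" bit per
 * word of the open_fds bitmap.
 */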
/*
 * Copy 'count' fd bits from the old table to the new table and clear the extra
 * space if any.  This does not copy the file pointers.  Called with the files
 * spinlock held for write.
 */
static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
			    unsigned int count)
{
	unsigned int cpy, set;

	cpy = count / BITS_PER_BYTE;
	set = (nfdt->max_fds - count) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)nfdt->open_fds + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)nfdt->close_on_exec + cpy, 0, set);

	cpy = BITBIT_SIZE(count);
	set = BITBIT_SIZE(nfdt->max_fds) - cpy;
	memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
	memset((char *)nfdt->full_fds_bits + cpy, 0, set);
}
/*
 * Copy all file descriptors from the old table to the new, expanded table and
 * clear the extra space.  Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	size_t cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)nfdt->fd + cpy, 0, set);

	copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
}
/*
 * Note how the fdtable bitmap allocations very much have to be a multiple of
 * BITS_PER_LONG. This is not only because we walk those things in chunks of
 * 'unsigned long' in some places, but simply because that is how the Linux
 * kernel bitmaps are defined to work: they are not "bits in an array of bytes",
 * they are very much "bits in an array of unsigned long".
 *
 * The ALIGN(nr, BITS_PER_LONG) here is for clarity: since we just multiplied
 * by that "1024/sizeof(ptr)" before, we already know there are sufficient
 * clear low bits. Clang seems to realize that, gcc ends up being confused.
 *
 * On a 128-bit machine, the ALIGN() would actually matter. In the meantime,
 * let's consider it documentation (and maybe a test-case for gcc to improve
 * its code generation ;)
 */
static struct fdtable * alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
	nr = ALIGN(nr, BITS_PER_LONG);
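
	/*
	 * Worked example (64-bit, so 1024 / sizeof(struct file *) == 128):
	 * a request for nr = 300 gives 300/128 = 2, roundup_pow_of_two(3) = 4,
	 * 4*128 = 512, and ALIGN(512, 64) stays 512 - i.e. a 512-entry, 4KiB
	 * fd array.
	 */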
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.  Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;
	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
	fdt->max_fds = nr;
	data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
	fdt->fd = data;

	data = kvmalloc(max_t(size_t,
			      2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
			      GFP_KERNEL_ACCOUNT);
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	data += nr / BITS_PER_BYTE;
	fdt->full_fds_bits = data;
/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);
	/* make sure all fd_install() have seen resize_in_progress
	 * or have finished their rcu_read_lock_sched() section.
	 */
	if (atomic_read(&files->count) > 1)
		synchronize_rcu();

	spin_lock(&files->file_lock);
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable().  Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	cur_fdt = files_fdtable(files);
	BUG_ON(nr < cur_fdt->max_fds);
	copy_fdtable(new_fdt, cur_fdt);
	rcu_assign_pointer(files->fdt, new_fdt);
	if (cur_fdt != &files->fdtab)
		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	/* coupled with smp_rmb() in fd_install() */
	smp_wmb();
	return 1;
}
/*
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *fdt;
	int expanded = 0;

repeat:
	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return expanded;

	if (nr >= sysctl_nr_open)
		return -EMFILE;

	if (unlikely(files->resize_in_progress)) {
		spin_unlock(&files->file_lock);
		expanded = 1;
		wait_event(files->resize_wait, !files->resize_in_progress);
		spin_lock(&files->file_lock);
		goto repeat;
	}

	/* All good, so we try */
	files->resize_in_progress = true;
	expanded = expand_fdtable(files, nr);
	files->resize_in_progress = false;

	wake_up_all(&files->resize_wait);
	return expanded;
}
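
/*
 * Sketch of how a caller typically consumes the three outcomes above (this
 * mirrors what alloc_fd() further down does; the labels are illustrative):
 *
 *	error = expand_files(files, fd);
 *	if (error < 0)
 *		goto out;	// hard failure, e.g. -EMFILE
 *	if (error)
 *		goto repeat;	// the lock was dropped - revalidate and retry
 */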
static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	if (test_bit(fd, fdt->close_on_exec))
		__clear_bit(fd, fdt->close_on_exec);
}

static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
	fd /= BITS_PER_LONG;
	if (!~fdt->open_fds[fd])
		__set_bit(fd, fdt->full_fds_bits);
}

static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
	__clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
}
static unsigned int count_open_files(struct fdtable *fdt)
{
	unsigned int size = fdt->max_fds;
	unsigned int i;

	/* Find the last open fd */
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
	i = (i + 1) * BITS_PER_LONG;
	return i;
}
/*
 * Note that a sane fdtable size always has to be a multiple of
 * BITS_PER_LONG, since we have bitmaps that are sized by this.
 *
 * 'max_fds' will normally already be properly aligned, but it
 * turns out that in the close_range() -> __close_range() ->
 * unshare_fd() -> dup_fd() -> sane_fdtable_size() we can end
 * up having a 'max_fds' value that isn't already aligned.
 *
 * Rather than make close_range() have to worry about this,
 * just make that BITS_PER_LONG alignment be part of a sane
 * fdtable size. Because that's really what it is.
 */
static unsigned int sane_fdtable_size(struct fdtable *fdt, unsigned int max_fds)
{
	unsigned int count;

	count = count_open_files(fdt);
	if (max_fds < NR_OPEN_DEFAULT)
		max_fds = NR_OPEN_DEFAULT;
	return ALIGN(min(count, max_fds), BITS_PER_LONG);
}
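
/*
 * Worked example (64-bit, NR_OPEN_DEFAULT == BITS_PER_LONG == 64): if
 * count_open_files() reports 128 (highest open fd is, say, 99) and the caller
 * passed max_fds = 70, then min(128, 70) = 70 and ALIGN(70, 64) = 128 - the
 * smallest bitmap-friendly size that still covers every copied descriptor.
 */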
/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	unsigned int open_files, i;
	struct fdtable *old_fdt, *new_fdt;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->resize_in_progress = false;
	init_waitqueue_head(&newf->resize_wait);
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->full_fds_bits = newf->full_fds_bits_init;
	new_fdt->fd = &newf->fd_array[0];
	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = sane_fdtable_size(old_fdt, max_fds);
	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}
		/*
		 * Reacquire the oldf lock and a pointer to its fd table -
		 * who knows, it may have a new, bigger fd table. We need
		 * the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = sane_fdtable_size(old_fdt, max_fds);
	}

	copy_fd_bitmaps(new_fdt, old_fdt, open_files);
	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);
	/* clear the remainder */
	memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));
	rcu_assign_pointer(newf->fdt, new_fdt);
	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}
static struct fdtable *close_files(struct files_struct * files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	unsigned int i, j = 0;
	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}
void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}
void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}
struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
		.full_fds_bits	= init_files.full_fds_bits_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
	.resize_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
};
static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
	unsigned int maxfd = fdt->max_fds;
	unsigned int maxbit = maxfd / BITS_PER_LONG;
	unsigned int bitbit = start / BITS_PER_LONG;

	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit > maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;
	return find_next_zero_bit(fdt->open_fds, maxfd, start);
}
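
/*
 * Example of the two-level search above: with descriptors 0-63 all in use,
 * bit 0 of full_fds_bits is set, so the first find_next_zero_bit() skips
 * straight past that word and the per-fd scan of open_fds starts at fd 64
 * instead of walking the 64 known-busy bits.
 */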
/*
 * allocate a file descriptor, mark it busy.
 */
static int alloc_fd(unsigned start, unsigned end, unsigned flags)
{
	struct files_struct *files = current->files;
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_fd(fdt, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = expand_files(files, fd);

	/*
	 * If we needed to expand the fs array we
	 * might have blocked - try again.
	 */
	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}

	spin_unlock(&files->file_lock);
int __get_unused_fd_flags(unsigned flags, unsigned long nofile)
{
	return alloc_fd(0, nofile, flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE));
}
EXPORT_SYMBOL(get_unused_fd_flags);
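
/*
 * Sketch of the usual allocation pattern built from the helpers above and
 * fd_install()/put_unused_fd() below ("my_create_file" stands in for whatever
 * produces the struct file and is not part of this file):
 *
 *	fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	file = my_create_file();
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);
 *	return fd;
 */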
static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}
void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);
/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array.  At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us.  We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() do it, _really_ bad things
 * will follow.
 *
 * This consumes the "file" refcount, so callers should treat it
 * as if they had called fput(file).
 */
void fd_install(unsigned int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	if (WARN_ON_ONCE(unlikely(file->f_mode & FMODE_BACKING)))
		return;
	rcu_read_lock_sched();

	if (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		spin_lock(&files->file_lock);
		fdt = files_fdtable(files);
		BUG_ON(fdt->fd[fd] != NULL);
		rcu_assign_pointer(fdt->fd[fd], file);
		spin_unlock(&files->file_lock);
		return;
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}

EXPORT_SYMBOL(fd_install);
/**
 * pick_file - return file associated with fd
 * @files: file struct to retrieve file from
 * @fd: file descriptor to retrieve file for
 *
 * Context: files_lock must be held.
 *
 * Returns: The file associated with @fd (NULL if @fd is not open)
 */
static struct file *pick_file(struct files_struct *files, unsigned fd)
{
	struct fdtable *fdt = files_fdtable(files);
	struct file *file;

	if (fd >= fdt->max_fds)
		return NULL;

	fd = array_index_nospec(fd, fdt->max_fds);
	file = fdt->fd[fd];
	if (file) {
		rcu_assign_pointer(fdt->fd[fd], NULL);
		__put_unused_fd(files, fd);
	}
	return file;
}
int close_fd(unsigned fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	spin_lock(&files->file_lock);
	file = pick_file(files, fd);
	spin_unlock(&files->file_lock);
	if (!file)
		return -EBADF;

	return filp_close(file, files);
}
EXPORT_SYMBOL(close_fd);	/* for ksys_close() */
/**
 * last_fd - return last valid index into fd table
 * @fdt: File descriptor table.
 *
 * Context: Either rcu read lock or files_lock must be held.
 *
 * Returns: Last valid index into fdtable.
 */
static inline unsigned last_fd(struct fdtable *fdt)
{
	return fdt->max_fds - 1;
}
static inline void __range_cloexec(struct files_struct *cur_fds,
				   unsigned int fd, unsigned int max_fd)
{
	struct fdtable *fdt;

	/* make sure we're using the correct maximum value */
	spin_lock(&cur_fds->file_lock);
	fdt = files_fdtable(cur_fds);
	max_fd = min(last_fd(fdt), max_fd);
	if (fd <= max_fd)
		bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
	spin_unlock(&cur_fds->file_lock);
}
static inline void __range_close(struct files_struct *files, unsigned int fd,
				 unsigned int max_fd)
{
	struct file *file;
	unsigned n;

	spin_lock(&files->file_lock);
	n = last_fd(files_fdtable(files));
	max_fd = min(max_fd, n);

	for (; fd <= max_fd; fd++) {
		file = pick_file(files, fd);
		if (file) {
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		} else if (need_resched()) {
			spin_unlock(&files->file_lock);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}
/**
 * __close_range() - Close all file descriptors in a given range.
 *
 * @fd:     starting file descriptor to close
 * @max_fd: last file descriptor to close
 * @flags:  CLOSE_RANGE flags.
 *
 * This closes a range of file descriptors. All file descriptors
 * from @fd up to and including @max_fd are closed.
 */
int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
{
	struct task_struct *me = current;
	struct files_struct *cur_fds = me->files, *fds = NULL;
	if (flags & ~(CLOSE_RANGE_UNSHARE | CLOSE_RANGE_CLOEXEC))
		return -EINVAL;
	if (flags & CLOSE_RANGE_UNSHARE) {
		int ret;
		unsigned int max_unshare_fds = NR_OPEN_MAX;

		/*
		 * If the caller requested all fds to be made cloexec we always
		 * copy all of the file descriptors since they still want to
		 * use them.
		 */
		if (!(flags & CLOSE_RANGE_CLOEXEC)) {
			/*
			 * If the requested range is greater than the current
			 * maximum, we're closing everything so only copy all
			 * file descriptors beneath the lowest file descriptor.
			 */
			if (max_fd >= last_fd(files_fdtable(cur_fds)))
				max_unshare_fds = fd;
		}

		ret = unshare_fd(CLONE_FILES, max_unshare_fds, &fds);
		if (ret)
			return ret;

		/*
		 * We used to share our file descriptor table, and have now
		 * created a private one, make sure we're using it below.
		 */
		if (fds)
			swap(cur_fds, fds);
	}
	if (flags & CLOSE_RANGE_CLOEXEC)
		__range_cloexec(cur_fds, fd, max_fd);
	else
		__range_close(cur_fds, fd, max_fd);

	if (fds) {
		/*
		 * We're done closing the files we were supposed to. Time to install
		 * the new file descriptor table and drop the old one.
		 */
		task_lock(me);
		me->files = cur_fds;
		task_unlock(me);
		put_files_struct(fds);
	}

	return 0;
}
/*
 * See close_fd_get_file() below, this variant assumes current->files->file_lock
 * is held.
 */
struct file *__close_fd_get_file(unsigned int fd)
{
	return pick_file(current->files, fd);
}
/*
 * variant of close_fd that gets a ref on the file for later fput.
 * The caller must ensure that filp_close() is called on the file.
 */
struct file *close_fd_get_file(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	spin_lock(&files->file_lock);
	file = pick_file(files, fd);
	spin_unlock(&files->file_lock);

	return file;
}
void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}
static struct file *__get_file_rcu(struct file __rcu **f)
{
	struct file __rcu *file;
	struct file __rcu *file_reloaded;
	struct file __rcu *file_reloaded_cmp;

	file = rcu_dereference_raw(*f);
	if (!file)
		return NULL;

	if (unlikely(!atomic_long_inc_not_zero(&file->f_count)))
		return ERR_PTR(-EAGAIN);

	file_reloaded = rcu_dereference_raw(*f);

	/*
	 * Ensure that all accesses have a dependency on the load from
	 * rcu_dereference_raw() above so we get correct ordering
	 * between reuse/allocation and the pointer check below.
	 */
	file_reloaded_cmp = file_reloaded;
	OPTIMIZER_HIDE_VAR(file_reloaded_cmp);

	/*
	 * atomic_long_inc_not_zero() above provided a full memory
	 * barrier when we acquired a reference.
	 *
	 * This is paired with the write barrier from assigning to the
	 * __rcu protected file pointer so that if that pointer still
	 * matches the current file, we know we have successfully
	 * acquired a reference to the right file.
	 *
	 * If the pointers don't match the file has been reallocated by
	 * SLAB_TYPESAFE_BY_RCU.
	 */
	if (file == file_reloaded_cmp)
		return file_reloaded;

	fput(file);
	return ERR_PTR(-EAGAIN);
}
/**
 * get_file_rcu - try to get a reference to a file under rcu
 * @f: the file to get a reference on
 *
 * This function tries to get a reference on @f carefully verifying that
 * @f hasn't been reused.
 *
 * This function should rarely have to be used and only by users who
 * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it.
 *
 * Return: Returns @f with the reference count increased or NULL.
 */
struct file *get_file_rcu(struct file __rcu **f)
{
	for (;;) {
		struct file __rcu *file;

		file = __get_file_rcu(f);
		if (unlikely(!file))
			return NULL;

		if (unlikely(IS_ERR(file)))
			continue;

		return file;
	}
}
EXPORT_SYMBOL_GPL(get_file_rcu);
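
/*
 * Illustrative caller of get_file_rcu() (the "entries" array is made up): the
 * slot must be an RCU-managed struct file pointer and the lookup must run
 * under rcu_read_lock():
 *
 *	rcu_read_lock();
 *	file = get_file_rcu(&grp->entries[idx]);
 *	rcu_read_unlock();
 *	if (file) {
 *		// ... use the file ...
 *		fput(file);
 *	}
 */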
/**
 * get_file_active - try to get a reference to a file
 * @f: the file to get a reference on
 *
 * In contrast to get_file_rcu() the pointer itself isn't part of the
 * reference counting.
 *
 * This function should rarely have to be used and only by users who
 * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it.
 *
 * Return: Returns @f with the reference count increased or NULL.
 */
struct file *get_file_active(struct file **f)
{
	struct file __rcu *file;

	rcu_read_lock();
	file = __get_file_rcu(f);
	rcu_read_unlock();
	if (IS_ERR(file))
		file = NULL;
	return file;
}
EXPORT_SYMBOL_GPL(get_file_active);
static inline struct file *__fget_files_rcu(struct files_struct *files,
	unsigned int fd, fmode_t mask)
{
	for (;;) {
		struct file *file;
		struct fdtable *fdt = rcu_dereference_raw(files->fdt);
		struct file __rcu **fdentry;

		if (unlikely(fd >= fdt->max_fds))
			return NULL;

		fdentry = fdt->fd + array_index_nospec(fd, fdt->max_fds);

		/*
		 * Ok, we have a file pointer. However, because we do
		 * this all locklessly under RCU, we may be racing with
		 * that file being closed.
		 *
		 * Such a race can take two forms:
		 *
		 *  (a) the file ref already went down to zero and the
		 *      file hasn't been reused yet or the file count
		 *      isn't zero but the file has already been reused.
		 */
		file = __get_file_rcu(fdentry);
		if (unlikely(!file))
			return NULL;

		if (unlikely(IS_ERR(file)))
			continue;

		/*
		 *  (b) the file table entry has changed under us.
		 *      Note that we don't need to re-check the 'fdt->fd'
		 *      pointer having changed, because it always goes
		 *      hand-in-hand with 'fdt'.
		 *
		 *      If so, we need to put our ref and try again.
		 */
		if (unlikely(rcu_dereference_raw(files->fdt) != fdt)) {
			fput(file);
			continue;
		}

		/*
		 * This isn't the file we're looking for or we're not
		 * allowed to get a reference to it.
		 */
		if (unlikely(file->f_mode & mask)) {
			fput(file);
			return NULL;
		}

		/*
		 * Ok, we have a ref to the file, and checked that it
		 * still exists.
		 */
		return file;
	}
}
static struct file *__fget_files(struct files_struct *files, unsigned int fd,
				 fmode_t mask)
{
	struct file *file;

	rcu_read_lock();
	file = __fget_files_rcu(files, fd, mask);
	rcu_read_unlock();

	return file;
}

static inline struct file *__fget(unsigned int fd, fmode_t mask)
{
	return __fget_files(current->files, fd, mask);
}
struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0);
}
EXPORT_SYMBOL(fget_raw);
struct file *fget_task(struct task_struct *task, unsigned int fd)
{
	struct file *file = NULL;

	task_lock(task);
	if (task->files)
		file = __fget_files(task->files, fd, 0);
	task_unlock(task);

	return file;
}
struct file *lookup_fdget_rcu(unsigned int fd)
{
	return __fget_files_rcu(current->files, fd, 0);
}
EXPORT_SYMBOL_GPL(lookup_fdget_rcu);
struct file *task_lookup_fdget_rcu(struct task_struct *task, unsigned int fd)
{
	/* Must be called with rcu_read_lock held */
	struct files_struct *files;
	struct file *file = NULL;

	task_lock(task);
	files = task->files;
	if (files)
		file = __fget_files_rcu(files, fd, 0);
	task_unlock(task);

	return file;
}
struct file *task_lookup_next_fdget_rcu(struct task_struct *task, unsigned int *ret_fd)
{
	/* Must be called with rcu_read_lock held */
	struct files_struct *files;
	unsigned int fd = *ret_fd;
	struct file *file = NULL;

	task_lock(task);
	files = task->files;
	if (files) {
		for (; fd < files_fdtable(files)->max_fds; fd++) {
			file = __fget_files_rcu(files, fd, 0);
			if (file)
				break;
		}
	}
	task_unlock(task);
	*ret_fd = fd;
	return file;
}
EXPORT_SYMBOL(task_lookup_next_fdget_rcu);
/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 *
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	/*
	 * If another thread is concurrently calling close_fd() followed
	 * by put_files_struct(), we must not observe the old table
	 * entry combined with the new refcount - otherwise we could
	 * return a file that is concurrently being freed.
	 *
	 * atomic_read_acquire() pairs with atomic_dec_and_test() in
	 * put_files_struct().
	 */
	if (atomic_read_acquire(&files->count) == 1) {
		file = files_lookup_fd_raw(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return 0;
		return (unsigned long)file;
	} else {
		file = __fget(fd, mask);
		if (!file)
			return 0;
		return FDPUT_FPUT | (unsigned long)file;
	}
}
unsigned long __fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);

unsigned long __fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}
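
/*
 * The usual consumers of the __fdget*() helpers are the fdget()/fdput() pair
 * from <linux/file.h>; a syscall body built on them typically looks like this
 * ("do_something" is only a placeholder):
 *
 *	struct fd f = fdget(fd);
 *	if (!f.file)
 *		return -EBADF;
 *	ret = do_something(f.file);
 *	fdput(f);
 *	return ret;
 */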
/*
 * Try to avoid f_pos locking. We only need it if the
 * file is marked for FMODE_ATOMIC_POS, and it can be
 * accessed multiple ways.
 *
 * Always do it for directories, because pidfd_getfd()
 * can make a file accessible even if it otherwise would
 * not be, and for directories this is a correctness
 * issue, not a "POSIX requirement".
 */
static inline bool file_needs_f_pos_lock(struct file *file)
{
	return (file->f_mode & FMODE_ATOMIC_POS) &&
		(file_count(file) > 1 || file->f_op->iterate_shared);
}
unsigned long __fdget_pos(unsigned int fd)
{
	unsigned long v = __fdget(fd);
	struct file *file = (struct file *)(v & ~3);

	if (file && file_needs_f_pos_lock(file)) {
		v |= FDPUT_POS_UNLOCK;
		mutex_lock(&file->f_pos_lock);
	}
	return v;
}
void __f_unlock_pos(struct file *f)
{
	mutex_unlock(&f->f_pos_lock);
}
/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */
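
/*
 * Position-aware syscalls reach the helpers above through the
 * fdget_pos()/fdput_pos() pair from <linux/file.h>, roughly like this
 * ("my_read" is only a placeholder):
 *
 *	struct fd f = fdget_pos(fd);
 *	if (!f.file)
 *		return -EBADF;
 *	ret = my_read(f.file, buf, count, &f.file->f_pos);
 *	fdput_pos(f);
 *	return ret;
 */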
void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}
bool get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	bool res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = close_on_exec(fd, fdt);
	rcu_read_unlock();
	return res;
}
static int do_dup2(struct files_struct *files,
	struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;
	/*
	 * We need to detect attempts to do dup2() over allocated but still
	 * not finished descriptor.  NB: OpenBSD avoids that at the price of
	 * extra work in their equivalent of fget() - they insert struct
	 * file immediately after grabbing descriptor, mark it larval if
	 * more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent.  Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 *
	 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
	 * deadlocks in rather amusing ways, AFAICS.  All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}
int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return close_fd(fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}
/**
 * __receive_fd() - Install received file into file descriptor table
 * @file: struct file that was received from another process
 * @ufd: __user pointer to write new fd number to
 * @o_flags: the O_* flags to apply to the new fd entry
 *
 * Installs a received file into the file descriptor table, with appropriate
 * checks and count updates. Optionally writes the fd number to userspace, if
 * @ufd is non-NULL.
 *
 * This helper handles its own reference counting of the incoming
 * struct file.
 *
 * Returns newly installed fd or -ve on error.
 */
int __receive_fd(struct file *file, int __user *ufd, unsigned int o_flags)
{
	int new_fd;
	int error;

	error = security_file_receive(file);
	if (error)
		return error;

	new_fd = get_unused_fd_flags(o_flags);
	if (new_fd < 0)
		return new_fd;

	if (ufd) {
		error = put_user(new_fd, ufd);
		if (error) {
			put_unused_fd(new_fd);
			return error;
		}
	}

	fd_install(new_fd, get_file(file));
	__receive_sock(file);
	return new_fd;
}
int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags)
{
	int error;

	error = security_file_receive(file);
	if (error)
		return error;
	error = replace_fd(new_fd, file, o_flags);
	if (error)
		return error;
	__receive_sock(file);
	return new_fd;
}

int receive_fd(struct file *file, unsigned int o_flags)
{
	return __receive_fd(file, NULL, o_flags);
}
EXPORT_SYMBOL_GPL(receive_fd);
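
/*
 * Sketch of a typical receive_fd() consumer (error handling trimmed; the
 * surrounding code is not part of this file):
 *
 *	int fd = receive_fd(file, O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	// fd now refers to 'file' in the current process's table
 */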
static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = files_lookup_fd_locked(files, oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}
SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	return ksys_dup3(oldfd, newfd, flags);
}
SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		struct file *f;
		int retval = oldfd;

		rcu_read_lock();
		f = __fget_files_rcu(files, oldfd, 0);
		if (!f)
			retval = -EBADF;
		rcu_read_unlock();
		if (f)
			fput(f);
		return retval;
	}
	return ksys_dup3(oldfd, newfd, 0);
}
SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd_flags(0);
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}
int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	int err;
	unsigned long nofile = rlimit(RLIMIT_NOFILE);

	if (from >= nofile)
		return -EINVAL;
	err = alloc_fd(from, nofile, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}
int iterate_fd(struct files_struct *files, unsigned n,
		int (*f)(const void *, struct file *, unsigned),
		const void *p)
{
	struct fdtable *fdt;
	int res = 0;

	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);
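
/*
 * Example of driving iterate_fd(): the callback gets (p, file, fd) for every
 * populated slot, and a non-zero return value stops the walk and becomes
 * iterate_fd()'s own return value ("match_file" is only illustrative):
 *
 *	static int match_file(const void *p, struct file *file, unsigned fd)
 *	{
 *		return file == p ? fd + 1 : 0;
 *	}
 *
 *	// fd + 1 of the first slot holding 'victim', or 0 if it is not there
 *	found = iterate_fd(files, 0, match_file, victim);
 */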