// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/hugetlb.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "openclose.h"
#include "rsrc.h"

struct io_rsrc_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage);

#define IORING_MAX_FIXED_FILES	(1U << 20)
#define IORING_MAX_REG_BUFFERS	(1U << 14)

static const struct io_mapped_ubuf dummy_ubuf = {
	/* set invalid range, so io_import_fixed() fails meeting it */
	.ubuf = -1UL,
	.ubuf_end = 0,
};

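/*
 * Registered buffers are charged against RLIMIT_MEMLOCK for the owning user
 * and, when an mm is accounted, against ->pinned_vm. The lockless cmpxchg
 * loop below bumps user->locked_vm and retries if another task raced with us.
 */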
int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
{
	unsigned long page_limit, cur_pages, new_pages;

	if (!nr_pages)
		return 0;

	/* Don't allow more pages than we can safely lock */
	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	cur_pages = atomic_long_read(&user->locked_vm);
	do {
		new_pages = cur_pages + nr_pages;
		if (new_pages > page_limit)
			return -ENOMEM;
	} while (!atomic_long_try_cmpxchg(&user->locked_vm,
					  &cur_pages, new_pages));
	return 0;
}

static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	if (ctx->user)
		__io_unaccount_mem(ctx->user, nr_pages);

	if (ctx->mm_account)
		atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
}

static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	int ret;

	if (ctx->user) {
		ret = __io_account_mem(ctx->user, nr_pages);
		if (ret)
			return ret;
	}

	if (ctx->mm_account)
		atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);

	return 0;
}

static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
		       void __user *arg, unsigned index)
{
	struct iovec __user *src;

#ifdef CONFIG_COMPAT
	if (ctx->compat) {
		struct compat_iovec __user *ciovs;
		struct compat_iovec ciov;

		ciovs = (struct compat_iovec __user *) arg;
		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
			return -EFAULT;

		dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
		dst->iov_len = ciov.iov_len;
		return 0;
	}
#endif
	src = (struct iovec __user *) arg;
	if (copy_from_user(dst, &src[index], sizeof(*dst)))
		return -EFAULT;
	return 0;
}

static int io_buffer_validate(struct iovec *iov)
{
	unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);

	/*
	 * Don't impose further limits on the size and buffer
	 * constraints here, we'll -EINVAL later when IO is
	 * submitted if they are wrong.
	 */
	if (!iov->iov_base)
		return iov->iov_len ? -EFAULT : 0;
	if (!iov->iov_len)
		return -EFAULT;

	/* arbitrary limit, but we need something */
	if (iov->iov_len > SZ_1G)
		return -EFAULT;

	if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
		return -EOVERFLOW;

	return 0;
}

static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
{
	struct io_mapped_ubuf *imu = *slot;
	unsigned int i;

	if (imu != &dummy_ubuf) {
		for (i = 0; i < imu->nr_bvecs; i++)
			unpin_user_page(imu->bvec[i].bv_page);
		if (imu->acct_pages)
			io_unaccount_mem(ctx, imu->acct_pages);
		kvfree(imu);
	}
	*slot = NULL;
}

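/*
 * A dying rsrc node carries at most one resource in node->item. If the
 * resource was registered with a non-zero tag, removal is signalled to
 * userspace by posting an auxiliary CQE with that tag as user_data.
 */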
static void io_rsrc_put_work(struct io_rsrc_node *node)
{
	struct io_rsrc_put *prsrc = &node->item;

	if (prsrc->tag)
		io_post_aux_cqe(node->ctx, prsrc->tag, 0, 0);

	switch (node->type) {
	case IORING_RSRC_FILE:
		fput(prsrc->file);
		break;
	case IORING_RSRC_BUFFER:
		io_rsrc_buf_put(node->ctx, prsrc);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}

void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
	if (!io_alloc_cache_put(&ctx->rsrc_node_cache, &node->cache))
		kfree(node);
}

void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
	__must_hold(&node->ctx->uring_lock)
{
	struct io_ring_ctx *ctx = node->ctx;

	while (!list_empty(&ctx->rsrc_ref_list)) {
		node = list_first_entry(&ctx->rsrc_ref_list,
					struct io_rsrc_node, node);
		/* recycle ref nodes in order */
		if (node->refs)
			break;
		list_del(&node->node);

		if (likely(!node->empty))
			io_rsrc_put_work(node);
		io_rsrc_node_destroy(ctx, node);
	}
	if (list_empty(&ctx->rsrc_ref_list) && unlikely(ctx->rsrc_quiesce))
		wake_up_all(&ctx->rsrc_quiesce_wq);
}

struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
{
	struct io_rsrc_node *ref_node;
	struct io_cache_entry *entry;

	entry = io_alloc_cache_get(&ctx->rsrc_node_cache);
	if (entry) {
		ref_node = container_of(entry, struct io_rsrc_node, cache);
	} else {
		ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
		if (!ref_node)
			return NULL;
	}

	ref_node->ctx = ctx;
	ref_node->empty = 0;
	ref_node->refs = 1;
	return ref_node;
}

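/*
 * Quiesce: park the current rsrc node on ->rsrc_ref_list and wait until every
 * node on that list (i.e. every outstanding reference to the old table) has
 * been dropped. ->uring_lock is released while waiting, so callers must
 * assume other tasks may have run in the meantime.
 */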
__cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
				      struct io_ring_ctx *ctx)
{
	struct io_rsrc_node *backup;
	DEFINE_WAIT(we);
	int ret;

	/* As we may drop ->uring_lock, other task may have started quiesce */
	if (data->quiesce)
		return -ENXIO;

	backup = io_rsrc_node_alloc(ctx);
	if (!backup)
		return -ENOMEM;
	ctx->rsrc_node->empty = true;
	ctx->rsrc_node->type = -1;
	list_add_tail(&ctx->rsrc_node->node, &ctx->rsrc_ref_list);
	io_put_rsrc_node(ctx, ctx->rsrc_node);
	ctx->rsrc_node = backup;

	if (list_empty(&ctx->rsrc_ref_list))
		return 0;

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
		atomic_set(&ctx->cq_wait_nr, 1);
		smp_mb();
	}

	ctx->rsrc_quiesce++;
	data->quiesce = true;
	do {
		prepare_to_wait(&ctx->rsrc_quiesce_wq, &we, TASK_INTERRUPTIBLE);
		mutex_unlock(&ctx->uring_lock);

		ret = io_run_task_work_sig(ctx);
		if (ret < 0) {
			mutex_lock(&ctx->uring_lock);
			if (list_empty(&ctx->rsrc_ref_list))
				ret = 0;
			break;
		}

		schedule();
		__set_current_state(TASK_RUNNING);
		mutex_lock(&ctx->uring_lock);
		ret = 0;
	} while (!list_empty(&ctx->rsrc_ref_list));

	finish_wait(&ctx->rsrc_quiesce_wq, &we);
	data->quiesce = false;
	ctx->rsrc_quiesce--;

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
		atomic_set(&ctx->cq_wait_nr, 0);
		smp_mb();
	}
	return ret;
}

static void io_free_page_table(void **table, size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);

	for (i = 0; i < nr_tables; i++)
		kfree(table[i]);
	kfree(table);
}

static void io_rsrc_data_free(struct io_rsrc_data *data)
{
	size_t size = data->nr * sizeof(data->tags[0][0]);

	if (data->tags)
		io_free_page_table((void **)data->tags, size);
	kfree(data);
}

static __cold void **io_alloc_page_table(size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
	size_t init_size = size;
	void **table;

	table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
	if (!table)
		return NULL;

	for (i = 0; i < nr_tables; i++) {
		unsigned int this_size = min_t(size_t, size, PAGE_SIZE);

		table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
		if (!table[i]) {
			io_free_page_table(table, init_size);
			return NULL;
		}
		size -= this_size;
	}
	return table;
}

__cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, int type,
				     u64 __user *utags,
				     unsigned nr, struct io_rsrc_data **pdata)
{
	struct io_rsrc_data *data;
	int ret = 0;
	unsigned i;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
	if (!data->tags) {
		kfree(data);
		return -ENOMEM;
	}

	data->nr = nr;
	data->ctx = ctx;
	data->rsrc_type = type;
	if (utags) {
		ret = -EFAULT;
		for (i = 0; i < nr; i++) {
			u64 *tag_slot = io_get_tag_slot(data, i);

			if (copy_from_user(tag_slot, &utags[i],
					   sizeof(*tag_slot)))
				goto fail;
		}
	}
	*pdata = data;
	return 0;
fail:
	io_rsrc_data_free(data);
	return ret;
}

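/*
 * Per-slot file update: fd == -1 clears a slot, a valid fd replaces whatever
 * is installed (queueing removal of the old file first), and
 * IORING_REGISTER_FILES_SKIP leaves the slot untouched. Returns the number of
 * slots processed, or an error if nothing was updated.
 */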
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_rsrc_update2 *up,
				 unsigned nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	__s32 __user *fds = u64_to_user_ptr(up->data);
	struct io_rsrc_data *data = ctx->file_data;
	struct io_fixed_file *file_slot;
	int fd, i, err = 0;
	unsigned int done;

	if (!ctx->file_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_files)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		u64 tag = 0;

		if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
		    copy_from_user(&fd, &fds[done], sizeof(fd))) {
			err = -EFAULT;
			break;
		}
		if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
			err = -EINVAL;
			break;
		}
		if (fd == IORING_REGISTER_FILES_SKIP)
			continue;

		i = array_index_nospec(up->offset + done, ctx->nr_user_files);
		file_slot = io_fixed_file_slot(&ctx->file_table, i);

		if (file_slot->file_ptr) {
			err = io_queue_rsrc_removal(data, i,
						    io_slot_file(file_slot));
			if (err)
				break;
			file_slot->file_ptr = 0;
			io_file_bitmap_clear(&ctx->file_table, i);
		}
		if (fd != -1) {
			struct file *file = fget(fd);

			if (!file) {
				err = -EBADF;
				break;
			}
			/*
			 * Don't allow io_uring instances to be registered.
			 */
			if (io_is_uring_fops(file)) {
				fput(file);
				err = -EBADF;
				break;
			}
			*io_get_tag_slot(data, i) = tag;
			io_fixed_file_set(file_slot, file);
			io_file_bitmap_set(&ctx->file_table, i);
		}
	}
	return done ? done : err;
}

static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
				   struct io_uring_rsrc_update2 *up,
				   unsigned int nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
	struct page *last_hpage = NULL;
	__u32 done;
	int i, err;

	if (!ctx->buf_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_bufs)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		struct io_mapped_ubuf *imu;
		u64 tag = 0;

		err = io_copy_iov(ctx, &iov, iovs, done);
		if (err)
			break;
		if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
			err = -EFAULT;
			break;
		}
		err = io_buffer_validate(&iov);
		if (err)
			break;
		if (!iov.iov_base && tag) {
			err = -EINVAL;
			break;
		}
		err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
		if (err)
			break;

		i = array_index_nospec(up->offset + done, ctx->nr_user_bufs);
		if (ctx->user_bufs[i] != &dummy_ubuf) {
			err = io_queue_rsrc_removal(ctx->buf_data, i,
						    ctx->user_bufs[i]);
			if (unlikely(err)) {
				io_buffer_unmap(ctx, &imu);
				break;
			}
			ctx->user_bufs[i] = (struct io_mapped_ubuf *)&dummy_ubuf;
		}

		ctx->user_bufs[i] = imu;
		*io_get_tag_slot(ctx->buf_data, i) = tag;
	}
	return done ? done : err;
}

static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args)
{
	__u32 tmp;

	lockdep_assert_held(&ctx->uring_lock);

	if (check_add_overflow(up->offset, nr_args, &tmp))
		return -EOVERFLOW;

	switch (type) {
	case IORING_RSRC_FILE:
		return __io_sqe_files_update(ctx, up, nr_args);
	case IORING_RSRC_BUFFER:
		return __io_sqe_buffers_update(ctx, up, nr_args);
	}
	return -EINVAL;
}

int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
			     unsigned nr_args)
{
	struct io_uring_rsrc_update2 up;

	if (!nr_args)
		return -EINVAL;
	memset(&up, 0, sizeof(up));
	if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
		return -EFAULT;
	if (up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
}

int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned size, unsigned type)
{
	struct io_uring_rsrc_update2 up;

	if (size != sizeof(up))
		return -EINVAL;
	if (copy_from_user(&up, arg, sizeof(up)))
		return -EFAULT;
	if (!up.nr || up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, type, &up, up.nr);
}

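/*
 * Entry point for registration via struct io_uring_rsrc_register (e.g. the
 * IORING_REGISTER_FILES2 / IORING_REGISTER_BUFFERS2 opcodes). With
 * IORING_RSRC_REGISTER_SPARSE and a NULL data pointer the table is created
 * empty and filled in later through the update interface.
 */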
__cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int size, unsigned int type)
{
	struct io_uring_rsrc_register rr;

	/* keep it extendible */
	if (size != sizeof(rr))
		return -EINVAL;

	memset(&rr, 0, sizeof(rr));
	if (copy_from_user(&rr, arg, size))
		return -EFAULT;
	if (!rr.nr || rr.resv2)
		return -EINVAL;
	if (rr.flags & ~IORING_RSRC_REGISTER_SPARSE)
		return -EINVAL;

	switch (type) {
	case IORING_RSRC_FILE:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
					     rr.nr, u64_to_user_ptr(rr.tags));
	case IORING_RSRC_BUFFER:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
					       rr.nr, u64_to_user_ptr(rr.tags));
	}
	return -EINVAL;
}

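/*
 * IORING_OP_FILES_UPDATE: the SQE carries the destination slot offset in
 * ->off, the number of fds in ->len and a user pointer to the fd array in
 * ->addr. A rough userspace sketch (liburing, which is not part of this
 * file, wraps the same layout):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	int fds[2] = { new_fd, -1 };			// -1 clears a slot
 *	io_uring_prep_files_update(sqe, fds, 2, 10);	// update slots 10..11
 *	io_uring_submit(&ring);
 */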
int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);

	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	up->offset = READ_ONCE(sqe->off);
	up->nr_args = READ_ONCE(sqe->len);
	if (!up->nr_args)
		return -EINVAL;
	up->arg = READ_ONCE(sqe->addr);
	return 0;
}

static int io_files_update_with_index_alloc(struct io_kiocb *req,
					    unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	__s32 __user *fds = u64_to_user_ptr(up->arg);
	unsigned int done;
	struct file *file;
	int ret, fd;

	if (!req->ctx->file_data)
		return -ENXIO;

	for (done = 0; done < up->nr_args; done++) {
		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
			ret = -EFAULT;
			break;
		}

		file = fget(fd);
		if (!file) {
			ret = -EBADF;
			break;
		}
		ret = io_fixed_fd_install(req, issue_flags, file,
					  IORING_FILE_INDEX_ALLOC);
		if (ret < 0)
			break;
		if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
			__io_close_fixed(req->ctx, issue_flags, ret);
			ret = -EFAULT;
			break;
		}
	}

	if (done)
		return done;
	return ret;
}

int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_rsrc_update2 up2;
	int ret;

	up2.offset = up->offset;
	up2.data = up->arg;
	up2.nr = 0;
	up2.tags = 0;
	up2.resv = 0;
	up2.resv2 = 0;

	if (up->offset == IORING_FILE_INDEX_ALLOC) {
		ret = io_files_update_with_index_alloc(req, issue_flags);
	} else {
		io_ring_submit_lock(ctx, issue_flags);
		ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
						&up2, up->nr_args);
		io_ring_submit_unlock(ctx, issue_flags);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, void *rsrc)
{
	struct io_ring_ctx *ctx = data->ctx;
	struct io_rsrc_node *node = ctx->rsrc_node;
	u64 *tag_slot = io_get_tag_slot(data, idx);

	ctx->rsrc_node = io_rsrc_node_alloc(ctx);
	if (unlikely(!ctx->rsrc_node)) {
		ctx->rsrc_node = node;
		return -ENOMEM;
	}

	node->item.rsrc = rsrc;
	node->type = data->rsrc_type;
	node->item.tag = *tag_slot;
	*tag_slot = 0;
	list_add_tail(&node->node, &ctx->rsrc_ref_list);
	io_put_rsrc_node(ctx, node);
	return 0;
}

void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->nr_user_files; i++) {
		struct file *file = io_file_from_index(&ctx->file_table, i);

		if (!file)
			continue;
		io_file_bitmap_clear(&ctx->file_table, i);
		fput(file);
	}

	io_free_file_tables(&ctx->file_table);
	io_file_table_set_alloc_range(ctx, 0, 0);
	io_rsrc_data_free(ctx->file_data);
	ctx->file_data = NULL;
	ctx->nr_user_files = 0;
}

int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_files;
	int ret;

	if (!ctx->file_data)
		return -ENXIO;

	/*
	 * Quiesce may unlock ->uring_lock, and while it's not held
	 * prevent new requests using the table.
	 */
	ctx->nr_user_files = 0;
	ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
	ctx->nr_user_files = nr;
	if (!ret)
		__io_sqe_files_unregister(ctx);
	return ret;
}

int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			  unsigned nr_args, u64 __user *tags)
{
	__s32 __user *fds = (__s32 __user *) arg;
	struct file *file;
	int fd, ret;
	unsigned i;

	if (ctx->file_data)
		return -EBUSY;
	if (!nr_args)
		return -EINVAL;
	if (nr_args > IORING_MAX_FIXED_FILES)
		return -EMFILE;
	if (nr_args > rlimit(RLIMIT_NOFILE))
		return -EMFILE;
	ret = io_rsrc_data_alloc(ctx, IORING_RSRC_FILE, tags, nr_args,
				 &ctx->file_data);
	if (ret)
		return ret;

	if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
		io_rsrc_data_free(ctx->file_data);
		ctx->file_data = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
		struct io_fixed_file *file_slot;

		if (fds && copy_from_user(&fd, &fds[i], sizeof(fd))) {
			ret = -EFAULT;
			goto fail;
		}
		/* allow sparse sets */
		if (!fds || fd == -1) {
			ret = -EINVAL;
			if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
				goto fail;
			continue;
		}

		file = fget(fd);
		ret = -EBADF;
		if (unlikely(!file))
			goto fail;

		/*
		 * Don't allow io_uring instances to be registered.
		 */
		if (io_is_uring_fops(file)) {
			fput(file);
			goto fail;
		}
		file_slot = io_fixed_file_slot(&ctx->file_table, i);
		io_fixed_file_set(file_slot, file);
		io_file_bitmap_set(&ctx->file_table, i);
	}

	/* default it to the whole table */
	io_file_table_set_alloc_range(ctx, 0, ctx->nr_user_files);
	return 0;
fail:
	__io_sqe_files_unregister(ctx);
	return ret;
}

static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	io_buffer_unmap(ctx, &prsrc->buf);
}

void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned int i;

	for (i = 0; i < ctx->nr_user_bufs; i++)
		io_buffer_unmap(ctx, &ctx->user_bufs[i]);
	kfree(ctx->user_bufs);
	io_rsrc_data_free(ctx->buf_data);
	ctx->user_bufs = NULL;
	ctx->buf_data = NULL;
	ctx->nr_user_bufs = 0;
}

int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_bufs;
	int ret;

	if (!ctx->buf_data)
		return -ENXIO;

	/*
	 * Quiesce may unlock ->uring_lock, and while it's not held
	 * prevent new requests using the table.
	 */
	ctx->nr_user_bufs = 0;
	ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
	ctx->nr_user_bufs = nr;
	if (!ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}

/*
 * Not super efficient, but this is just a registration time. And we do cache
 * the last compound head, so generally we'll only do a full search if we don't
 * match that one.
 *
 * We check if the given compound head page has already been accounted, to
 * avoid double accounting it. This allows us to account the full size of the
 * page, not just the constituent pages of a huge page.
 */
static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
				  int nr_pages, struct page *hpage)
{
	int i, j;

	/* check current page array */
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i]))
			continue;
		if (compound_head(pages[i]) == hpage)
			return true;
	}

	/* check previously registered pages */
	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++) {
			if (!PageCompound(imu->bvec[j].bv_page))
				continue;
			if (compound_head(imu->bvec[j].bv_page) == hpage)
				return true;
		}
	}

	return false;
}

static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
				 int nr_pages, struct io_mapped_ubuf *imu,
				 struct page **last_hpage)
{
	int i, ret;

	imu->acct_pages = 0;
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i])) {
			imu->acct_pages++;
		} else {
			struct page *hpage;

			hpage = compound_head(pages[i]);
			if (hpage == *last_hpage)
				continue;
			*last_hpage = hpage;
			if (headpage_already_acct(ctx, pages, i, hpage))
				continue;
			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
		}
	}

	if (!imu->acct_pages)
		return 0;

	ret = io_account_mem(ctx, imu->acct_pages);
	if (ret)
		imu->acct_pages = 0;
	return ret;
}

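/*
 * Pin the user pages backing [ubuf, ubuf + len) with FOLL_LONGTERM: registered
 * buffers stay pinned until the buffer table entry is dropped. On success the
 * returned array holds *npages pinned pages; on failure any partially pinned
 * pages are released and an ERR_PTR() is returned instead.
 */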
struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages)
{
	unsigned long start, end, nr_pages;
	struct page **pages = NULL;
	int ret;

	end = (ubuf + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	start = ubuf >> PAGE_SHIFT;
	nr_pages = end - start;

	pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	mmap_read_lock(current->mm);
	ret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM, pages);
	mmap_read_unlock(current->mm);

	/* success, mapped all pages */
	if (ret == nr_pages) {
		*npages = nr_pages;
		return pages;
	}

	/* partial map, or didn't map anything */
	if (ret >= 0) {
		/* if we did partial map, release any pages we did get */
		if (ret)
			unpin_user_pages(pages, ret);
		ret = -EFAULT;
	}
	kvfree(pages);
	return ERR_PTR(ret);
}

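/*
 * Map one iovec into an io_mapped_ubuf. If all pinned pages sit in a single
 * folio (e.g. a huge page backing the whole range) they are collapsed into a
 * single bvec entry; otherwise one bvec per page is set up, with the first
 * entry carrying the sub-page offset of iov_base.
 */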
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage)
{
	struct io_mapped_ubuf *imu = NULL;
	struct page **pages = NULL;
	unsigned long off;
	size_t size;
	int ret, nr_pages, i;
	struct folio *folio = NULL;

	*pimu = (struct io_mapped_ubuf *)&dummy_ubuf;
	if (!iov->iov_base)
		return 0;

	ret = -ENOMEM;
	pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len,
				&nr_pages);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		pages = NULL;
		goto done;
	}

	/* If it's a huge page, try to coalesce them into a single bvec entry */
	if (nr_pages > 1) {
		folio = page_folio(pages[0]);
		for (i = 1; i < nr_pages; i++) {
			/*
			 * Pages must be consecutive and on the same folio for
			 * this to work
			 */
			if (page_folio(pages[i]) != folio ||
			    pages[i] != pages[i - 1] + 1) {
				folio = NULL;
				break;
			}
		}
		if (folio) {
			/*
			 * The pages are bound to the folio, it doesn't
			 * actually unpin them but drops all but one reference,
			 * which is usually put down by io_buffer_unmap().
			 * Note, needs a better helper.
			 */
			unpin_user_pages(&pages[1], nr_pages - 1);
			nr_pages = 1;
		}
	}

	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
	if (!imu)
		goto done;

	ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
	if (ret) {
		unpin_user_pages(pages, nr_pages);
		goto done;
	}

	off = (unsigned long) iov->iov_base & ~PAGE_MASK;
	size = iov->iov_len;
	/* store original address for later verification */
	imu->ubuf = (unsigned long) iov->iov_base;
	imu->ubuf_end = imu->ubuf + iov->iov_len;
	imu->nr_bvecs = nr_pages;
	*pimu = imu;
	ret = 0;

	if (folio) {
		bvec_set_page(&imu->bvec[0], pages[0], size, off);
		goto done;
	}
	for (i = 0; i < nr_pages; i++) {
		size_t vec_len;

		vec_len = min_t(size_t, size, PAGE_SIZE - off);
		bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
		off = 0;
		size -= vec_len;
	}
done:
	if (ret)
		kvfree(imu);
	kvfree(pages);
	return ret;
}

static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
{
	ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
	return ctx->user_bufs ? 0 : -ENOMEM;
}

int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags)
{
	struct page *last_hpage = NULL;
	struct io_rsrc_data *data;
	struct iovec iov;
	int i, ret;

	BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));

	if (ctx->user_bufs)
		return -EBUSY;
	if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
		return -EINVAL;
	ret = io_rsrc_data_alloc(ctx, IORING_RSRC_BUFFER, tags, nr_args, &data);
	if (ret)
		return ret;
	ret = io_buffers_map_alloc(ctx, nr_args);
	if (ret) {
		io_rsrc_data_free(data);
		return ret;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
		if (arg) {
			ret = io_copy_iov(ctx, &iov, arg, i);
			if (ret)
				break;
			ret = io_buffer_validate(&iov);
			if (ret)
				break;
		} else {
			memset(&iov, 0, sizeof(iov));
		}

		if (!iov.iov_base && *io_get_tag_slot(data, i)) {
			ret = -EINVAL;
			break;
		}

		ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
					     &last_hpage);
		if (ret)
			break;
	}

	WARN_ON_ONCE(ctx->buf_data);

	ctx->buf_data = data;
	if (ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}

int io_import_fixed(int ddir, struct iov_iter *iter,
		    struct io_mapped_ubuf *imu,
		    u64 buf_addr, size_t len)
{
	u64 buf_end;
	size_t offset;

	if (WARN_ON_ONCE(!imu))
		return -EFAULT;
	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
		return -EFAULT;
	/* not inside the mapped region */
	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
		return -EFAULT;

	/*
	 * Might not be a start of buffer, set size appropriately
	 * and advance us to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or the whole first
		 * bvec, just use iov_iter_advance(). This makes it easier
		 * since we can just skip the first segment, which may not
		 * be PAGE_SIZE aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset < bvec->bv_len) {
			/*
			 * Note, huge pages buffers consists of one large
			 * bvec entry and should always go this way. The other
			 * branch doesn't expect non PAGE_SIZE'd chunks.
			 */
			iter->bvec = bvec;
			iter->nr_segs = bvec->bv_len;
			iter->count -= offset;
			iter->iov_offset = offset;
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return 0;
}