// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fsnotify.h>
#include <linux/poll.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring/cmd.h>
#include <linux/indirect_call_wrapper.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "rsrc.h"
#include "poll.h"
#include "rw.h"
#include "alloc_cache.h"
struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u32				len;
	rwf_t				flags;
};
static inline bool io_file_supports_nowait(struct io_kiocb *req)
{
	return req->flags & REQ_F_SUPPORT_NOWAIT;
}
#ifdef CONFIG_COMPAT
static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
{
	struct compat_iovec __user *uiov;
	compat_ssize_t clen;

	uiov = u64_to_user_ptr(rw->addr);
	if (!access_ok(uiov, sizeof(*uiov)))
		return -EFAULT;
	if (__get_user(clen, &uiov->iov_len))
		return -EFAULT;
	if (clen < 0)
		return -EINVAL;

	rw->len = clen;
	return 0;
}
#endif
static int io_iov_buffer_select_prep(struct io_kiocb *req)
{
	struct iovec __user *uiov;
	struct iovec iov;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->len != 1)
		return -EINVAL;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return io_iov_compat_buffer_select_prep(rw);
#endif

	uiov = u64_to_user_ptr(rw->addr);
	if (copy_from_user(&iov, uiov, sizeof(*uiov)))
		return -EFAULT;
	rw->len = iov.iov_len;
	return 0;
}
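/*
 * Import the user memory described by the SQE into io->iter. Provided
 * buffers and non-vectored requests go through import_ubuf(), while
 * vectored requests reuse (or allocate) an iovec array that is kept in
 * the async data for later cleanup.
 */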
static int __io_import_iovec(int ddir, struct io_kiocb *req,
			     struct io_async_rw *io,
			     unsigned int issue_flags)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct iovec *iov;
	void __user *buf;
	int nr_segs, ret;
	size_t sqe_len;

	buf = u64_to_user_ptr(rw->addr);
	sqe_len = rw->len;

	if (!def->vectored || req->flags & REQ_F_BUFFER_SELECT) {
		if (io_do_buffer_select(req)) {
			buf = io_buffer_select(req, &sqe_len, issue_flags);
			if (!buf)
				return -ENOBUFS;
			rw->addr = (unsigned long) buf;
			rw->len = sqe_len;
		}

		return import_ubuf(ddir, buf, sqe_len, &io->iter);
	}

	if (io->free_iovec) {
		nr_segs = io->free_iov_nr;
		iov = io->free_iovec;
	} else {
		iov = &io->fast_iov;
		nr_segs = 1;
	}
	ret = __import_iovec(ddir, buf, sqe_len, nr_segs, &iov, &io->iter,
			     req->ctx->compat);
	if (unlikely(ret < 0))
		return ret;
	if (iov) {
		req->flags |= REQ_F_NEED_CLEANUP;
		io->free_iov_nr = io->iter.nr_segs;
		kfree(io->free_iovec);
		io->free_iovec = iov;
	}
	return 0;
}
static inline int io_import_iovec(int rw, struct io_kiocb *req,
				  struct io_async_rw *io,
				  unsigned int issue_flags)
{
	int ret;

	ret = __io_import_iovec(rw, req, io, issue_flags);
	if (unlikely(ret < 0))
		return ret;

	iov_iter_save_state(&io->iter, &io->iter_state);
	return 0;
}
static void io_rw_iovec_free(struct io_async_rw *rw)
{
	if (rw->free_iovec) {
		kfree(rw->free_iovec);
		rw->free_iov_nr = 0;
		rw->free_iovec = NULL;
	}
}
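/*
 * Recycle the async data: either stash it in ctx->rw_cache for reuse by a
 * later request, or free the iovec right away if the ring lock isn't held.
 */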
static void io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_rw *rw = req->async_data;
	struct iovec *iov;

	if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
		io_rw_iovec_free(rw);
		return;
	}
	iov = rw->free_iovec;
	if (io_alloc_cache_put(&req->ctx->rw_cache, rw)) {
		if (iov)
			kasan_mempool_poison_object(iov);
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}
static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
	/*
	 * Disable quick recycling for anything that's gone through io-wq.
	 * In theory, this should be fine to cleanup. However, some read or
	 * write iter handling touches the iovec AFTER having called into the
	 * handler, eg to reexpand or revert. This means we can have:
	 *
	 * task			io-wq
	 *   issue
	 *     punt to io-wq
	 *			issue
	 *			  blkdev_write_iter()
	 *			    ->ki_complete()
	 *			      io_complete_rw()
	 *			        queue tw complete
	 *  run tw
	 *    req_rw_cleanup
	 *			      iov_iter_count() <- look at iov_iter again
	 *
	 * which can lead to a UAF. This is only possible for io-wq offload
	 * as the cleanup can run in parallel. As io-wq is not the fast path,
	 * just leave cleanup to the end.
	 *
	 * This is really a bug in the core code that does this, any issue
	 * path should assume that a successful (or -EIOCBQUEUED) return can
	 * mean that the underlying data can be gone at any time. But that
	 * should be fixed separately, and then this check could be killed.
	 */
	if (!(req->flags & REQ_F_REFCOUNT)) {
		req->flags &= ~REQ_F_NEED_CLEANUP;
		io_rw_recycle(req, issue_flags);
	}
}
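/*
 * Allocate (or pull from the cache) the io_async_rw state for a request.
 * A cached entry may still own an iovec allocation, in which case the
 * request is flagged for cleanup so that allocation is returned or freed
 * later.
 */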
static int io_rw_alloc_async(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_async_rw *rw;

	rw = io_alloc_cache_get(&ctx->rw_cache);
	if (rw) {
		if (rw->free_iovec) {
			kasan_mempool_unpoison_object(rw->free_iovec,
				rw->free_iov_nr * sizeof(struct iovec));
			req->flags |= REQ_F_NEED_CLEANUP;
		}
		req->flags |= REQ_F_ASYNC_DATA;
		req->async_data = rw;
		goto done;
	}

	if (!io_alloc_async_data(req)) {
		rw = req->async_data;
		rw->free_iovec = NULL;
		rw->free_iov_nr = 0;
done:
		rw->bytes_done = 0;
		return 0;
	}

	return -ENOMEM;
}
static int io_prep_rw_setup(struct io_kiocb *req, int ddir, bool do_import)
{
	struct io_async_rw *rw;
	int ret;

	if (io_rw_alloc_async(req))
		return -ENOMEM;

	if (!do_import || io_do_buffer_select(req))
		return 0;

	rw = req->async_data;
	ret = io_import_iovec(ddir, req, rw, 0);
	if (unlikely(ret < 0))
		return ret;

	iov_iter_save_state(&rw->iter, &rw->iter_state);
	return 0;
}
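/*
 * Common prep for read/write requests: pull the file offset, buffer
 * address/length, rw flags and I/O priority out of the SQE, then set up
 * the async state (and import the buffer, if requested).
 */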
static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		      int ddir, bool do_import)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned ioprio;
	int ret;

	rw->kiocb.ki_pos = READ_ONCE(sqe->off);
	/* used for fixed read/write too - just read unconditionally */
	req->buf_index = READ_ONCE(sqe->buf_index);

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		rw->kiocb.ki_ioprio = ioprio;
	} else {
		rw->kiocb.ki_ioprio = get_current_ioprio();
	}
	rw->kiocb.dio_complete = NULL;

	rw->addr = READ_ONCE(sqe->addr);
	rw->len = READ_ONCE(sqe->len);
	rw->flags = READ_ONCE(sqe->rw_flags);
	return io_prep_rw_setup(req, ddir, do_import);
}
int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw(req, sqe, ITER_DEST, true);
}

int io_prep_write(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw(req, sqe, ITER_SOURCE, true);
}
static int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		       int ddir)
{
	const bool do_import = !(req->flags & REQ_F_BUFFER_SELECT);
	int ret;

	ret = io_prep_rw(req, sqe, ddir, do_import);
	if (unlikely(ret))
		return ret;
	if (do_import)
		return 0;

	/*
	 * Have to do this validation here, as this is in io_read() rw->len
	 * might have changed due to buffer selection
	 */
	return io_iov_buffer_select_prep(req);
}
int io_prep_readv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rwv(req, sqe, ITER_DEST);
}

int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rwv(req, sqe, ITER_SOURCE);
}
static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			    int ddir)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_async_rw *io;
	u16 index;
	int ret;

	ret = io_prep_rw(req, sqe, ddir, false);
	if (unlikely(ret))
		return ret;

	if (unlikely(req->buf_index >= ctx->nr_user_bufs))
		return -EFAULT;
	index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
	req->imu = ctx->user_bufs[index];
	io_req_set_rsrc_node(req, ctx, 0);

	io = req->async_data;
	ret = io_import_fixed(ddir, &io->iter, req->imu, rw->addr, rw->len);
	iov_iter_save_state(&io->iter, &io->iter_state);
	return ret;
}

int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw_fixed(req, sqe, ITER_DEST);
}

int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw_fixed(req, sqe, ITER_SOURCE);
}
/*
 * Multishot read is prepared just like a normal read/write request, only
 * difference is that we set the MULTISHOT flag.
 */
int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	int ret;

	/* must be used with provided buffers */
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;

	ret = io_prep_rw(req, sqe, ITER_DEST, false);
	if (unlikely(ret))
		return ret;

	if (rw->addr || rw->len)
		return -EINVAL;

	req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}
void io_readv_writev_cleanup(struct io_kiocb *req)
{
	io_rw_iovec_free(req->async_data);
}
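/*
 * Resolve the position to use for this I/O: an explicit offset from the
 * SQE, the file's current position (for an offset of -1 on a non-stream
 * file), or no position at all for stream-like files.
 */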
static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_pos != -1)
		return &rw->kiocb.ki_pos;

	if (!(req->file->f_mode & FMODE_STREAM)) {
		req->flags |= REQ_F_CUR_POS;
		rw->kiocb.ki_pos = req->file->f_pos;
		return &rw->kiocb.ki_pos;
	}

	rw->kiocb.ki_pos = 0;
	return NULL;
}
#ifdef CONFIG_BLOCK
static void io_resubmit_prep(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;

	iov_iter_restore(&io->iter, &io->iter_state);
}

static bool io_rw_should_reissue(struct io_kiocb *req)
{
	umode_t mode = file_inode(req->file)->i_mode;
	struct io_ring_ctx *ctx = req->ctx;

	if (!S_ISBLK(mode) && !S_ISREG(mode))
		return false;
	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
	    !(ctx->flags & IORING_SETUP_IOPOLL)))
		return false;
	/*
	 * If ref is dying, we might be running poll reap from the exit work.
	 * Don't attempt to reissue from that path, just let it fail with
	 * -EAGAIN.
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return false;
	/*
	 * Play it safe and assume not safe to re-import and reissue if we're
	 * not in the original thread group (or in task context).
	 */
	if (!same_thread_group(req->task, current) || !in_task())
		return false;
	return true;
}
#else
static void io_resubmit_prep(struct io_kiocb *req)
{
}
static bool io_rw_should_reissue(struct io_kiocb *req)
{
	return false;
}
#endif
static void io_req_end_write(struct io_kiocb *req)
{
	if (req->flags & REQ_F_ISREG) {
		struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

		kiocb_end_write(&rw->kiocb);
	}
}
/*
 * Trigger the notifications after having done some IO, and finish the write
 * accounting, if any.
 */
static void io_req_io_end(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_flags & IOCB_WRITE) {
		io_req_end_write(req);
		fsnotify_modify(req->file);
	} else {
		fsnotify_access(req->file);
	}
}
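/*
 * Handle a completion result that doesn't match what was submitted: either
 * flag the request for reissue (on -EAGAIN/-EOPNOTSUPP when that's safe),
 * or mark it failed and record the short result.
 */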
static bool __io_complete_rw_common(struct io_kiocb *req, long res)
{
	if (unlikely(res != req->cqe.res)) {
		if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
		    io_rw_should_reissue(req)) {
			/*
			 * Reissue will start accounting again, finish the
			 * current cycle.
			 */
			io_req_io_end(req);
			req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
			return true;
		}
		req_set_fail(req);
		req->cqe.res = res;
	}
	return false;
}
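/*
 * Fold in bytes already transferred by a previous (partial) attempt, so the
 * final CQE result reflects the total amount of I/O done for the request.
 */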
static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
{
	struct io_async_rw *io = req->async_data;

	/* add previously done IO, if any */
	if (req_has_async_data(req) && io->bytes_done > 0) {
		if (res < 0)
			res = io->bytes_done;
		else
			res += io->bytes_done;
	}
	return res;
}
void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;

	if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) {
		long res = kiocb->dio_complete(rw->kiocb.private);

		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	}

	io_req_io_end(req);

	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
		req->cqe.flags |= io_put_kbuf(req, 0);

	io_req_rw_cleanup(req, 0);
	io_req_task_complete(req, ts);
}
static void io_complete_rw(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
		if (__io_complete_rw_common(req, res))
			return;
		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	}
	req->io_task_work.func = io_req_rw_complete;
	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
}
static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (kiocb->ki_flags & IOCB_WRITE)
		io_req_end_write(req);
	if (unlikely(res != req->cqe.res)) {
		if (res == -EAGAIN && io_rw_should_reissue(req)) {
			req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
			return;
		}
		req->cqe.res = res;
	}

	/* order with io_iopoll_complete() checking ->iopoll_completed */
	smp_store_release(&req->iopoll_completed, 1);
}
static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
	/* IO was queued async, completion will happen later */
	if (ret == -EIOCBQUEUED)
		return;

	/* transform internal restart error codes */
	if (unlikely(ret < 0)) {
		switch (ret) {
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTARTNOHAND:
		case -ERESTART_RESTARTBLOCK:
			/*
			 * We can't just restart the syscall, since previously
			 * submitted sqes may already be in progress. Just fail
			 * this IO with EINTR.
			 */
			ret = -EINTR;
			break;
		}
	}

	INDIRECT_CALL_2(kiocb->ki_complete, io_complete_rw_iopoll,
			io_complete_rw, kiocb, ret);
}
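/*
 * Finish a request after the issue path returned: update the file position
 * if needed, and either complete it inline (synchronous completion) or hand
 * the result to the ->ki_complete() callback and reissue machinery.
 */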
static int kiocb_done(struct io_kiocb *req, ssize_t ret,
		      unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned final_ret = io_fixup_rw_res(req, ret);

	if (ret >= 0 && req->flags & REQ_F_CUR_POS)
		req->file->f_pos = rw->kiocb.ki_pos;
	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
		if (!__io_complete_rw_common(req, ret)) {
			/*
			 * Safe to call io_end from here as we're inline
			 * from the submission path.
			 */
			io_req_io_end(req);
			io_req_set_res(req, final_ret,
				       io_put_kbuf(req, issue_flags));
			io_req_rw_cleanup(req, issue_flags);
			return IOU_OK;
		}
	} else {
		io_rw_done(&rw->kiocb, ret);
	}

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		io_resubmit_prep(req);
		return -EAGAIN;
	}
	return IOU_ISSUE_SKIP_COMPLETE;
}
static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}
/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
{
	struct kiocb *kiocb = &rw->kiocb;
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;
	loff_t *ppos;

	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */
	if (kiocb->ki_flags & IOCB_HIPRI)
		return -EOPNOTSUPP;
	if ((kiocb->ki_flags & IOCB_NOWAIT) &&
	    !(kiocb->ki_filp->f_flags & O_NONBLOCK))
		return -EAGAIN;

	ppos = io_kiocb_ppos(kiocb);

	while (iov_iter_count(iter)) {
		void __user *addr;
		size_t len;
		ssize_t nr;

		if (iter_is_ubuf(iter)) {
			addr = iter->ubuf + iter->iov_offset;
			len = iov_iter_count(iter);
		} else if (!iov_iter_is_bvec(iter)) {
			addr = iter_iov_addr(iter);
			len = iter_iov_len(iter);
		} else {
			addr = u64_to_user_ptr(rw->addr);
			len = rw->len;
		}

		if (ddir == READ)
			nr = file->f_op->read(file, addr, len, ppos);
		else
			nr = file->f_op->write(file, addr, len, ppos);

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (!iov_iter_is_bvec(iter)) {
			iov_iter_advance(iter, nr);
		} else {
			rw->addr += nr;
			rw->len -= nr;
			if (!rw->len)
				break;
		}
		if (nr != len)
			break;
	}

	return ret;
}
/*
 * This is our waitqueue callback handler, registered through __folio_lock_async()
 * when we initially tried to do the IO with the iocb armed with our waitqueue.
 * This gets called when the page is unlocked, and we generally expect that to
 * happen when the page IO is completed and the page is now uptodate. This will
 * queue a task_work based retry of the operation, attempting to copy the data
 * again. If the latter fails because the page was NOT uptodate, then we will
 * do a thread based blocking retry of the operation. That's the unexpected
 * slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
			     int sync, void *arg)
{
	struct wait_page_queue *wpq;
	struct io_kiocb *req = wait->private;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct wait_page_key *key = arg;

	wpq = container_of(wait, struct wait_page_queue, wait);

	if (!wake_page_match(wpq, key))
		return 0;

	rw->kiocb.ki_flags &= ~IOCB_WAITQ;
	list_del_init(&wait->entry);
	io_req_task_queue(req);
	return 1;
}
/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to setup a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;
	struct wait_page_queue *wait = &io->wpq;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;

	/* never retry for NOWAIT, we just complete with -EAGAIN */
	if (req->flags & REQ_F_NOWAIT)
		return false;

	/* Only for buffered IO */
	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
		return false;

	/*
	 * just use poll if we can, and don't attempt if the fs doesn't
	 * support callback based unlocks
	 */
	if (io_file_can_poll(req) ||
	    !(req->file->f_op->fop_flags & FOP_BUFFER_RASYNC))
		return false;

	wait->wait.func = io_async_buf_func;
	wait->wait.private = req;
	wait->wait.flags = 0;
	INIT_LIST_HEAD(&wait->wait.entry);
	kiocb->ki_flags |= IOCB_WAITQ;
	kiocb->ki_flags &= ~IOCB_NOWAIT;
	kiocb->ki_waitq = wait;
	return true;
}
static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
{
	struct file *file = rw->kiocb.ki_filp;

	if (likely(file->f_op->read_iter))
		return file->f_op->read_iter(&rw->kiocb, iter);
	else if (file->f_op->read)
		return loop_rw_iter(READ, rw, iter);
	else
		return -EINVAL;
}
static bool need_complete_io(struct io_kiocb *req)
{
	return req->flags & REQ_F_ISREG ||
		S_ISBLK(file_inode(req->file)->i_mode);
}
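/*
 * Per-issue kiocb setup: validate the file mode, translate the SQE rw flags
 * into kiocb flags, and pick the completion handler (IOPOLL or regular) for
 * this ring.
 */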
static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (unlikely(!(file->f_mode & mode)))
		return -EBADF;

	if (!(req->flags & REQ_F_FIXED_FILE))
		req->flags |= io_file_get_flags(file);

	kiocb->ki_flags = file->f_iocb_flags;
	ret = kiocb_set_rw_flags(kiocb, rw->flags);
	if (unlikely(ret))
		return ret;
	kiocb->ki_flags |= IOCB_ALLOC_CACHE;

	/*
	 * If the file is marked O_NONBLOCK, still allow retry for it if it
	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
	 */
	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
	    ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
		req->flags |= REQ_F_NOWAIT;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
			return -EOPNOTSUPP;

		kiocb->private = NULL;
		kiocb->ki_flags |= IOCB_HIPRI;
		kiocb->ki_complete = io_complete_rw_iopoll;
		req->iopoll_completed = 0;
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
		kiocb->ki_complete = io_complete_rw;
	}

	return 0;
}
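/*
 * Core of the read path: import the buffer (for provided buffers), issue the
 * read, and deal with -EAGAIN, partial reads and the IOCB_WAITQ based retry.
 */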
static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io = req->async_data;
	struct kiocb *kiocb = &rw->kiocb;
	ssize_t ret;
	loff_t *ppos;

	if (io_do_buffer_select(req)) {
		ret = io_import_iovec(ITER_DEST, req, io, issue_flags);
		if (unlikely(ret < 0))
			return ret;
	}
	ret = io_rw_init_file(req, FMODE_READ);
	if (unlikely(ret))
		return ret;
	req->cqe.res = iov_iter_count(&io->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req)))
			return -EAGAIN;
		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
	if (unlikely(ret))
		return ret;

	ret = io_iter_do_read(rw, &io->iter);

	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
		req->flags &= ~REQ_F_REISSUE;
		/* If we can poll, just do that. */
		if (io_file_can_poll(req))
			return -EAGAIN;
		/* IOPOLL retry should happen for io-wq threads */
		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
			goto done;
		/* no retry on NONBLOCK nor RWF_NOWAIT */
		if (req->flags & REQ_F_NOWAIT)
			goto done;
		ret = 0;
	} else if (ret == -EIOCBQUEUED) {
		return IOU_ISSUE_SKIP_COMPLETE;
	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
		/* read all, failed, already did sync or don't want to retry */
		goto done;
	}

	/*
	 * Don't depend on the iter state matching what was consumed, or being
	 * untouched in case of error. Restore it and we'll advance it
	 * manually if we need to.
	 */
	iov_iter_restore(&io->iter, &io->iter_state);

	do {
		/*
		 * We end up here because of a partial read, either from
		 * above or inside this loop. Advance the iter by the bytes
		 * that were consumed.
		 */
		iov_iter_advance(&io->iter, ret);
		if (!iov_iter_count(&io->iter))
			break;
		io->bytes_done += ret;
		iov_iter_save_state(&io->iter, &io->iter_state);

		/* if we can retry, do so with the callbacks armed */
		if (!io_rw_should_retry(req)) {
			kiocb->ki_flags &= ~IOCB_WAITQ;
			return -EAGAIN;
		}

		req->cqe.res = iov_iter_count(&io->iter);
		/*
		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
		 * we get -EIOCBQUEUED, then we'll get a notification when the
		 * desired page gets unlocked. We can also get a partial read
		 * here, and if we do, then just retry at the new offset.
		 */
		ret = io_iter_do_read(rw, &io->iter);
		if (ret == -EIOCBQUEUED)
			return IOU_ISSUE_SKIP_COMPLETE;
		/* we got some bytes, but not all. retry. */
		kiocb->ki_flags &= ~IOCB_WAITQ;
		iov_iter_restore(&io->iter, &io->iter_state);
	} while (ret > 0);
done:
	/* it's faster to check here than delegate to kfree */
	return ret;
}
int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	ret = __io_read(req, issue_flags);
	if (ret >= 0)
		return kiocb_done(req, ret, issue_flags);

	return ret;
}
int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned int cflags = 0;
	int ret;

	/*
	 * Multishot MUST be used on a pollable file
	 */
	if (!io_file_can_poll(req))
		return -EBADFD;

	ret = __io_read(req, issue_flags);

	/*
	 * If the file doesn't support proper NOWAIT, then disable multishot
	 * and stay in single shot mode.
	 */
	if (!io_file_supports_nowait(req))
		req->flags &= ~REQ_F_APOLL_MULTISHOT;

	/*
	 * If we get -EAGAIN, recycle our buffer and just let normal poll
	 * handling arm it again.
	 */
	if (ret == -EAGAIN) {
		/*
		 * Reset rw->len to 0 again to avoid clamping future mshot
		 * reads, in case the buffer size varies.
		 */
		if (io_kbuf_recycle(req, issue_flags))
			rw->len = 0;
		if (issue_flags & IO_URING_F_MULTISHOT)
			return IOU_ISSUE_SKIP_COMPLETE;
		return -EAGAIN;
	}

	/*
	 * Any successful return value will keep the multishot read armed.
	 */
	if (ret > 0 && req->flags & REQ_F_APOLL_MULTISHOT) {
		/*
		 * Put our buffer and post a CQE. If we fail to post a CQE, then
		 * jump to the termination path. This request is then done.
		 */
		cflags = io_put_kbuf(req, issue_flags);
		rw->len = 0; /* similarly to above, reset len to 0 */

		if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				/*
				 * Force retry, as we might have more data to
				 * be read and otherwise it won't get retried
				 * until (if ever) another poll is triggered.
				 */
				io_poll_multishot_retry(req);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return -EAGAIN;
		}
	}

	/*
	 * Either an error, or we've hit overflow posting the CQE. For any
	 * multishot request, hitting overflow will terminate it.
	 */
	io_req_set_res(req, ret, cflags);
	io_req_rw_cleanup(req, issue_flags);
	if (issue_flags & IO_URING_F_MULTISHOT)
		return IOU_STOP_MULTISHOT;
	return IOU_OK;
}
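/*
 * Issue a write. Writes that would block are punted to io-wq for a blocking
 * retry, and a short write on a regular file or block device is also finished
 * from the worker, with bytes_done tracking what has been written so far.
 */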
int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io = req->async_data;
	struct kiocb *kiocb = &rw->kiocb;
	ssize_t ret, ret2;
	loff_t *ppos;

	ret = io_rw_init_file(req, FMODE_WRITE);
	if (unlikely(ret))
		return ret;
	req->cqe.res = iov_iter_count(&io->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req)))
			goto ret_eagain;

		/* Check if we can support NOWAIT. */
		if (!(kiocb->ki_flags & IOCB_DIRECT) &&
		    !(req->file->f_op->fop_flags & FOP_BUFFER_WASYNC) &&
		    (req->flags & REQ_F_ISREG))
			goto ret_eagain;

		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
	if (unlikely(ret))
		return ret;

	if (req->flags & REQ_F_ISREG)
		kiocb_start_write(kiocb);
	kiocb->ki_flags |= IOCB_WRITE;

	if (likely(req->file->f_op->write_iter))
		ret2 = req->file->f_op->write_iter(kiocb, &io->iter);
	else if (req->file->f_op->write)
		ret2 = loop_rw_iter(WRITE, rw, &io->iter);
	else
		ret2 = -EINVAL;

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		ret2 = -EAGAIN;
	}

	/*
	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
	 * retry them without IOCB_NOWAIT.
	 */
	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
		ret2 = -EAGAIN;
	/* no retry on NONBLOCK nor RWF_NOWAIT */
	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
		goto done;
	if (!force_nonblock || ret2 != -EAGAIN) {
		/* IOPOLL retry should happen for io-wq threads */
		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
			goto ret_eagain;

		if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
			trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
						   req->cqe.res, ret2);

			/* This is a partial write. The file pos has already been
			 * updated, setup the async struct to complete the request
			 * in the worker. Also update bytes_done to account for
			 * the bytes already written.
			 */
			iov_iter_save_state(&io->iter, &io->iter_state);
			io->bytes_done += ret2;

			if (kiocb->ki_flags & IOCB_WRITE)
				io_req_end_write(req);
			return -EAGAIN;
		}
done:
		return kiocb_done(req, ret2, issue_flags);
	} else {
ret_eagain:
		iov_iter_restore(&io->iter, &io->iter_state);
		if (kiocb->ki_flags & IOCB_WRITE)
			io_req_end_write(req);
		return -EAGAIN;
	}
}
void io_rw_fail(struct io_kiocb *req)
{
	int res;

	res = io_fixup_rw_res(req, req->cqe.res);
	io_req_set_res(req, res, req->cqe.flags);
}
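/*
 * Reap completions for IOPOLL rings: poll each request on the iopoll list,
 * batch-complete what has finished, and return the number of events found.
 */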
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{
	struct io_wq_work_node *pos, *start, *prev;
	unsigned int poll_flags = 0;
	DEFINE_IO_COMP_BATCH(iob);
	int nr_events = 0;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list.
	 */
	if (ctx->poll_multi_queue || force_nonspin)
		poll_flags |= BLK_POLL_ONESHOT;

	wq_list_for_each(pos, start, &ctx->iopoll_list) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
		struct file *file = req->file;
		int ret;

		/*
		 * Move completed and retryable entries to our local lists.
		 * If we find a request that requires polling, break out
		 * and complete those lists first, if we have entries there.
		 */
		if (READ_ONCE(req->iopoll_completed))
			break;

		if (req->opcode == IORING_OP_URING_CMD) {
			struct io_uring_cmd *ioucmd;

			ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
			ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
							   poll_flags);
		} else {
			struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

			ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
		}
		if (unlikely(ret < 0))
			return ret;
		else if (ret)
			poll_flags |= BLK_POLL_ONESHOT;

		/* iopoll may have completed current req */
		if (!rq_list_empty(iob.req_list) ||
		    READ_ONCE(req->iopoll_completed))
			break;
	}

	if (!rq_list_empty(iob.req_list))
		iob.complete(&iob);
	else if (!pos)
		return 0;

	prev = start;
	wq_list_for_each_resume(pos, prev) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);

		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
		if (!smp_load_acquire(&req->iopoll_completed))
			break;
		nr_events++;
		req->cqe.flags = io_put_kbuf(req, 0);
		if (req->opcode != IORING_OP_URING_CMD)
			io_req_rw_cleanup(req, 0);
	}
	if (unlikely(!nr_events))
		return 0;

	pos = start ? start->next : ctx->iopoll_list.first;
	wq_list_cut(&ctx->iopoll_list, prev, start);

	if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs)))
		return 0;
	ctx->submit_state.compl_reqs.first = pos;
	__io_submit_flush_completions(ctx);
	return nr_events;
}
void io_rw_cache_free(const void *entry)
{
	struct io_async_rw *rw = (struct io_async_rw *) entry;

	if (rw->free_iovec) {
		kasan_mempool_unpoison_object(rw->free_iovec,
				rw->free_iov_nr * sizeof(struct iovec));
		io_rw_iovec_free(rw);
	}
	kfree(rw);
}