// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/blk-mq.h>
#include <linux/slab.h>
#include <linux/fsnotify.h>
#include <linux/poll.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

/* NOTE: kiocb has the file as the first member, so don't do it here */

static inline bool io_file_supports_nowait(struct io_kiocb *req)
{
	return req->flags & REQ_F_SUPPORT_NOWAIT;
}

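/*
 * For a compat (32-bit) task using buffer select with readv/writev, pull the
 * buffer length out of the single compat iovec the SQE points at.
 */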
static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
{
	struct compat_iovec __user *uiov;
	compat_ssize_t clen;

	uiov = u64_to_user_ptr(rw->addr);
	if (!access_ok(uiov, sizeof(*uiov)))
		return -EFAULT;
	if (__get_user(clen, &uiov->iov_len))
		return -EFAULT;
	if (clen < 0)
		return -EINVAL;

	rw->len = clen;
	return 0;
}

static int io_iov_buffer_select_prep(struct io_kiocb *req)
{
	struct iovec __user *uiov;
	struct iovec iov;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->len != 1)
		return -EINVAL;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return io_iov_compat_buffer_select_prep(rw);
#endif

	uiov = u64_to_user_ptr(rw->addr);
	if (copy_from_user(&iov, uiov, sizeof(*uiov)))
		return -EFAULT;
	rw->len = iov.iov_len;
	return 0;
}

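/*
 * Common SQE prep for the read/write opcodes: read the offset, buffer index,
 * ioprio, address, length and rw_flags from the SQE into the io_rw command.
 */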
int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned ioprio;
	int ret;

	rw->kiocb.ki_pos = READ_ONCE(sqe->off);
	/* used for fixed read/write too - just read unconditionally */
	req->buf_index = READ_ONCE(sqe->buf_index);

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		rw->kiocb.ki_ioprio = ioprio;
	} else {
		rw->kiocb.ki_ioprio = get_current_ioprio();
	}
	rw->kiocb.dio_complete = NULL;

	rw->addr = READ_ONCE(sqe->addr);
	rw->len = READ_ONCE(sqe->len);
	rw->flags = READ_ONCE(sqe->rw_flags);
	return 0;
}

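/* Prep for the vectored readv/writev variants. */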
int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	int ret;

	ret = io_prep_rw(req, sqe);
	if (unlikely(ret))
		return ret;

	/*
	 * Have to do this validation here, as in io_read() rw->len might
	 * have changed due to buffer selection
	 */
	if (req->flags & REQ_F_BUFFER_SELECT)
		return io_iov_buffer_select_prep(req);

	return 0;
}

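/*
 * Prep for READ_FIXED/WRITE_FIXED: look up the registered buffer that
 * buf_index refers to and assign the rsrc node for it.
 */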
int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;
	u16 index;
	int ret;

	ret = io_prep_rw(req, sqe);
	if (unlikely(ret))
		return ret;

	if (unlikely(req->buf_index >= ctx->nr_user_bufs))
		return -EFAULT;
	index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
	req->imu = ctx->user_bufs[index];
	io_req_set_rsrc_node(req, ctx, 0);
	return 0;
}

/*
 * Multishot read is prepared just like a normal read/write request, the
 * only difference is that we set the MULTISHOT flag.
 */
int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	int ret;

	/* must be used with provided buffers */
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;

	ret = io_prep_rw(req, sqe);
	if (unlikely(ret))
		return ret;

	if (rw->addr || rw->len)
		return -EINVAL;

	req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

void io_readv_writev_cleanup(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;

	kfree(io->free_iovec);
}

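/*
 * Complete a kiocb we issued ourselves: restart errors can't actually be
 * restarted, so they are turned into -EINTR before ->ki_complete() is called.
 */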
static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
	switch (ret) {
	case -EIOCBQUEUED:
		break;
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * We can't just restart the syscall, since previously
		 * submitted sqes may already be in progress. Just fail this
		 * IO with EINTR.
		 */
		ret = -EINTR;
		fallthrough;
	default:
		kiocb->ki_complete(kiocb, ret);
	}
}

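/*
 * Pick the file position for this request: the explicit SQE offset if one was
 * given, the file's current position for non-stream files (flagging that
 * ->f_pos must be updated on completion), or no position for stream files.
 */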
static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_pos != -1)
		return &rw->kiocb.ki_pos;

	if (!(req->file->f_mode & FMODE_STREAM)) {
		req->flags |= REQ_F_CUR_POS;
		rw->kiocb.ki_pos = req->file->f_pos;
		return &rw->kiocb.ki_pos;
	}

	rw->kiocb.ki_pos = 0;
	return NULL;
}

#ifdef CONFIG_BLOCK
static void io_req_task_queue_reissue(struct io_kiocb *req)
{
	req->io_task_work.func = io_queue_iowq;
	io_req_task_work_add(req);
}

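/* Get the request ready to be reissued, restoring any saved iter state. */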
static bool io_resubmit_prep(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;

	if (!req_has_async_data(req))
		return !io_req_prep_async(req);
	iov_iter_restore(&io->s.iter, &io->s.iter_state);
	return true;
}

static bool io_rw_should_reissue(struct io_kiocb *req)
{
	umode_t mode = file_inode(req->file)->i_mode;
	struct io_ring_ctx *ctx = req->ctx;

	if (!S_ISBLK(mode) && !S_ISREG(mode))
		return false;
	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
	    !(ctx->flags & IORING_SETUP_IOPOLL)))
		return false;
	/*
	 * If ref is dying, we might be running poll reap from the exit work.
	 * Don't attempt to reissue from that path, just let it fail with
	 * -EAGAIN.
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return false;
	/*
	 * Play it safe and assume not safe to re-import and reissue if we're
	 * not in the original thread group (or in task context).
	 */
	if (!same_thread_group(req->task, current) || !in_task())
		return false;
	return true;
}

#else
static bool io_resubmit_prep(struct io_kiocb *req)
{
	return false;
}
static bool io_rw_should_reissue(struct io_kiocb *req)
{
	return false;
}
#endif

static void io_req_end_write(struct io_kiocb *req)
{
	if (req->flags & REQ_F_ISREG) {
		struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

		kiocb_end_write(&rw->kiocb);
	}
}

/*
 * Trigger the notifications after having done some IO, and finish the write
 * accounting, if any.
 */
static void io_req_io_end(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_flags & IOCB_WRITE) {
		io_req_end_write(req);
		fsnotify_modify(req->file);
	} else {
		fsnotify_access(req->file);
	}
}

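/*
 * Handle a completion that didn't return the full expected result: either
 * mark the request for reissue (on -EAGAIN/-EOPNOTSUPP when that is safe) or
 * flag it as failed. Returns true if the request will be reissued.
 */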
static bool __io_complete_rw_common(struct io_kiocb *req, long res)
{
	if (unlikely(res != req->cqe.res)) {
		if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
		    io_rw_should_reissue(req)) {
			/*
			 * Reissue will start accounting again, finish the
			 * current cycle.
			 */
			io_req_io_end(req);
			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
			return true;
		}
		req_set_fail(req);
		req->cqe.res = res;
	}
	return false;
}

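/* Fold bytes completed by an earlier partial attempt into the result. */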
static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
{
	struct io_async_rw *io = req->async_data;

	/* add previously done IO, if any */
	if (req_has_async_data(req) && io->bytes_done > 0) {
		if (res < 0)
			res = io->bytes_done;
		else
			res += io->bytes_done;
	}
	return res;
}

void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;

	if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) {
		long res = kiocb->dio_complete(rw->kiocb.private);

		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	}

	io_req_io_end(req);

	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
		unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;

		req->cqe.flags |= io_put_kbuf(req, issue_flags);
	}
	io_req_task_complete(req, ts);
}

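/*
 * ->ki_complete() callback for normal (non-IOPOLL) IO, punting the final
 * completion work to task context.
 */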
static void io_complete_rw(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
		if (__io_complete_rw_common(req, res))
			return;
		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	}
	req->io_task_work.func = io_req_rw_complete;
	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (kiocb->ki_flags & IOCB_WRITE)
		io_req_end_write(req);
	if (unlikely(res != req->cqe.res)) {
		if (res == -EAGAIN && io_rw_should_reissue(req)) {
			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
			return;
		}
		req->cqe.res = res;
	}

	/* order with io_iopoll_complete() checking ->iopoll_completed */
	smp_store_release(&req->iopoll_completed, 1);
}

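/*
 * Finish an inline (submission path) completion: update the file position,
 * post the result, or arrange for the request to be reissued or failed.
 */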
static int kiocb_done(struct io_kiocb *req, ssize_t ret,
		      unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned final_ret = io_fixup_rw_res(req, ret);

	if (ret >= 0 && req->flags & REQ_F_CUR_POS)
		req->file->f_pos = rw->kiocb.ki_pos;
	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
		if (!__io_complete_rw_common(req, ret)) {
			/*
			 * Safe to call io_end from here as we're inline
			 * from the submission path.
			 */
			io_req_io_end(req);
			io_req_set_res(req, final_ret,
				       io_put_kbuf(req, issue_flags));
			return IOU_OK;
		}
	} else {
		io_rw_done(&rw->kiocb, ret);
	}

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		if (io_resubmit_prep(req))
			io_req_task_queue_reissue(req);
		else
			io_req_task_queue_fail(req, final_ret);
	}
	return IOU_ISSUE_SKIP_COMPLETE;
}

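/*
 * Set up the iov_iter for this request: registered buffers for the fixed
 * opcodes, a single user buffer (possibly a selected/provided one) for the
 * non-vectored opcodes, and a full iovec import otherwise.
 */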
static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
				       struct io_rw_state *s,
				       unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct iov_iter *iter = &s->iter;
	u8 opcode = req->opcode;
	struct iovec *iovec;
	void __user *buf;
	size_t sqe_len;
	ssize_t ret;

	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
		ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	buf = u64_to_user_ptr(rw->addr);
	sqe_len = rw->len;

	if (!io_issue_defs[opcode].vectored || req->flags & REQ_F_BUFFER_SELECT) {
		if (io_do_buffer_select(req)) {
			buf = io_buffer_select(req, &sqe_len, issue_flags);
			if (!buf)
				return ERR_PTR(-ENOBUFS);
			rw->addr = (unsigned long) buf;
			rw->len = sqe_len;
		}

		ret = import_ubuf(ddir, buf, sqe_len, iter);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	iovec = s->fast_iov;
	ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
			     req->ctx->compat);
	if (unlikely(ret < 0))
		return ERR_PTR(ret);
	return iovec;
}

static inline int io_import_iovec(int rw, struct io_kiocb *req,
				  struct iovec **iovec, struct io_rw_state *s,
				  unsigned int issue_flags)
{
	*iovec = __io_import_iovec(rw, req, s, issue_flags);
	if (IS_ERR(*iovec))
		return PTR_ERR(*iovec);

	iov_iter_save_state(&s->iter, &s->iter_state);
	return 0;
}

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
{
	struct kiocb *kiocb = &rw->kiocb;
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;
	loff_t *ppos;

	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */
	if (kiocb->ki_flags & IOCB_HIPRI)
		return -EOPNOTSUPP;
	if ((kiocb->ki_flags & IOCB_NOWAIT) &&
	    !(kiocb->ki_filp->f_flags & O_NONBLOCK))
		return -EAGAIN;

	ppos = io_kiocb_ppos(kiocb);

	while (iov_iter_count(iter)) {
		void __user *addr;
		size_t len;
		ssize_t nr;

		if (iter_is_ubuf(iter)) {
			addr = iter->ubuf + iter->iov_offset;
			len = iov_iter_count(iter);
		} else if (!iov_iter_is_bvec(iter)) {
			addr = iter_iov_addr(iter);
			len = iter_iov_len(iter);
		} else {
			addr = u64_to_user_ptr(rw->addr);
			len = rw->len;
		}

		if (ddir == READ)
			nr = file->f_op->read(file, addr, len, ppos);
		else
			nr = file->f_op->write(file, addr, len, ppos);

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (!iov_iter_is_bvec(iter)) {
			iov_iter_advance(iter, nr);
		} else {
			rw->addr += nr;
			rw->len -= nr;
			if (!rw->len)
				break;
		}
		if (nr != len)
			break;
	}

	return ret;
}

static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
			  const struct iovec *fast_iov, struct iov_iter *iter)
{
	struct io_async_rw *io = req->async_data;

	memcpy(&io->s.iter, iter, sizeof(*iter));
	io->free_iovec = iovec;
	io->bytes_done = 0;
	/* can only be fixed buffers, no need to do anything */
	if (iov_iter_is_bvec(iter) || iter_is_ubuf(iter))
		return;
	if (!iovec) {
		unsigned iov_off = 0;

		io->s.iter.__iov = io->s.fast_iov;
		if (iter->__iov != fast_iov) {
			iov_off = iter_iov(iter) - fast_iov;
			io->s.iter.__iov += iov_off;
		}
		if (io->s.fast_iov != fast_iov)
			memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off,
			       sizeof(struct iovec) * iter->nr_segs);
	} else {
		req->flags |= REQ_F_NEED_CLEANUP;
	}
}

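/*
 * Allocate and fill the async data for a read/write so it can be retried
 * later from a context where the original on-stack state is gone.
 */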
static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
			     struct io_rw_state *s, bool force)
{
	if (!force && !io_cold_defs[req->opcode].prep_async)
		return 0;
	/* opcode type doesn't need async data */
	if (!io_cold_defs[req->opcode].async_size)
		return 0;
	if (!req_has_async_data(req)) {
		struct io_async_rw *iorw;

		if (io_alloc_async_data(req)) {
			kfree(iovec);
			return -ENOMEM;
		}

		io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
		iorw = req->async_data;
		/* we've copied and mapped the iter, ensure state is saved */
		iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
	}
	return 0;
}

static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
{
	struct io_async_rw *iorw = req->async_data;
	struct iovec *iov;
	int ret;

	/* submission path, ->uring_lock should already be taken */
	ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
	if (unlikely(ret < 0))
		return ret;

	iorw->bytes_done = 0;
	iorw->free_iovec = iov;
	if (iov)
		req->flags |= REQ_F_NEED_CLEANUP;

	return 0;
}

int io_readv_prep_async(struct io_kiocb *req)
{
	return io_rw_prep_async(req, ITER_DEST);
}

int io_writev_prep_async(struct io_kiocb *req)
{
	return io_rw_prep_async(req, ITER_SOURCE);
}

/*
 * This is our waitqueue callback handler, registered through __folio_lock_async()
 * when we initially tried to do the IO with the iocb and armed our waitqueue.
 * This gets called when the page is unlocked, and we generally expect that to
 * happen when the page IO is completed and the page is now uptodate. This will
 * queue a task_work based retry of the operation, attempting to copy the data
 * again. If the latter fails because the page was NOT uptodate, then we will
 * do a thread based blocking retry of the operation. That's the unexpected
 * slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
			     int sync, void *arg)
{
	struct wait_page_queue *wpq;
	struct io_kiocb *req = wait->private;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct wait_page_key *key = arg;

	wpq = container_of(wait, struct wait_page_queue, wait);

	if (!wake_page_match(wpq, key))
		return 0;

	rw->kiocb.ki_flags &= ~IOCB_WAITQ;
	list_del_init(&wait->entry);
	io_req_task_queue(req);
	return 1;
}

/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to setup a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed; in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;
	struct wait_page_queue *wait = &io->wpq;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;

	/* never retry for NOWAIT, we just complete with -EAGAIN */
	if (req->flags & REQ_F_NOWAIT)
		return false;

	/* Only for buffered IO */
	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
		return false;

	/*
	 * just use poll if we can, and don't attempt if the fs doesn't
	 * support callback based unlocks
	 */
	if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
		return false;

	wait->wait.func = io_async_buf_func;
	wait->wait.private = req;
	wait->wait.flags = 0;
	INIT_LIST_HEAD(&wait->wait.entry);
	kiocb->ki_flags |= IOCB_WAITQ;
	kiocb->ki_flags &= ~IOCB_NOWAIT;
	kiocb->ki_waitq = wait;
	return true;
}

static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
{
	struct file *file = rw->kiocb.ki_filp;

	if (likely(file->f_op->read_iter))
		return call_read_iter(file, &rw->kiocb, iter);
	else if (file->f_op->read)
		return loop_rw_iter(READ, rw, iter);
	else
		return -EINVAL;
}

static bool need_complete_io(struct io_kiocb *req)
{
	return req->flags & REQ_F_ISREG ||
		S_ISBLK(file_inode(req->file)->i_mode);
}

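/*
 * Per-issue kiocb setup: flags, nowait handling and the completion callback,
 * which depends on whether the ring was set up with IOPOLL.
 */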
static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (unlikely(!file || !(file->f_mode & mode)))
		return -EBADF;

	if (!(req->flags & REQ_F_FIXED_FILE))
		req->flags |= io_file_get_flags(file);

	kiocb->ki_flags = file->f_iocb_flags;
	ret = kiocb_set_rw_flags(kiocb, rw->flags);
	if (unlikely(ret))
		return ret;
	kiocb->ki_flags |= IOCB_ALLOC_CACHE;

	/*
	 * If the file is marked O_NONBLOCK, still allow retry for it if it
	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
	 */
	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
	    ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
		req->flags |= REQ_F_NOWAIT;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
			return -EOPNOTSUPP;

		kiocb->private = NULL;
		kiocb->ki_flags |= IOCB_HIPRI;
		kiocb->ki_complete = io_complete_rw_iopoll;
		req->iopoll_completed = 0;
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
		kiocb->ki_complete = io_complete_rw;
	}

	return 0;
}

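/*
 * Core read path: import the buffer, issue the read, and handle -EAGAIN,
 * partial reads and buffered retries as needed.
 */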
static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_rw_state __s, *s = &__s;
	struct iovec *iovec;
	struct kiocb *kiocb = &rw->kiocb;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct io_async_rw *io;
	ssize_t ret, ret2;
	loff_t *ppos;

	if (!req_has_async_data(req)) {
		ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
		if (unlikely(ret < 0))
			return ret;
	} else {
		io = req->async_data;
		s = &io->s;

		/*
		 * Safe and required to re-import if we're using provided
		 * buffers, as we dropped the selected one before retry.
		 */
		if (io_do_buffer_select(req)) {
			ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
			if (unlikely(ret < 0))
				return ret;
		}

		/*
		 * We come here from an earlier attempt, restore our state to
		 * match in case it doesn't. It's cheap enough that we don't
		 * need to make this conditional.
		 */
		iov_iter_restore(&s->iter, &s->iter_state);
		iovec = NULL;
	}
	ret = io_rw_init_file(req, FMODE_READ);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}
	req->cqe.res = iov_iter_count(&s->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req))) {
			ret = io_setup_async_rw(req, iovec, s, true);
			return ret ?: -EAGAIN;
		}
		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}

	ret = io_iter_do_read(rw, &s->iter);

	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
		req->flags &= ~REQ_F_REISSUE;
		/*
		 * If we can poll, just do that. For a vectored read, we'll
		 * need to copy state first.
		 */
		if (file_can_poll(req->file) && !io_issue_defs[req->opcode].vectored)
			return -EAGAIN;
		/* IOPOLL retry should happen for io-wq threads */
		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
			goto done;
		/* no retry on NONBLOCK nor RWF_NOWAIT */
		if (req->flags & REQ_F_NOWAIT)
			goto done;
		ret = 0;
	} else if (ret == -EIOCBQUEUED) {
		if (iovec)
			kfree(iovec);
		return IOU_ISSUE_SKIP_COMPLETE;
	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
		/* read all, failed, already did sync or don't want to retry */
		goto done;
	}

	/*
	 * Don't depend on the iter state matching what was consumed, or being
	 * untouched in case of error. Restore it and we'll advance it
	 * manually if we need to.
	 */
	iov_iter_restore(&s->iter, &s->iter_state);

	ret2 = io_setup_async_rw(req, iovec, s, true);
	iovec = NULL;
	if (ret2) {
		ret = ret > 0 ? ret : ret2;
		goto done;
	}

	io = req->async_data;
	s = &io->s;
	/*
	 * Now use our persistent iterator and state, if we aren't already.
	 * We've restored and mapped the iter to match.
	 */

	do {
		/*
		 * We end up here because of a partial read, either from
		 * above or inside this loop. Advance the iter by the bytes
		 * that were consumed.
		 */
		iov_iter_advance(&s->iter, ret);
		if (!iov_iter_count(&s->iter))
			break;
		io->bytes_done += ret;
		iov_iter_save_state(&s->iter, &s->iter_state);

		/* if we can retry, do so with the callbacks armed */
		if (!io_rw_should_retry(req)) {
			kiocb->ki_flags &= ~IOCB_WAITQ;
			return -EAGAIN;
		}

		req->cqe.res = iov_iter_count(&s->iter);
		/*
		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
		 * we get -EIOCBQUEUED, then we'll get a notification when the
		 * desired page gets unlocked. We can also get a partial read
		 * here, and if we do, then just retry at the new offset.
		 */
		ret = io_iter_do_read(rw, &s->iter);
		if (ret == -EIOCBQUEUED)
			return IOU_ISSUE_SKIP_COMPLETE;
		/* we got some bytes, but not all. retry. */
		kiocb->ki_flags &= ~IOCB_WAITQ;
		iov_iter_restore(&s->iter, &s->iter_state);
	} while (ret > 0);
done:
	/* it's faster to check here than to delegate to kfree */
	if (iovec)
		kfree(iovec);
	return ret;
}

int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	ret = __io_read(req, issue_flags);
	if (ret >= 0)
		return kiocb_done(req, ret, issue_flags);

	return ret;
}

int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned int cflags = 0;
	int ret;

	/*
	 * Multishot MUST be used on a pollable file
	 */
	if (!file_can_poll(req->file))
		return -EBADFD;

	ret = __io_read(req, issue_flags);

	/*
	 * If we get -EAGAIN, recycle our buffer and just let normal poll
	 * handling arm it.
	 */
	if (ret == -EAGAIN) {
		/*
		 * Reset rw->len to 0 again to avoid clamping future mshot
		 * reads, in case the buffer size varies.
		 */
		if (io_kbuf_recycle(req, issue_flags))
			rw->len = 0;
		return -EAGAIN;
	}

	/*
	 * Any successful return value will keep the multishot read armed.
	 */
	if (ret > 0 && req->flags & REQ_F_APOLL_MULTISHOT) {
		/*
		 * Put our buffer and post a CQE. If we fail to post a CQE, then
		 * jump to the termination path. This request is then done.
		 */
		cflags = io_put_kbuf(req, issue_flags);
		rw->len = 0;	/* similarly to above, reset len to 0 */

		if (io_fill_cqe_req_aux(req,
					issue_flags & IO_URING_F_COMPLETE_DEFER,
					ret, cflags | IORING_CQE_F_MORE)) {
			if (issue_flags & IO_URING_F_MULTISHOT)
				return IOU_ISSUE_SKIP_COMPLETE;
			return -EAGAIN;
		}
	}

	/*
	 * Either an error, or we've hit overflow posting the CQE. For any
	 * multishot request, hitting overflow will terminate it.
	 */
	io_req_set_res(req, ret, cflags);
	if (issue_flags & IO_URING_F_MULTISHOT)
		return IOU_STOP_MULTISHOT;
	return IOU_OK;
}

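/*
 * Write path: mirrors __io_read(), with additional handling for write
 * accounting and for short writes that must be finished from io-wq.
 */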
int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_rw_state __s, *s = &__s;
	struct iovec *iovec;
	struct kiocb *kiocb = &rw->kiocb;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	ssize_t ret, ret2;
	loff_t *ppos;

	if (!req_has_async_data(req)) {
		ret = io_import_iovec(ITER_SOURCE, req, &iovec, s, issue_flags);
		if (unlikely(ret < 0))
			return ret;
	} else {
		struct io_async_rw *io = req->async_data;

		s = &io->s;
		iov_iter_restore(&s->iter, &s->iter_state);
		iovec = NULL;
	}
	ret = io_rw_init_file(req, FMODE_WRITE);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}
	req->cqe.res = iov_iter_count(&s->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req)))
			goto copy_iov;

		/* File path supports NOWAIT for non-direct_IO only for block devices. */
		if (!(kiocb->ki_flags & IOCB_DIRECT) &&
		    !(kiocb->ki_filp->f_mode & FMODE_BUF_WASYNC) &&
		    (req->flags & REQ_F_ISREG))
			goto copy_iov;

		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}

	if (req->flags & REQ_F_ISREG)
		kiocb_start_write(kiocb);
	kiocb->ki_flags |= IOCB_WRITE;

	if (likely(req->file->f_op->write_iter))
		ret2 = call_write_iter(req->file, kiocb, &s->iter);
	else if (req->file->f_op->write)
		ret2 = loop_rw_iter(WRITE, rw, &s->iter);
	else
		ret2 = -EINVAL;

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		ret2 = -EAGAIN;
	}

	/*
	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
	 * retry them without IOCB_NOWAIT.
	 */
	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
		ret2 = -EAGAIN;
	/* no retry on NONBLOCK nor RWF_NOWAIT */
	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
		goto done;
	if (!force_nonblock || ret2 != -EAGAIN) {
		/* IOPOLL retry should happen for io-wq threads */
		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
			goto copy_iov;

		if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
			struct io_async_rw *io;

			trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
						   req->cqe.res, ret2);

			/* This is a partial write. The file pos has already been
			 * updated, setup the async struct to complete the request
			 * in the worker. Also update bytes_done to account for
			 * the bytes already written.
			 */
			iov_iter_save_state(&s->iter, &s->iter_state);
			ret = io_setup_async_rw(req, iovec, s, true);

			io = req->async_data;
			if (io)
				io->bytes_done += ret2;

			if (kiocb->ki_flags & IOCB_WRITE)
				io_req_end_write(req);
			return ret ? ret : -EAGAIN;
		}
done:
		ret = kiocb_done(req, ret2, issue_flags);
	} else {
copy_iov:
		iov_iter_restore(&s->iter, &s->iter_state);
		ret = io_setup_async_rw(req, iovec, s, false);
		if (!ret) {
			if (kiocb->ki_flags & IOCB_WRITE)
				io_req_end_write(req);
			return -EAGAIN;
		}
		return ret;
	}
	/* it's reportedly faster than delegating the null check to kfree() */
	if (iovec)
		kfree(iovec);
	return ret;
}

void io_rw_fail(struct io_kiocb *req)
{
	int res;

	res = io_fixup_rw_res(req, req->cqe.res);
	io_req_set_res(req, res, req->cqe.flags);
}

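/*
 * Reap completions for IOPOLL requests: poll the files on the iopoll list,
 * then flush a batch of completed requests.
 */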
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{
	struct io_wq_work_node *pos, *start, *prev;
	unsigned int poll_flags = 0;
	DEFINE_IO_COMP_BATCH(iob);
	int nr_events = 0;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list.
	 */
	if (ctx->poll_multi_queue || force_nonspin)
		poll_flags |= BLK_POLL_ONESHOT;

	wq_list_for_each(pos, start, &ctx->iopoll_list) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
		struct file *file = req->file;
		int ret;

		/*
		 * Move completed and retryable entries to our local lists.
		 * If we find a request that requires polling, break out
		 * and complete those lists first, if we have entries there.
		 */
		if (READ_ONCE(req->iopoll_completed))
			break;

		if (req->opcode == IORING_OP_URING_CMD) {
			struct io_uring_cmd *ioucmd;

			ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
			ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
							   poll_flags);
		} else {
			struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

			ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
		}
		if (unlikely(ret < 0))
			return ret;
		else if (ret)
			poll_flags |= BLK_POLL_ONESHOT;

		/* iopoll may have completed current req */
		if (!rq_list_empty(iob.req_list) ||
		    READ_ONCE(req->iopoll_completed))
			break;
	}

	if (!rq_list_empty(iob.req_list))
		iob.complete(&iob);
	else if (!pos)
		return 0;

	prev = start;
	wq_list_for_each_resume(pos, prev) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);

		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
		if (!smp_load_acquire(&req->iopoll_completed))
			break;
		nr_events++;
		req->cqe.flags = io_put_kbuf(req, 0);
	}
	if (unlikely(!nr_events))
		return 0;

	pos = start ? start->next : ctx->iopoll_list.first;
	wq_list_cut(&ctx->iopoll_list, prev, start);

	if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs)))
		return 0;
	ctx->submit_state.compl_reqs.first = pos;
	__io_submit_flush_completions(ctx);
	return nr_events;
}