// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fsnotify.h>
#include <linux/poll.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring/cmd.h>
#include <linux/indirect_call_wrapper.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "rsrc.h"
#include "poll.h"
#include "rw.h"

struct io_rw {
        /* NOTE: kiocb has the file as the first member, so don't do it here */
        struct kiocb kiocb;
        u64 addr;
        u32 len;
        rwf_t flags;
};

static inline bool io_file_supports_nowait(struct io_kiocb *req)
{
        return req->flags & REQ_F_SUPPORT_NOWAIT;
}

#ifdef CONFIG_COMPAT
static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
{
        struct compat_iovec __user *uiov;
        compat_ssize_t clen;

        uiov = u64_to_user_ptr(rw->addr);
        if (!access_ok(uiov, sizeof(*uiov)))
                return -EFAULT;
        if (__get_user(clen, &uiov->iov_len))
                return -EFAULT;
        if (clen < 0)
                return -EINVAL;

        rw->len = clen;
        return 0;
}
#endif

static int io_iov_buffer_select_prep(struct io_kiocb *req)
{
        struct iovec __user *uiov;
        struct iovec iov;
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

        if (rw->len != 1)
                return -EINVAL;

#ifdef CONFIG_COMPAT
        if (req->ctx->compat)
                return io_iov_compat_buffer_select_prep(rw);
#endif

        uiov = u64_to_user_ptr(rw->addr);
        if (copy_from_user(&iov, uiov, sizeof(*uiov)))
                return -EFAULT;
        rw->len = iov.iov_len;
        return 0;
}

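/*
 * Import the user buffer(s) for this request into io->iter. Non-vectored
 * and provided-buffer requests are imported as a single user buffer;
 * vectored requests go through __import_iovec(), reusing a previously
 * allocated iovec array when one is cached in the async data.
 */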
static int __io_import_iovec(int ddir, struct io_kiocb *req,
                             struct io_async_rw *io,
                             unsigned int issue_flags)
{
        const struct io_issue_def *def = &io_issue_defs[req->opcode];
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct iovec *iov;
        void __user *buf;
        int nr_segs, ret;
        size_t sqe_len;

        buf = u64_to_user_ptr(rw->addr);
        sqe_len = rw->len;

        if (!def->vectored || req->flags & REQ_F_BUFFER_SELECT) {
                if (io_do_buffer_select(req)) {
                        buf = io_buffer_select(req, &sqe_len, issue_flags);
                        if (!buf)
                                return -ENOBUFS;
                        rw->addr = (unsigned long) buf;
                        rw->len = sqe_len;
                }

                return import_ubuf(ddir, buf, sqe_len, &io->iter);
        }

        if (io->free_iovec) {
                nr_segs = io->free_iov_nr;
                iov = io->free_iovec;
        } else {
                iov = &io->fast_iov;
                nr_segs = 1;
        }
        ret = __import_iovec(ddir, buf, sqe_len, nr_segs, &iov, &io->iter,
                             req->ctx->compat);
        if (unlikely(ret < 0))
                return ret;
        if (iov) {
                req->flags |= REQ_F_NEED_CLEANUP;
                io->free_iov_nr = io->iter.nr_segs;
                kfree(io->free_iovec);
                io->free_iovec = iov;
        }
        return 0;
}

static inline int io_import_iovec(int rw, struct io_kiocb *req,
                                  struct io_async_rw *io,
                                  unsigned int issue_flags)
{
        int ret;

        ret = __io_import_iovec(rw, req, io, issue_flags);
        if (unlikely(ret < 0))
                return ret;

        iov_iter_save_state(&io->iter, &io->iter_state);
        return 0;
}

static void io_rw_iovec_free(struct io_async_rw *rw)
{
        if (rw->free_iovec) {
                kfree(rw->free_iovec);
                rw->free_iov_nr = 0;
                rw->free_iovec = NULL;
        }
}

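/*
 * Return the async read/write state to the ctx alloc cache if the ring is
 * locked; otherwise just free the iovec. A cached entry has its iovec
 * poisoned for KASAN until it is reused.
 */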
static void io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_async_rw *rw = req->async_data;
        struct iovec *iov;

        if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
                io_rw_iovec_free(rw);
                return;
        }
        iov = rw->free_iovec;
        if (io_alloc_cache_put(&req->ctx->rw_cache, rw)) {
                if (iov)
                        kasan_mempool_poison_object(iov);
                req->async_data = NULL;
                req->flags &= ~REQ_F_ASYNC_DATA;
        }
}

static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
        /*
         * Disable quick recycling for anything that's gone through io-wq.
         * In theory, this should be fine to cleanup. However, some read or
         * write iter handling touches the iovec AFTER having called into the
         * handler, eg to reexpand or revert. This means we can have:
         *
         * task                 io-wq
         *   issue
         *   punt to io-wq
         *                      issue
         *                        blkdev_write_iter()
         *                          ->ki_complete()
         *                            io_complete_rw()
         *                              queue tw complete
         *   run tw
         *     req_rw_cleanup
         *                            iov_iter_count() <- look at iov_iter again
         *
         * which can lead to a UAF. This is only possible for io-wq offload
         * as the cleanup can run in parallel. As io-wq is not the fast path,
         * just leave cleanup to the end.
         *
         * This is really a bug in the core code that does this, any issue
         * path should assume that a successful (or -EIOCBQUEUED) return can
         * mean that the underlying data can be gone at any time. But that
         * should be fixed separately, and then this check could be killed.
         */
        if (!(req->flags & REQ_F_REFCOUNT)) {
                req->flags &= ~REQ_F_NEED_CLEANUP;
                io_rw_recycle(req, issue_flags);
        }
}

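/*
 * Allocate (or pull from the ctx cache) the struct io_async_rw that backs
 * this request. A cached entry may still hold a previously allocated iovec,
 * which is unpoisoned and kept around so the import path can reuse it.
 */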
static int io_rw_alloc_async(struct io_kiocb *req)
{
        struct io_ring_ctx *ctx = req->ctx;
        struct io_async_rw *rw;

        rw = io_alloc_cache_get(&ctx->rw_cache);
        if (rw) {
                if (rw->free_iovec) {
                        kasan_mempool_unpoison_object(rw->free_iovec,
                                        rw->free_iov_nr * sizeof(struct iovec));
                        req->flags |= REQ_F_NEED_CLEANUP;
                }
                req->flags |= REQ_F_ASYNC_DATA;
                req->async_data = rw;
                goto done;
        }

        if (!io_alloc_async_data(req)) {
                rw = req->async_data;
                rw->free_iovec = NULL;
                rw->free_iov_nr = 0;
done:
                rw->bytes_done = 0;
                return 0;
        }

        return -ENOMEM;
}

static int io_prep_rw_setup(struct io_kiocb *req, int ddir, bool do_import)
{
        struct io_async_rw *rw;
        int ret;

        if (io_rw_alloc_async(req))
                return -ENOMEM;

        if (!do_import || io_do_buffer_select(req))
                return 0;

        rw = req->async_data;
        ret = io_import_iovec(ddir, req, rw, 0);
        if (unlikely(ret < 0))
                return ret;

        iov_iter_save_state(&rw->iter, &rw->iter_state);
        return 0;
}

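/*
 * Common prep for all read/write variants: read the SQE fields into the
 * io_rw command, validate and apply the requested ioprio, and set up the
 * async data (importing the user buffers unless the caller defers that).
 */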
static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                      int ddir, bool do_import)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        unsigned ioprio;
        int ret;

        rw->kiocb.ki_pos = READ_ONCE(sqe->off);
        /* used for fixed read/write too - just read unconditionally */
        req->buf_index = READ_ONCE(sqe->buf_index);

        ioprio = READ_ONCE(sqe->ioprio);
        if (ioprio) {
                ret = ioprio_check_cap(ioprio);
                if (ret)
                        return ret;

                rw->kiocb.ki_ioprio = ioprio;
        } else {
                rw->kiocb.ki_ioprio = get_current_ioprio();
        }
        rw->kiocb.dio_complete = NULL;

        rw->addr = READ_ONCE(sqe->addr);
        rw->len = READ_ONCE(sqe->len);
        rw->flags = READ_ONCE(sqe->rw_flags);
        return io_prep_rw_setup(req, ddir, do_import);
}

int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        return io_prep_rw(req, sqe, ITER_DEST, true);
}

int io_prep_write(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        return io_prep_rw(req, sqe, ITER_SOURCE, true);
}

static int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                       int ddir)
{
        const bool do_import = !(req->flags & REQ_F_BUFFER_SELECT);
        int ret;

        ret = io_prep_rw(req, sqe, ddir, do_import);
        if (unlikely(ret))
                return ret;
        if (do_import)
                return 0;

        /*
         * Have to do this validation here, as by the time this runs in
         * io_read() rw->len might have changed due to buffer selection
         */
        return io_iov_buffer_select_prep(req);
}

int io_prep_readv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        return io_prep_rwv(req, sqe, ITER_DEST);
}

int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        return io_prep_rwv(req, sqe, ITER_SOURCE);
}

static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                            int ddir)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct io_ring_ctx *ctx = req->ctx;
        struct io_async_rw *io;
        u16 index;
        int ret;

        ret = io_prep_rw(req, sqe, ddir, false);
        if (unlikely(ret))
                return ret;

        if (unlikely(req->buf_index >= ctx->nr_user_bufs))
                return -EFAULT;
        index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
        req->imu = ctx->user_bufs[index];
        io_req_set_rsrc_node(req, ctx, 0);

        io = req->async_data;
        ret = io_import_fixed(ddir, &io->iter, req->imu, rw->addr, rw->len);
        iov_iter_save_state(&io->iter, &io->iter_state);
        return ret;
}

int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        return io_prep_rw_fixed(req, sqe, ITER_DEST);
}

int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        return io_prep_rw_fixed(req, sqe, ITER_SOURCE);
}

/*
 * Multishot read is prepared just like a normal read/write request, the
 * only difference is that we set the MULTISHOT flag.
 */
int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        int ret;

        /* must be used with provided buffers */
        if (!(req->flags & REQ_F_BUFFER_SELECT))
                return -EINVAL;

        ret = io_prep_rw(req, sqe, ITER_DEST, false);
        if (unlikely(ret))
                return ret;

        if (rw->addr || rw->len)
                return -EINVAL;

        req->flags |= REQ_F_APOLL_MULTISHOT;
        return 0;
}

void io_readv_writev_cleanup(struct io_kiocb *req)
{
        io_rw_iovec_free(req->async_data);
}

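/*
 * Resolve the file position to use for this request. -1 means "use and
 * update the file's position" for non-stream files, in which case f_pos is
 * loaded here and written back on completion via REQ_F_CUR_POS.
 */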
static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

        if (rw->kiocb.ki_pos != -1)
                return &rw->kiocb.ki_pos;

        if (!(req->file->f_mode & FMODE_STREAM)) {
                req->flags |= REQ_F_CUR_POS;
                rw->kiocb.ki_pos = req->file->f_pos;
                return &rw->kiocb.ki_pos;
        }

        rw->kiocb.ki_pos = 0;
        return NULL;
}

#ifdef CONFIG_BLOCK
static void io_resubmit_prep(struct io_kiocb *req)
{
        struct io_async_rw *io = req->async_data;

        iov_iter_restore(&io->iter, &io->iter_state);
}

static bool io_rw_should_reissue(struct io_kiocb *req)
{
        umode_t mode = file_inode(req->file)->i_mode;
        struct io_ring_ctx *ctx = req->ctx;

        if (!S_ISBLK(mode) && !S_ISREG(mode))
                return false;
        if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
            !(ctx->flags & IORING_SETUP_IOPOLL)))
                return false;
        /*
         * If ref is dying, we might be running poll reap from the exit work.
         * Don't attempt to reissue from that path, just let it fail with
         * -EAGAIN.
         */
        if (percpu_ref_is_dying(&ctx->refs))
                return false;
        /*
         * Play it safe and assume not safe to re-import and reissue if we're
         * not in the original thread group (or in task context).
         */
        if (!same_thread_group(req->task, current) || !in_task())
                return false;
        return true;
}
#else
static void io_resubmit_prep(struct io_kiocb *req)
{
}
static bool io_rw_should_reissue(struct io_kiocb *req)
{
        return false;
}
#endif

static void io_req_end_write(struct io_kiocb *req)
{
        if (req->flags & REQ_F_ISREG) {
                struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

                kiocb_end_write(&rw->kiocb);
        }
}

/*
 * Trigger the notifications after having done some IO, and finish the write
 * accounting, if any.
 */
static void io_req_io_end(struct io_kiocb *req)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

        if (rw->kiocb.ki_flags & IOCB_WRITE) {
                io_req_end_write(req);
                fsnotify_modify(req->file);
        } else {
                fsnotify_access(req->file);
        }
}

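/*
 * Common completion handling: if the result doesn't match what was
 * submitted, either mark the request for reissue (returning true) or mark
 * it failed with the short/error result.
 */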
static bool __io_complete_rw_common(struct io_kiocb *req, long res)
{
        if (unlikely(res != req->cqe.res)) {
                if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
                    io_rw_should_reissue(req)) {
                        /*
                         * Reissue will start accounting again, finish the
                         * current cycle.
                         */
                        io_req_io_end(req);
                        req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
                        return true;
                }
                req_set_fail(req);
                req->cqe.res = res;
        }
        return false;
}

static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
{
        struct io_async_rw *io = req->async_data;

        /* add previously done IO, if any */
        if (req_has_async_data(req) && io->bytes_done > 0) {
                if (res < 0)
                        res = io->bytes_done;
                else
                        res += io->bytes_done;
        }
        return res;
}

void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct kiocb *kiocb = &rw->kiocb;

        if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) {
                long res = kiocb->dio_complete(rw->kiocb.private);

                io_req_set_res(req, io_fixup_rw_res(req, res), 0);
        }

        io_req_io_end(req);

        if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
                req->cqe.flags |= io_put_kbuf(req, 0);

        io_req_rw_cleanup(req, 0);
        io_req_task_complete(req, ts);
}

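/* ->ki_complete callback for non-IOPOLL requests, completes via task_work */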
static void io_complete_rw(struct kiocb *kiocb, long res)
{
        struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
        struct io_kiocb *req = cmd_to_io_kiocb(rw);

        if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
                if (__io_complete_rw_common(req, res))
                        return;
                io_req_set_res(req, io_fixup_rw_res(req, res), 0);
        }
        req->io_task_work.func = io_req_rw_complete;
        __io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
{
        struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
        struct io_kiocb *req = cmd_to_io_kiocb(rw);

        if (kiocb->ki_flags & IOCB_WRITE)
                io_req_end_write(req);
        if (unlikely(res != req->cqe.res)) {
                if (res == -EAGAIN && io_rw_should_reissue(req)) {
                        req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
                        return;
                }
                req->cqe.res = res;
        }

        /* order with io_iopoll_complete() checking ->iopoll_completed */
        smp_store_release(&req->iopoll_completed, 1);
}

static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
        /* IO was queued async, completion will happen later */
        if (ret == -EIOCBQUEUED)
                return;

        /* transform internal restart error codes */
        if (unlikely(ret < 0)) {
                switch (ret) {
                case -ERESTARTSYS:
                case -ERESTARTNOINTR:
                case -ERESTARTNOHAND:
                case -ERESTART_RESTARTBLOCK:
                        /*
                         * We can't just restart the syscall, since previously
                         * submitted sqes may already be in progress. Just fail
                         * this IO with EINTR.
                         */
                        ret = -EINTR;
                        break;
                }
        }

        INDIRECT_CALL_2(kiocb->ki_complete, io_complete_rw_iopoll,
                        io_complete_rw, kiocb, ret);
}

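/*
 * Finish a request after the issue path returned a result inline. Updates
 * the file position if needed, then either completes the request directly
 * or defers completion/reissue.
 */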
static int kiocb_done(struct io_kiocb *req, ssize_t ret,
                      unsigned int issue_flags)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        unsigned final_ret = io_fixup_rw_res(req, ret);

        if (ret >= 0 && req->flags & REQ_F_CUR_POS)
                req->file->f_pos = rw->kiocb.ki_pos;
        if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
                if (!__io_complete_rw_common(req, ret)) {
                        /*
                         * Safe to call io_end from here as we're inline
                         * from the submission path.
                         */
                        io_req_io_end(req);
                        io_req_set_res(req, final_ret,
                                       io_put_kbuf(req, issue_flags));
                        io_req_rw_cleanup(req, issue_flags);
                        return IOU_OK;
                }
        } else {
                io_rw_done(&rw->kiocb, ret);
        }

        if (req->flags & REQ_F_REISSUE) {
                req->flags &= ~REQ_F_REISSUE;
                io_resubmit_prep(req);
                return -EAGAIN;
        }
        return IOU_ISSUE_SKIP_COMPLETE;
}

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
        return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
{
        struct kiocb *kiocb = &rw->kiocb;
        struct file *file = kiocb->ki_filp;
        ssize_t ret = 0;
        loff_t *ppos;

        /*
         * Don't support polled IO through this interface, and we can't
         * support non-blocking either. For the latter, this just causes
         * the kiocb to be handled from an async context.
         */
        if (kiocb->ki_flags & IOCB_HIPRI)
                return -EOPNOTSUPP;
        if ((kiocb->ki_flags & IOCB_NOWAIT) &&
            !(kiocb->ki_filp->f_flags & O_NONBLOCK))
                return -EAGAIN;

        ppos = io_kiocb_ppos(kiocb);

        while (iov_iter_count(iter)) {
                void __user *addr;
                size_t len;
                ssize_t nr;

                if (iter_is_ubuf(iter)) {
                        addr = iter->ubuf + iter->iov_offset;
                        len = iov_iter_count(iter);
                } else if (!iov_iter_is_bvec(iter)) {
                        addr = iter_iov_addr(iter);
                        len = iter_iov_len(iter);
                } else {
                        addr = u64_to_user_ptr(rw->addr);
                        len = rw->len;
                }

                if (ddir == READ)
                        nr = file->f_op->read(file, addr, len, ppos);
                else
                        nr = file->f_op->write(file, addr, len, ppos);

                if (nr < 0) {
                        if (!ret)
                                ret = nr;
                        break;
                }
                ret += nr;
                if (!iov_iter_is_bvec(iter)) {
                        iov_iter_advance(iter, nr);
                } else {
                        rw->addr += nr;
                        rw->len -= nr;
                        if (!rw->len)
                                break;
                }
                if (nr != len)
                        break;
        }

        return ret;
}

/*
 * This is our waitqueue callback handler, registered through __folio_lock_async()
 * when we initially tried to do the IO with the iocb and armed our waitqueue.
 * This gets called when the page is unlocked, and we generally expect that to
 * happen when the page IO is completed and the page is now uptodate. This will
 * queue a task_work based retry of the operation, attempting to copy the data
 * again. If the latter fails because the page was NOT uptodate, then we will
 * do a thread based blocking retry of the operation. That's the unexpected
 * slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
                             int sync, void *arg)
{
        struct wait_page_queue *wpq;
        struct io_kiocb *req = wait->private;
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct wait_page_key *key = arg;

        wpq = container_of(wait, struct wait_page_queue, wait);

        if (!wake_page_match(wpq, key))
                return 0;

        rw->kiocb.ki_flags &= ~IOCB_WAITQ;
        list_del_init(&wait->entry);
        io_req_task_queue(req);
        return 1;
}

/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to setup a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
{
        struct io_async_rw *io = req->async_data;
        struct wait_page_queue *wait = &io->wpq;
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct kiocb *kiocb = &rw->kiocb;

        /* never retry for NOWAIT, we just complete with -EAGAIN */
        if (req->flags & REQ_F_NOWAIT)
                return false;

        /* Only for buffered IO */
        if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
                return false;

        /*
         * just use poll if we can, and don't attempt if the fs doesn't
         * support callback based unlocks
         */
        if (io_file_can_poll(req) ||
            !(req->file->f_op->fop_flags & FOP_BUFFER_RASYNC))
                return false;

        wait->wait.func = io_async_buf_func;
        wait->wait.private = req;
        wait->wait.flags = 0;
        INIT_LIST_HEAD(&wait->wait.entry);
        kiocb->ki_flags |= IOCB_WAITQ;
        kiocb->ki_flags &= ~IOCB_NOWAIT;
        kiocb->ki_waitq = wait;
        return true;
}

static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
{
        struct file *file = rw->kiocb.ki_filp;

        if (likely(file->f_op->read_iter))
                return file->f_op->read_iter(&rw->kiocb, iter);
        else if (file->f_op->read)
                return loop_rw_iter(READ, rw, iter);
        else
                return -EINVAL;
}

static bool need_complete_io(struct io_kiocb *req)
{
        return req->flags & REQ_F_ISREG ||
                S_ISBLK(file_inode(req->file)->i_mode);
}

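/*
 * Per-issue kiocb setup: validate the file mode, apply the RWF_* flags from
 * the SQE, work out whether the request must not block, and pick the
 * completion handler (IOPOLL vs task_work based).
 */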
static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct kiocb *kiocb = &rw->kiocb;
        struct io_ring_ctx *ctx = req->ctx;
        struct file *file = req->file;
        int ret;

        if (unlikely(!(file->f_mode & mode)))
                return -EBADF;

        if (!(req->flags & REQ_F_FIXED_FILE))
                req->flags |= io_file_get_flags(file);

        kiocb->ki_flags = file->f_iocb_flags;
        ret = kiocb_set_rw_flags(kiocb, rw->flags);
        if (unlikely(ret))
                return ret;
        kiocb->ki_flags |= IOCB_ALLOC_CACHE;

        /*
         * If the file is marked O_NONBLOCK, still allow retry for it if it
         * supports async. Otherwise it's impossible to use O_NONBLOCK files
         * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
         */
        if ((kiocb->ki_flags & IOCB_NOWAIT) ||
            ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
                req->flags |= REQ_F_NOWAIT;

        if (ctx->flags & IORING_SETUP_IOPOLL) {
                if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
                        return -EOPNOTSUPP;

                kiocb->private = NULL;
                kiocb->ki_flags |= IOCB_HIPRI;
                kiocb->ki_complete = io_complete_rw_iopoll;
                req->iopoll_completed = 0;
        } else {
                if (kiocb->ki_flags & IOCB_HIPRI)
                        return -EINVAL;
                kiocb->ki_complete = io_complete_rw;
        }

        return 0;
}

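/*
 * Core read path. Issues the read with IOCB_NOWAIT when non-blocking, and
 * on a short buffered read either retries with the page waitqueue armed or
 * hands the remainder to io-wq by returning -EAGAIN.
 */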
static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
{
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct io_async_rw *io = req->async_data;
        struct kiocb *kiocb = &rw->kiocb;
        ssize_t ret;
        loff_t *ppos;

        if (io_do_buffer_select(req)) {
                ret = io_import_iovec(ITER_DEST, req, io, issue_flags);
                if (unlikely(ret < 0))
                        return ret;
        }

        ret = io_rw_init_file(req, FMODE_READ);
        if (unlikely(ret))
                return ret;
        req->cqe.res = iov_iter_count(&io->iter);

        if (force_nonblock) {
                /* If the file doesn't support async, just async punt */
                if (unlikely(!io_file_supports_nowait(req)))
                        return -EAGAIN;
                kiocb->ki_flags |= IOCB_NOWAIT;
        } else {
                /* Ensure we clear previously set non-block flag */
                kiocb->ki_flags &= ~IOCB_NOWAIT;
        }

        ppos = io_kiocb_update_pos(req);

        ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
        if (unlikely(ret))
                return ret;

        ret = io_iter_do_read(rw, &io->iter);

        if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
                req->flags &= ~REQ_F_REISSUE;
                /* If we can poll, just do that. */
                if (io_file_can_poll(req))
                        return -EAGAIN;
                /* IOPOLL retry should happen for io-wq threads */
                if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
                        goto done;
                /* no retry on NONBLOCK nor RWF_NOWAIT */
                if (req->flags & REQ_F_NOWAIT)
                        goto done;
                ret = 0;
        } else if (ret == -EIOCBQUEUED) {
                return IOU_ISSUE_SKIP_COMPLETE;
        } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
                   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
                /* read all, failed, already did sync or don't want to retry */
                goto done;
        }

        /*
         * Don't depend on the iter state matching what was consumed, or being
         * untouched in case of error. Restore it and we'll advance it
         * manually if we need to.
         */
        iov_iter_restore(&io->iter, &io->iter_state);

        do {
                /*
                 * We end up here because of a partial read, either from
                 * above or inside this loop. Advance the iter by the bytes
                 * that were consumed.
                 */
                iov_iter_advance(&io->iter, ret);
                if (!iov_iter_count(&io->iter))
                        break;
                io->bytes_done += ret;
                iov_iter_save_state(&io->iter, &io->iter_state);

                /* if we can retry, do so with the callbacks armed */
                if (!io_rw_should_retry(req)) {
                        kiocb->ki_flags &= ~IOCB_WAITQ;
                        return -EAGAIN;
                }

                req->cqe.res = iov_iter_count(&io->iter);
                /*
                 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
                 * we get -EIOCBQUEUED, then we'll get a notification when the
                 * desired page gets unlocked. We can also get a partial read
                 * here, and if we do, then just retry at the new offset.
                 */
                ret = io_iter_do_read(rw, &io->iter);
                if (ret == -EIOCBQUEUED)
                        return IOU_ISSUE_SKIP_COMPLETE;
                /* we got some bytes, but not all. retry. */
                kiocb->ki_flags &= ~IOCB_WAITQ;
                iov_iter_restore(&io->iter, &io->iter_state);
        } while (ret > 0);
done:
        /* it's faster to check here than to delegate to kfree */
        return ret;
}

int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
        int ret;

        ret = __io_read(req, issue_flags);
        if (ret >= 0)
                return kiocb_done(req, ret, issue_flags);

        return ret;
}

int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        unsigned int cflags = 0;
        int ret;

        /*
         * Multishot MUST be used on a pollable file
         */
        if (!io_file_can_poll(req))
                return -EBADFD;

        ret = __io_read(req, issue_flags);

        /*
         * If the file doesn't support proper NOWAIT, then disable multishot
         * and stay in single shot mode.
         */
        if (!io_file_supports_nowait(req))
                req->flags &= ~REQ_F_APOLL_MULTISHOT;

        /*
         * If we get -EAGAIN, recycle our buffer and just let normal poll
         * handling arm it.
         */
        if (ret == -EAGAIN) {
                /*
                 * Reset rw->len to 0 again to avoid clamping future mshot
                 * reads, in case the buffer size varies.
                 */
                if (io_kbuf_recycle(req, issue_flags))
                        rw->len = 0;
                if (issue_flags & IO_URING_F_MULTISHOT)
                        return IOU_ISSUE_SKIP_COMPLETE;
                return -EAGAIN;
        }

        /*
         * Any successful return value will keep the multishot read armed.
         */
        if (ret > 0 && req->flags & REQ_F_APOLL_MULTISHOT) {
                /*
                 * Put our buffer and post a CQE. If we fail to post a CQE, then
                 * jump to the termination path. This request is then done.
                 */
                cflags = io_put_kbuf(req, issue_flags);
                rw->len = 0; /* similarly to above, reset len to 0 */

                if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
                        if (issue_flags & IO_URING_F_MULTISHOT) {
                                /*
                                 * Force retry, as we might have more data to
                                 * be read and otherwise it won't get retried
                                 * until (if ever) another poll is triggered.
                                 */
                                io_poll_multishot_retry(req);
                                return IOU_ISSUE_SKIP_COMPLETE;
                        }
                        return -EAGAIN;
                }
        }

        /*
         * Either an error, or we've hit overflow posting the CQE. For any
         * multishot request, hitting overflow will terminate it.
         */
        io_req_set_res(req, ret, cflags);
        io_req_rw_cleanup(req, issue_flags);
        if (issue_flags & IO_URING_F_MULTISHOT)
                return IOU_STOP_MULTISHOT;
        return IOU_OK;
}

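/*
 * Write path. Mirrors __io_read(), but additionally starts/ends write
 * accounting for regular files and converts a short write into an io-wq
 * retry, with bytes_done tracking the progress made so far.
 */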
int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct io_async_rw *io = req->async_data;
        struct kiocb *kiocb = &rw->kiocb;
        ssize_t ret, ret2;
        loff_t *ppos;

        ret = io_rw_init_file(req, FMODE_WRITE);
        if (unlikely(ret))
                return ret;
        req->cqe.res = iov_iter_count(&io->iter);

        if (force_nonblock) {
                /* If the file doesn't support async, just async punt */
                if (unlikely(!io_file_supports_nowait(req)))
                        goto ret_eagain;

                /* Check if we can support NOWAIT. */
                if (!(kiocb->ki_flags & IOCB_DIRECT) &&
                    !(req->file->f_op->fop_flags & FOP_BUFFER_WASYNC) &&
                    (req->flags & REQ_F_ISREG))
                        goto ret_eagain;

                kiocb->ki_flags |= IOCB_NOWAIT;
        } else {
                /* Ensure we clear previously set non-block flag */
                kiocb->ki_flags &= ~IOCB_NOWAIT;
        }

        ppos = io_kiocb_update_pos(req);

        ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
        if (unlikely(ret))
                return ret;

        if (req->flags & REQ_F_ISREG)
                kiocb_start_write(kiocb);
        kiocb->ki_flags |= IOCB_WRITE;

        if (likely(req->file->f_op->write_iter))
                ret2 = req->file->f_op->write_iter(kiocb, &io->iter);
        else if (req->file->f_op->write)
                ret2 = loop_rw_iter(WRITE, rw, &io->iter);
        else
                ret2 = -EINVAL;

        if (req->flags & REQ_F_REISSUE) {
                req->flags &= ~REQ_F_REISSUE;
                ret2 = -EAGAIN;
        }

        /*
         * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
         * retry them without IOCB_NOWAIT.
         */
        if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
                ret2 = -EAGAIN;
        /* no retry on NONBLOCK nor RWF_NOWAIT */
        if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
                goto done;
        if (!force_nonblock || ret2 != -EAGAIN) {
                /* IOPOLL retry should happen for io-wq threads */
                if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
                        goto ret_eagain;

                if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
                        trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
                                                   req->cqe.res, ret2);

                        /* This is a partial write. The file pos has already been
                         * updated, setup the async struct to complete the request
                         * in the worker. Also update bytes_done to account for
                         * the bytes already written.
                         */
                        iov_iter_save_state(&io->iter, &io->iter_state);
                        io->bytes_done += ret2;

                        if (kiocb->ki_flags & IOCB_WRITE)
                                io_req_end_write(req);
                        return -EAGAIN;
                }
done:
                return kiocb_done(req, ret2, issue_flags);
        } else {
ret_eagain:
                iov_iter_restore(&io->iter, &io->iter_state);
                if (kiocb->ki_flags & IOCB_WRITE)
                        io_req_end_write(req);
                return -EAGAIN;
        }
}

void io_rw_fail(struct io_kiocb *req)
{
        int res;

        res = io_fixup_rw_res(req, req->cqe.res);
        io_req_set_res(req, res, req->cqe.flags);
}

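/*
 * Reap completions for IOPOLL requests: poll each pending kiocb (or
 * uring_cmd), then flush CQEs for every request whose completion has been
 * observed, returning the number of events found.
 */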
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{
        struct io_wq_work_node *pos, *start, *prev;
        unsigned int poll_flags = 0;
        DEFINE_IO_COMP_BATCH(iob);
        int nr_events = 0;

        /*
         * Only spin for completions if we don't have multiple devices hanging
         * off our complete list.
         */
        if (ctx->poll_multi_queue || force_nonspin)
                poll_flags |= BLK_POLL_ONESHOT;

        wq_list_for_each(pos, start, &ctx->iopoll_list) {
                struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
                struct file *file = req->file;
                int ret;

                /*
                 * Move completed and retryable entries to our local lists.
                 * If we find a request that requires polling, break out
                 * and complete those lists first, if we have entries there.
                 */
                if (READ_ONCE(req->iopoll_completed))
                        break;

                if (req->opcode == IORING_OP_URING_CMD) {
                        struct io_uring_cmd *ioucmd;

                        ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
                        ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
                                                           poll_flags);
                } else {
                        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

                        ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
                }
                if (unlikely(ret < 0))
                        return ret;
                else if (ret)
                        poll_flags |= BLK_POLL_ONESHOT;

                /* iopoll may have completed current req */
                if (!rq_list_empty(iob.req_list) ||
                    READ_ONCE(req->iopoll_completed))
                        break;
        }

        if (!rq_list_empty(iob.req_list))
                iob.complete(&iob);
        else if (!pos)
                return 0;

        prev = start;
        wq_list_for_each_resume(pos, prev) {
                struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);

                /* order with io_complete_rw_iopoll(), e.g. ->result updates */
                if (!smp_load_acquire(&req->iopoll_completed))
                        break;
                nr_events++;
                req->cqe.flags = io_put_kbuf(req, 0);
                if (req->opcode != IORING_OP_URING_CMD)
                        io_req_rw_cleanup(req, 0);
        }
        if (unlikely(!nr_events))
                return 0;

        pos = start ? start->next : ctx->iopoll_list.first;
        wq_list_cut(&ctx->iopoll_list, prev, start);

        if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs)))
                return 0;
        ctx->submit_state.compl_reqs.first = pos;
        __io_submit_flush_completions(ctx);
        return nr_events;
}

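/* Free a cached io_async_rw entry, including any cached iovec it holds */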
void io_rw_cache_free(const void *entry)
{
        struct io_async_rw *rw = (struct io_async_rw *) entry;

        if (rw->free_iovec) {
                kasan_mempool_unpoison_object(rw->free_iovec,
                                rw->free_iov_nr * sizeof(struct iovec));
                io_rw_iovec_free(rw);
        }
        kfree(rw);
}