// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fsnotify.h>
#include <linux/poll.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "rsrc.h"
#include "rw.h"

struct io_rw {
        /* NOTE: kiocb has the file as the first member, so don't do it here */
        struct kiocb kiocb;
        u64 addr;
        u32 len;
        rwf_t flags;
};

static inline bool io_file_supports_nowait(struct io_kiocb *req)
{
        return req->flags & REQ_F_SUPPORT_NOWAIT;
}

#ifdef CONFIG_COMPAT
static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
{
        struct compat_iovec __user *uiov;
        compat_ssize_t clen;

        uiov = u64_to_user_ptr(rw->addr);
        if (!access_ok(uiov, sizeof(*uiov)))
                return -EFAULT;
        if (__get_user(clen, &uiov->iov_len))
                return -EFAULT;
        if (clen < 0)
                return -EINVAL;

        rw->len = clen;
        return 0;
}
#endif

static int io_iov_buffer_select_prep(struct io_kiocb *req)
{
        struct iovec __user *uiov;
        struct iovec iov;
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

        if (rw->len != 1)
                return -EINVAL;

#ifdef CONFIG_COMPAT
        if (req->ctx->compat)
                return io_iov_compat_buffer_select_prep(rw);
#endif

        uiov = u64_to_user_ptr(rw->addr);
        if (copy_from_user(&iov, uiov, sizeof(*uiov)))
                return -EFAULT;
        rw->len = iov.iov_len;
        return 0;
}

int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        unsigned ioprio;
        int ret;

        rw->kiocb.ki_pos = READ_ONCE(sqe->off);
        /* used for fixed read/write too - just read unconditionally */
        req->buf_index = READ_ONCE(sqe->buf_index);

        ioprio = READ_ONCE(sqe->ioprio);
        if (ioprio) {
                ret = ioprio_check_cap(ioprio);
                if (ret)
                        return ret;

                rw->kiocb.ki_ioprio = ioprio;
        } else {
                rw->kiocb.ki_ioprio = get_current_ioprio();
        }
        rw->kiocb.dio_complete = NULL;

        rw->addr = READ_ONCE(sqe->addr);
        rw->len = READ_ONCE(sqe->len);
        rw->flags = READ_ONCE(sqe->rw_flags);
        return 0;
}
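
/*
 * Illustrative only, not part of this file: a minimal userspace sketch
 * (assuming liburing; ring, fd and buf are placeholders) of how the SQE
 * fields read above get filled in. io_uring_prep_read() stores the buffer
 * in sqe->addr, the length in sqe->len, and the offset in sqe->off, which
 * io_prep_rw() then reads on submission.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
 *	io_uring_submit(&ring);
 */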

int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        int ret;

        ret = io_prep_rw(req, sqe);
        if (unlikely(ret))
                return ret;

        /*
         * Have to do this validation here: by the time io_read() runs,
         * rw->len might have changed due to buffer selection.
         */
        if (req->flags & REQ_F_BUFFER_SELECT)
                return io_iov_buffer_select_prep(req);

        return 0;
}

int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_ring_ctx *ctx = req->ctx;
        u16 index;
        int ret;

        ret = io_prep_rw(req, sqe);
        if (unlikely(ret))
                return ret;

        if (unlikely(req->buf_index >= ctx->nr_user_bufs))
                return -EFAULT;
        index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
        req->imu = ctx->user_bufs[index];
        io_req_set_rsrc_node(req, ctx, 0);
        return 0;
}
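
/*
 * Illustrative only: a hedged userspace sketch (liburing assumed, names are
 * placeholders) of driving the fixed-buffer path above. The buffer must be
 * registered first; sqe->buf_index then selects it from ctx->user_bufs.
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *
 *	io_uring_register_buffers(&ring, &iov, 1);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read_fixed(sqe, fd, buf, len, 0, 0);	// buf_index 0
 *	io_uring_submit(&ring);
 */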

/*
 * Multishot read is prepared just like a normal read/write request; the
 * only difference is that we set the MULTISHOT flag.
 */
int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        int ret;

        /* must be used with provided buffers */
        if (!(req->flags & REQ_F_BUFFER_SELECT))
                return -EINVAL;

        ret = io_prep_rw(req, sqe);
        if (unlikely(ret))
                return ret;

        if (rw->addr || rw->len)
                return -EINVAL;

        req->flags |= REQ_F_APOLL_MULTISHOT;
        return 0;
}
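
/*
 * Illustrative only: a hedged sketch (assuming liburing >= 2.5; BGID, buf
 * and err are placeholders) of arming a multishot read. Matching the checks
 * above, no address or length is given; every completion instead picks a
 * buffer from the provided buffer ring in group BGID.
 *
 *	br = io_uring_setup_buf_ring(&ring, 8, BGID, 0, &err);
 *	io_uring_buf_ring_add(br, buf, buf_len, 0,
 *			      io_uring_buf_ring_mask(8), 0);
 *	io_uring_buf_ring_advance(br, 1);
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read_multishot(sqe, fd, 0, 0, BGID);
 *	io_uring_submit(&ring);
 */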

void io_readv_writev_cleanup(struct io_kiocb *req)
{
        struct io_async_rw *io = req->async_data;

        kfree(io->free_iovec);
}

static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
        switch (ret) {
        case -EIOCBQUEUED:
                break;
        case -ERESTARTSYS:
        case -ERESTARTNOINTR:
        case -ERESTARTNOHAND:
        case -ERESTART_RESTARTBLOCK:
                /*
                 * We can't just restart the syscall, since previously
                 * submitted sqes may already be in progress. Just fail this
                 * IO with EINTR.
                 */
                ret = -EINTR;
                fallthrough;
        default:
                kiocb->ki_complete(kiocb, ret);
        }
}

static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

        if (rw->kiocb.ki_pos != -1)
                return &rw->kiocb.ki_pos;

        if (!(req->file->f_mode & FMODE_STREAM)) {
                req->flags |= REQ_F_CUR_POS;
                rw->kiocb.ki_pos = req->file->f_pos;
                return &rw->kiocb.ki_pos;
        }

        rw->kiocb.ki_pos = 0;
        return NULL;
}
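
/*
 * Illustrative only: from userspace (liburing assumed), the ki_pos == -1
 * branch above corresponds to submitting with offset -1, which gives
 * read(2)/write(2) style use-and-update of the file position on
 * non-stream files:
 *
 *	io_uring_prep_read(sqe, fd, buf, sizeof(buf), -1);
 */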

static void io_req_task_queue_reissue(struct io_kiocb *req)
{
        req->io_task_work.func = io_queue_iowq;
        io_req_task_work_add(req);
}

#ifdef CONFIG_BLOCK
static bool io_resubmit_prep(struct io_kiocb *req)
{
        struct io_async_rw *io = req->async_data;

        if (!req_has_async_data(req))
                return !io_req_prep_async(req);
        iov_iter_restore(&io->s.iter, &io->s.iter_state);
        return true;
}

static bool io_rw_should_reissue(struct io_kiocb *req)
{
        umode_t mode = file_inode(req->file)->i_mode;
        struct io_ring_ctx *ctx = req->ctx;

        if (!S_ISBLK(mode) && !S_ISREG(mode))
                return false;
        if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
            !(ctx->flags & IORING_SETUP_IOPOLL)))
                return false;
        /*
         * If ref is dying, we might be running poll reap from the exit work.
         * Don't attempt to reissue from that path, just let it fail with
         * -EAGAIN.
         */
        if (percpu_ref_is_dying(&ctx->refs))
                return false;
        /*
         * Play it safe and assume not safe to re-import and reissue if we're
         * not in the original thread group (or in task context).
         */
        if (!same_thread_group(req->task, current) || !in_task())
                return false;
        return true;
}
#else
static bool io_resubmit_prep(struct io_kiocb *req)
{
        return false;
}
static bool io_rw_should_reissue(struct io_kiocb *req)
{
        return false;
}
#endif

static void io_req_end_write(struct io_kiocb *req)
{
        if (req->flags & REQ_F_ISREG) {
                struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

                kiocb_end_write(&rw->kiocb);
        }
}

/*
 * Trigger the notifications after having done some IO, and finish the write
 * accounting, if any.
 */
static void io_req_io_end(struct io_kiocb *req)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

        if (rw->kiocb.ki_flags & IOCB_WRITE) {
                io_req_end_write(req);
                fsnotify_modify(req->file);
        } else {
                fsnotify_access(req->file);
        }
}

static bool __io_complete_rw_common(struct io_kiocb *req, long res)
{
        if (unlikely(res != req->cqe.res)) {
                if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
                    io_rw_should_reissue(req)) {
                        /*
                         * Reissue will start accounting again, finish the
                         * current cycle.
                         */
                        io_req_io_end(req);
                        req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
                        return true;
                }
                req_set_fail(req);
                req->cqe.res = res;
        }
        return false;
}

static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
{
        struct io_async_rw *io = req->async_data;

        /* add previously done IO, if any */
        if (req_has_async_data(req) && io->bytes_done > 0) {
                if (res < 0)
                        res = io->bytes_done;
                else
                        res += io->bytes_done;
        }
        return res;
}
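
/*
 * Worked example of the fixup above: if an earlier attempt already moved
 * 1024 bytes (io->bytes_done == 1024) and the retry returns 3072, the
 * completion reports 4096. If the retry instead fails, say with -EAGAIN,
 * the completion still reports the 1024 bytes that did complete rather
 * than the error.
 */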

void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct kiocb *kiocb = &rw->kiocb;

        if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) {
                long res = kiocb->dio_complete(rw->kiocb.private);

                io_req_set_res(req, io_fixup_rw_res(req, res), 0);
        }

        io_req_io_end(req);

        if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
                unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;

                req->cqe.flags |= io_put_kbuf(req, issue_flags);
        }
        io_req_task_complete(req, ts);
}

static void io_complete_rw(struct kiocb *kiocb, long res)
{
        struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
        struct io_kiocb *req = cmd_to_io_kiocb(rw);

        if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
                if (__io_complete_rw_common(req, res))
                        return;
                io_req_set_res(req, io_fixup_rw_res(req, res), 0);
        }
        req->io_task_work.func = io_req_rw_complete;
        __io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
{
        struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
        struct io_kiocb *req = cmd_to_io_kiocb(rw);

        if (kiocb->ki_flags & IOCB_WRITE)
                io_req_end_write(req);
        if (unlikely(res != req->cqe.res)) {
                if (res == -EAGAIN && io_rw_should_reissue(req)) {
                        req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
                        return;
                }
                req->cqe.res = res;
        }

        /* order with io_iopoll_complete() checking ->iopoll_completed */
        smp_store_release(&req->iopoll_completed, 1);
}

static int kiocb_done(struct io_kiocb *req, ssize_t ret,
                      unsigned int issue_flags)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        unsigned final_ret = io_fixup_rw_res(req, ret);

        if (ret >= 0 && req->flags & REQ_F_CUR_POS)
                req->file->f_pos = rw->kiocb.ki_pos;
        if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
                if (!__io_complete_rw_common(req, ret)) {
                        /*
                         * Safe to call io_end from here as we're inline
                         * from the submission path.
                         */
                        io_req_io_end(req);
                        io_req_set_res(req, final_ret,
                                       io_put_kbuf(req, issue_flags));
                        return IOU_OK;
                }
        } else {
                io_rw_done(&rw->kiocb, ret);
        }

        if (req->flags & REQ_F_REISSUE) {
                req->flags &= ~REQ_F_REISSUE;
                if (io_resubmit_prep(req))
                        io_req_task_queue_reissue(req);
                else
                        io_req_task_queue_fail(req, final_ret);
        }
        return IOU_ISSUE_SKIP_COMPLETE;
}

static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
                                       struct io_rw_state *s,
                                       unsigned int issue_flags)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct iov_iter *iter = &s->iter;
        u8 opcode = req->opcode;
        struct iovec *iovec;
        void __user *buf;
        size_t sqe_len;
        ssize_t ret;

        if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
                ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len);
                if (ret)
                        return ERR_PTR(ret);
                return NULL;
        }

        buf = u64_to_user_ptr(rw->addr);
        sqe_len = rw->len;

        if (!io_issue_defs[opcode].vectored || req->flags & REQ_F_BUFFER_SELECT) {
                if (io_do_buffer_select(req)) {
                        buf = io_buffer_select(req, &sqe_len, issue_flags);
                        if (!buf)
                                return ERR_PTR(-ENOBUFS);
                        rw->addr = (unsigned long) buf;
                        rw->len = sqe_len;
                }

                ret = import_ubuf(ddir, buf, sqe_len, iter);
                if (ret)
                        return ERR_PTR(ret);
                return NULL;
        }

        iovec = s->fast_iov;
        ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
                             req->ctx->compat);
        if (unlikely(ret < 0))
                return ERR_PTR(ret);
        return iovec;
}

static inline int io_import_iovec(int rw, struct io_kiocb *req,
                                  struct iovec **iovec, struct io_rw_state *s,
                                  unsigned int issue_flags)
{
        *iovec = __io_import_iovec(rw, req, s, issue_flags);
        if (IS_ERR(*iovec))
                return PTR_ERR(*iovec);

        iov_iter_save_state(&s->iter, &s->iter_state);
        return 0;
}

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
        return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
{
        struct kiocb *kiocb = &rw->kiocb;
        struct file *file = kiocb->ki_filp;
        ssize_t ret = 0;
        loff_t *ppos;

        /*
         * Don't support polled IO through this interface, and we can't
         * support non-blocking either. For the latter, this just causes
         * the kiocb to be handled from an async context.
         */
        if (kiocb->ki_flags & IOCB_HIPRI)
                return -EOPNOTSUPP;
        if ((kiocb->ki_flags & IOCB_NOWAIT) &&
            !(kiocb->ki_filp->f_flags & O_NONBLOCK))
                return -EAGAIN;

        ppos = io_kiocb_ppos(kiocb);

        while (iov_iter_count(iter)) {
                void __user *addr;
                size_t len;
                ssize_t nr;

                if (iter_is_ubuf(iter)) {
                        addr = iter->ubuf + iter->iov_offset;
                        len = iov_iter_count(iter);
                } else if (!iov_iter_is_bvec(iter)) {
                        addr = iter_iov_addr(iter);
                        len = iter_iov_len(iter);
                } else {
                        addr = u64_to_user_ptr(rw->addr);
                        len = rw->len;
                }

                if (ddir == READ)
                        nr = file->f_op->read(file, addr, len, ppos);
                else
                        nr = file->f_op->write(file, addr, len, ppos);

                if (nr < 0) {
                        if (!ret)
                                ret = nr;
                        break;
                }
                ret += nr;
                if (!iov_iter_is_bvec(iter)) {
                        iov_iter_advance(iter, nr);
                } else {
                        rw->addr += nr;
                        rw->len -= nr;
                        if (!rw->len)
                                break;
                }
                if (nr != len)
                        break;
        }

        return ret;
}

static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
                          const struct iovec *fast_iov, struct iov_iter *iter)
{
        struct io_async_rw *io = req->async_data;

        memcpy(&io->s.iter, iter, sizeof(*iter));
        io->free_iovec = iovec;
        io->bytes_done = 0;
        /* can only be fixed buffers, no need to do anything */
        if (iov_iter_is_bvec(iter) || iter_is_ubuf(iter))
                return;
        if (!iovec) {
                unsigned iov_off = 0;

                io->s.iter.__iov = io->s.fast_iov;
                if (iter->__iov != fast_iov) {
                        iov_off = iter_iov(iter) - fast_iov;
                        io->s.iter.__iov += iov_off;
                }
                if (io->s.fast_iov != fast_iov)
                        memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off,
                               sizeof(struct iovec) * iter->nr_segs);
        } else {
                req->flags |= REQ_F_NEED_CLEANUP;
        }
}

static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
                             struct io_rw_state *s, bool force)
{
        if (!force && !io_cold_defs[req->opcode].prep_async)
                return 0;
        /* opcode type doesn't need async data */
        if (!io_cold_defs[req->opcode].async_size)
                return 0;
        if (!req_has_async_data(req)) {
                struct io_async_rw *iorw;

                if (io_alloc_async_data(req)) {
                        kfree(iovec);
                        return -ENOMEM;
                }

                io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
                iorw = req->async_data;
                /* we've copied and mapped the iter, ensure state is saved */
                iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
        }
        return 0;
}

static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
{
        struct io_async_rw *iorw = req->async_data;
        struct iovec *iov;
        int ret;

        /* submission path, ->uring_lock should already be taken */
        ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
        if (unlikely(ret < 0))
                return ret;

        iorw->bytes_done = 0;
        iorw->free_iovec = iov;
        if (iov)
                req->flags |= REQ_F_NEED_CLEANUP;
        return 0;
}

int io_readv_prep_async(struct io_kiocb *req)
{
        return io_rw_prep_async(req, ITER_DEST);
}

int io_writev_prep_async(struct io_kiocb *req)
{
        return io_rw_prep_async(req, ITER_SOURCE);
}

/*
 * This is our waitqueue callback handler, registered through
 * __folio_lock_async() when we initially tried to do the IO and had our
 * waitqueue armed on the iocb. This gets called when the page is unlocked,
 * and we generally expect that to happen when the page IO is completed and
 * the page is now uptodate. This will queue a task_work based retry of the
 * operation, attempting to copy the data again. If the latter fails because
 * the page was NOT uptodate, then we will do a thread based blocking retry
 * of the operation. That's the unexpected slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
                             int sync, void *arg)
{
        struct wait_page_queue *wpq;
        struct io_kiocb *req = wait->private;
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct wait_page_key *key = arg;

        wpq = container_of(wait, struct wait_page_queue, wait);

        if (!wake_page_match(wpq, key))
                return 0;

        rw->kiocb.ki_flags &= ~IOCB_WAITQ;
        list_del_init(&wait->entry);
        io_req_task_queue(req);
        return 1;
}

/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to setup a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
{
        struct io_async_rw *io = req->async_data;
        struct wait_page_queue *wait = &io->wpq;
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct kiocb *kiocb = &rw->kiocb;

        /* never retry for NOWAIT, we just complete with -EAGAIN */
        if (req->flags & REQ_F_NOWAIT)
                return false;

        /* Only for buffered IO */
        if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
                return false;

        /*
         * just use poll if we can, and don't attempt if the fs doesn't
         * support callback based unlocks
         */
        if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
                return false;

        wait->wait.func = io_async_buf_func;
        wait->wait.private = req;
        wait->wait.flags = 0;
        INIT_LIST_HEAD(&wait->wait.entry);
        kiocb->ki_flags |= IOCB_WAITQ;
        kiocb->ki_flags &= ~IOCB_NOWAIT;
        kiocb->ki_waitq = wait;
        return true;
}

static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
{
        struct file *file = rw->kiocb.ki_filp;

        if (likely(file->f_op->read_iter))
                return call_read_iter(file, &rw->kiocb, iter);
        else if (file->f_op->read)
                return loop_rw_iter(READ, rw, iter);
        else
                return -EINVAL;
}

static bool need_complete_io(struct io_kiocb *req)
{
        return req->flags & REQ_F_ISREG ||
                S_ISBLK(file_inode(req->file)->i_mode);
}

static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct kiocb *kiocb = &rw->kiocb;
        struct io_ring_ctx *ctx = req->ctx;
        struct file *file = req->file;
        int ret;

        if (unlikely(!file || !(file->f_mode & mode)))
                return -EBADF;

        if (!(req->flags & REQ_F_FIXED_FILE))
                req->flags |= io_file_get_flags(file);

        kiocb->ki_flags = file->f_iocb_flags;
        ret = kiocb_set_rw_flags(kiocb, rw->flags);
        if (unlikely(ret))
                return ret;
        kiocb->ki_flags |= IOCB_ALLOC_CACHE;

        /*
         * If the file is marked O_NONBLOCK, still allow retry for it if it
         * supports async. Otherwise it's impossible to use O_NONBLOCK files
         * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
         */
        if ((kiocb->ki_flags & IOCB_NOWAIT) ||
            ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
                req->flags |= REQ_F_NOWAIT;

        if (ctx->flags & IORING_SETUP_IOPOLL) {
                if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
                        return -EOPNOTSUPP;

                kiocb->private = NULL;
                kiocb->ki_flags |= IOCB_HIPRI;
                kiocb->ki_complete = io_complete_rw_iopoll;
                req->iopoll_completed = 0;
        } else {
                if (kiocb->ki_flags & IOCB_HIPRI)
                        return -EINVAL;
                kiocb->ki_complete = io_complete_rw;
        }

        return 0;
}
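
/*
 * Illustrative only: rw->flags, consumed by kiocb_set_rw_flags() above,
 * comes straight from sqe->rw_flags. A hedged userspace sketch (liburing
 * assumed) of requesting per-I/O RWF_NOWAIT, which kiocb_set_rw_flags()
 * translates into IOCB_NOWAIT:
 *
 *	io_uring_prep_write(sqe, fd, buf, len, 0);
 *	sqe->rw_flags = RWF_NOWAIT;
 */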

static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct io_rw_state __s, *s = &__s;
        struct iovec *iovec;
        struct kiocb *kiocb = &rw->kiocb;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
        struct io_async_rw *io;
        ssize_t ret, ret2;
        loff_t *ppos;

        if (!req_has_async_data(req)) {
                ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
                if (unlikely(ret < 0))
                        return ret;
        } else {
                io = req->async_data;
                s = &io->s;

                /*
                 * Safe and required to re-import if we're using provided
                 * buffers, as we dropped the selected one before retry.
                 */
                if (io_do_buffer_select(req)) {
                        ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
                        if (unlikely(ret < 0))
                                return ret;
                }

                /*
                 * We come here from an earlier attempt, restore our state to
                 * match in case it doesn't. It's cheap enough that we don't
                 * need to make this conditional.
                 */
                iov_iter_restore(&s->iter, &s->iter_state);
                iovec = NULL;
        }
        ret = io_rw_init_file(req, FMODE_READ);
        if (unlikely(ret)) {
                kfree(iovec);
                return ret;
        }
        req->cqe.res = iov_iter_count(&s->iter);

        if (force_nonblock) {
                /* If the file doesn't support async, just async punt */
                if (unlikely(!io_file_supports_nowait(req))) {
                        ret = io_setup_async_rw(req, iovec, s, true);
                        return ret ?: -EAGAIN;
                }
                kiocb->ki_flags |= IOCB_NOWAIT;
        } else {
                /* Ensure we clear previously set non-block flag */
                kiocb->ki_flags &= ~IOCB_NOWAIT;
        }

        ppos = io_kiocb_update_pos(req);

        ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
        if (unlikely(ret)) {
                kfree(iovec);
                return ret;
        }

        ret = io_iter_do_read(rw, &s->iter);

        if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
                req->flags &= ~REQ_F_REISSUE;
                /*
                 * If we can poll, just do that. For a vectored read, we'll
                 * need to copy state first.
                 */
                if (file_can_poll(req->file) && !io_issue_defs[req->opcode].vectored)
                        return -EAGAIN;
                /* IOPOLL retry should happen for io-wq threads */
                if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
                        goto done;
                /* no retry on NONBLOCK nor RWF_NOWAIT */
                if (req->flags & REQ_F_NOWAIT)
                        goto done;
                ret = 0;
        } else if (ret == -EIOCBQUEUED) {
                if (iovec)
                        kfree(iovec);
                return IOU_ISSUE_SKIP_COMPLETE;
        } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
                   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
                /* read all, failed, already did sync or don't want to retry */
                goto done;
        }

        /*
         * Don't depend on the iter state matching what was consumed, or being
         * untouched in case of error. Restore it and we'll advance it
         * manually if we need to.
         */
        iov_iter_restore(&s->iter, &s->iter_state);

        ret2 = io_setup_async_rw(req, iovec, s, true);
        iovec = NULL;
        if (ret2) {
                ret = ret > 0 ? ret : ret2;
                goto done;
        }

        io = req->async_data;
        s = &io->s;
        /*
         * Now use our persistent iterator and state, if we aren't already.
         * We've restored and mapped the iter to match.
         */

        do {
                /*
                 * We end up here because of a partial read, either from
                 * above or inside this loop. Advance the iter by the bytes
                 * that were consumed.
                 */
                iov_iter_advance(&s->iter, ret);
                if (!iov_iter_count(&s->iter))
                        break;
                io->bytes_done += ret;
                iov_iter_save_state(&s->iter, &s->iter_state);

                /* if we can retry, do so with the callbacks armed */
                if (!io_rw_should_retry(req)) {
                        kiocb->ki_flags &= ~IOCB_WAITQ;
                        return -EAGAIN;
                }

                req->cqe.res = iov_iter_count(&s->iter);
                /*
                 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
                 * we get -EIOCBQUEUED, then we'll get a notification when the
                 * desired page gets unlocked. We can also get a partial read
                 * here, and if we do, then just retry at the new offset.
                 */
                ret = io_iter_do_read(rw, &s->iter);
                if (ret == -EIOCBQUEUED)
                        return IOU_ISSUE_SKIP_COMPLETE;
                /* we got some bytes, but not all. retry. */
                kiocb->ki_flags &= ~IOCB_WAITQ;
                iov_iter_restore(&s->iter, &s->iter_state);
        } while (ret > 0);
done:
        /* it's faster to check here than to delegate to kfree */
        if (iovec)
                kfree(iovec);
        return ret;
}

int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
        int ret;

        ret = __io_read(req, issue_flags);
        if (ret >= 0)
                return kiocb_done(req, ret, issue_flags);

        return ret;
}

int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        unsigned int cflags = 0;
        int ret;

        /*
         * Multishot MUST be used on a pollable file
         */
        if (!file_can_poll(req->file))
                return -EBADFD;

        ret = __io_read(req, issue_flags);

        /*
         * If we get -EAGAIN, recycle our buffer and just let normal poll
         * handling arm it.
         */
        if (ret == -EAGAIN) {
                /*
                 * Reset rw->len to 0 again to avoid clamping future mshot
                 * reads, in case the buffer size varies.
                 */
                if (io_kbuf_recycle(req, issue_flags))
                        rw->len = 0;
                return -EAGAIN;
        }

        /*
         * Any successful return value will keep the multishot read armed.
         */
        if (ret > 0) {
                /*
                 * Put our buffer and post a CQE. If we fail to post a CQE, then
                 * jump to the termination path. This request is then done.
                 */
                cflags = io_put_kbuf(req, issue_flags);
                rw->len = 0; /* similarly to above, reset len to 0 */

                if (io_fill_cqe_req_aux(req,
                                        issue_flags & IO_URING_F_COMPLETE_DEFER,
                                        ret, cflags | IORING_CQE_F_MORE)) {
                        if (issue_flags & IO_URING_F_MULTISHOT)
                                return IOU_ISSUE_SKIP_COMPLETE;
                        return -EAGAIN;
                }
        }

        /*
         * Either an error, or we've hit overflow posting the CQE. For any
         * multishot request, hitting overflow will terminate it.
         */
        io_req_set_res(req, ret, cflags);
        if (issue_flags & IO_URING_F_MULTISHOT)
                return IOU_STOP_MULTISHOT;
        return IOU_OK;
}

int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct io_rw_state __s, *s = &__s;
        struct iovec *iovec;
        struct kiocb *kiocb = &rw->kiocb;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
        ssize_t ret, ret2;
        loff_t *ppos;

        if (!req_has_async_data(req)) {
                ret = io_import_iovec(ITER_SOURCE, req, &iovec, s, issue_flags);
                if (unlikely(ret < 0))
                        return ret;
        } else {
                struct io_async_rw *io = req->async_data;

                s = &io->s;
                iov_iter_restore(&s->iter, &s->iter_state);
                iovec = NULL;
        }
        ret = io_rw_init_file(req, FMODE_WRITE);
        if (unlikely(ret)) {
                kfree(iovec);
                return ret;
        }
        req->cqe.res = iov_iter_count(&s->iter);

        if (force_nonblock) {
                /* If the file doesn't support async, just async punt */
                if (unlikely(!io_file_supports_nowait(req)))
                        goto copy_iov;

                /* Buffered (non-direct) NOWAIT on a regular file requires FMODE_BUF_WASYNC */
                if (!(kiocb->ki_flags & IOCB_DIRECT) &&
                    !(kiocb->ki_filp->f_mode & FMODE_BUF_WASYNC) &&
                    (req->flags & REQ_F_ISREG))
                        goto copy_iov;

                kiocb->ki_flags |= IOCB_NOWAIT;
        } else {
                /* Ensure we clear previously set non-block flag */
                kiocb->ki_flags &= ~IOCB_NOWAIT;
        }

        ppos = io_kiocb_update_pos(req);

        ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
        if (unlikely(ret)) {
                kfree(iovec);
                return ret;
        }

        if (req->flags & REQ_F_ISREG)
                kiocb_start_write(kiocb);
        kiocb->ki_flags |= IOCB_WRITE;

        if (likely(req->file->f_op->write_iter))
                ret2 = call_write_iter(req->file, kiocb, &s->iter);
        else if (req->file->f_op->write)
                ret2 = loop_rw_iter(WRITE, rw, &s->iter);
        else
                ret2 = -EINVAL;

        if (req->flags & REQ_F_REISSUE) {
                req->flags &= ~REQ_F_REISSUE;
                ret2 = -EAGAIN;
        }

        /*
         * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
         * retry them without IOCB_NOWAIT.
         */
        if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
                ret2 = -EAGAIN;
        /* no retry on NONBLOCK nor RWF_NOWAIT */
        if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
                goto done;
        if (!force_nonblock || ret2 != -EAGAIN) {
                /* IOPOLL retry should happen for io-wq threads */
                if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
                        goto copy_iov;

                if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
                        struct io_async_rw *io;

                        trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
                                                   req->cqe.res, ret2);

                        /*
                         * This is a partial write. The file pos has already
                         * been updated, setup the async struct to complete
                         * the request in the worker. Also update bytes_done
                         * to account for the bytes already written.
                         */
                        iov_iter_save_state(&s->iter, &s->iter_state);
                        ret = io_setup_async_rw(req, iovec, s, true);

                        io = req->async_data;
                        if (io)
                                io->bytes_done += ret2;

                        if (kiocb->ki_flags & IOCB_WRITE)
                                io_req_end_write(req);
                        return ret ? ret : -EAGAIN;
                }
done:
                ret = kiocb_done(req, ret2, issue_flags);
        } else {
copy_iov:
                iov_iter_restore(&s->iter, &s->iter_state);
                ret = io_setup_async_rw(req, iovec, s, false);
                if (!ret) {
                        if (kiocb->ki_flags & IOCB_WRITE)
                                io_req_end_write(req);
                        return -EAGAIN;
                }
                return ret;
        }
        /* it's reportedly faster than delegating the null check to kfree() */
        if (iovec)
                kfree(iovec);
        return ret;
}

void io_rw_fail(struct io_kiocb *req)
{
        int res;

        res = io_fixup_rw_res(req, req->cqe.res);
        io_req_set_res(req, res, req->cqe.flags);
}

int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{
        struct io_wq_work_node *pos, *start, *prev;
        unsigned int poll_flags = 0;
        DEFINE_IO_COMP_BATCH(iob);
        int nr_events = 0;

        /*
         * Only spin for completions if we don't have multiple devices hanging
         * off our complete list.
         */
        if (ctx->poll_multi_queue || force_nonspin)
                poll_flags |= BLK_POLL_ONESHOT;

        wq_list_for_each(pos, start, &ctx->iopoll_list) {
                struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
                struct file *file = req->file;
                int ret;

                /*
                 * Move completed and retryable entries to our local lists.
                 * If we find a request that requires polling, break out
                 * and complete those lists first, if we have entries there.
                 */
                if (READ_ONCE(req->iopoll_completed))
                        break;

                if (req->opcode == IORING_OP_URING_CMD) {
                        struct io_uring_cmd *ioucmd;

                        ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
                        ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
                                                           poll_flags);
                } else {
                        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

                        ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
                }
                if (unlikely(ret < 0))
                        return ret;
                else if (ret)
                        poll_flags |= BLK_POLL_ONESHOT;

                /* iopoll may have completed current req */
                if (!rq_list_empty(iob.req_list) ||
                    READ_ONCE(req->iopoll_completed))
                        break;
        }

        if (!rq_list_empty(iob.req_list))
                iob.complete(&iob);
        else if (!pos)
                return 0;

        prev = start;
        wq_list_for_each_resume(pos, prev) {
                struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);

                /* order with io_complete_rw_iopoll(), e.g. ->result updates */
                if (!smp_load_acquire(&req->iopoll_completed))
                        break;
                nr_events++;
                req->cqe.flags = io_put_kbuf(req, 0);
        }
        if (unlikely(!nr_events))
                return 0;

        pos = start ? start->next : ctx->iopoll_list.first;
        wq_list_cut(&ctx->iopoll_list, prev, start);

        if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs)))
                return 0;
        ctx->submit_state.compl_reqs.first = pos;
        __io_submit_flush_completions(ctx);
        return nr_events;
}