git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
io_uring: add REQ_F_IOPOLL
authorCaleb Sander Mateos <csander@purestorage.com>
Mon, 2 Mar 2026 17:29:10 +0000 (10:29 -0700)
committerJens Axboe <axboe@kernel.dk>
Mon, 16 Mar 2026 22:14:14 +0000 (16:14 -0600)
A subsequent commit will allow uring_cmds targeting files that don't
implement ->uring_cmd_iopoll() to be issued on IORING_SETUP_IOPOLL
io_urings. This means the ctx's IORING_SETUP_IOPOLL flag isn't
sufficient to determine whether a given request needs to be iopolled.

Introduce a request flag REQ_F_IOPOLL set in ->issue() if a request
needs to be iopolled to completion. Set the flag in io_rw_init_file()
and io_uring_cmd() for requests issued to IORING_SETUP_IOPOLL ctxs. Use
the request flag instead of IORING_SETUP_IOPOLL in places dealing with a
specific request.

A future possibility would be to add an option to enable/disable iopoll
in the io_uring SQE instead of determining it from IORING_SETUP_IOPOLL.

Signed-off-by: Caleb Sander Mateos <csander@purestorage.com>
Reviewed-by: Kanchan Joshi <joshi.k@samsung.com>
Reviewed-by: Anuj Gupta <anuj20.g@samsung.com>
Link: https://patch.msgid.link/20260302172914.2488599-2-csander@purestorage.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
include/linux/io_uring_types.h
io_uring/io_uring.c
io_uring/rw.c
io_uring/uring_cmd.c

index 0b3f08adc2170c7572d211df32496f6faa454ac5..4dbd7083dd540e0a92c6cb33e5df4fce37598993 100644 (file)
@@ -550,6 +550,7 @@ enum {
        REQ_F_HAS_METADATA_BIT,
        REQ_F_IMPORT_BUFFER_BIT,
        REQ_F_SQE_COPIED_BIT,
+       REQ_F_IOPOLL_BIT,
 
        /* not a real bit, just to check we're not overflowing the space */
        __REQ_F_LAST_BIT,
@@ -641,6 +642,8 @@ enum {
        REQ_F_IMPORT_BUFFER     = IO_REQ_FLAG(REQ_F_IMPORT_BUFFER_BIT),
        /* ->sqe_copy() has been called, if necessary */
        REQ_F_SQE_COPIED        = IO_REQ_FLAG(REQ_F_SQE_COPIED_BIT),
+       /* request must be iopolled to completion (set in ->issue()) */
+       REQ_F_IOPOLL            = IO_REQ_FLAG(REQ_F_IOPOLL_BIT),
 };
 
 struct io_tw_req {
index fb5a263706be4d139a2d0ddac6bca66f49adb2d4..a610eaa5fd7c306c6e74447699d1aa02246efed0 100644 (file)
@@ -356,7 +356,6 @@ static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
 static void io_prep_async_work(struct io_kiocb *req)
 {
        const struct io_issue_def *def = &io_issue_defs[req->opcode];
-       struct io_ring_ctx *ctx = req->ctx;
 
        if (!(req->flags & REQ_F_CREDS)) {
                req->flags |= REQ_F_CREDS;
@@ -378,7 +377,7 @@ static void io_prep_async_work(struct io_kiocb *req)
                if (should_hash && (req->file->f_flags & O_DIRECT) &&
                    (req->file->f_op->fop_flags & FOP_DIO_PARALLEL_WRITE))
                        should_hash = false;
-               if (should_hash || (ctx->flags & IORING_SETUP_IOPOLL))
+               if (should_hash || (req->flags & REQ_F_IOPOLL))
                        io_wq_hash_work(&req->work, file_inode(req->file));
        } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
                if (def->unbound_nonreg_file)
@@ -1419,7 +1418,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
                ret = 0;
 
                /* If the op doesn't have a file, we're not polling for it */
-               if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue)
+               if ((req->flags & REQ_F_IOPOLL) && def->iopoll_queue)
                        io_iopoll_req_issued(req, issue_flags);
        }
        return ret;
@@ -1435,7 +1434,7 @@ int io_poll_issue(struct io_kiocb *req, io_tw_token_t tw)
        io_tw_lock(req->ctx, tw);
 
        WARN_ON_ONCE(!req->file);
-       if (WARN_ON_ONCE(req->ctx->flags & IORING_SETUP_IOPOLL))
+       if (WARN_ON_ONCE(req->flags & REQ_F_IOPOLL))
                return -EFAULT;
 
        ret = __io_issue_sqe(req, issue_flags, &io_issue_defs[req->opcode]);
@@ -1533,7 +1532,7 @@ fail:
                 * wait for request slots on the block side.
                 */
                if (!needs_poll) {
-                       if (!(req->ctx->flags & IORING_SETUP_IOPOLL))
+                       if (!(req->flags & REQ_F_IOPOLL))
                                break;
                        if (io_wq_worker_stopped())
                                break;
index 1a5f262734e8e6525f3e942b1b1d98ec9abf2915..3bdb9914e6734093011c560fe89487aa7f62a69a 100644 (file)
@@ -504,7 +504,7 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
        if (!S_ISBLK(mode) && !S_ISREG(mode))
                return false;
        if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
-           !(ctx->flags & IORING_SETUP_IOPOLL)))
+           !(req->flags & REQ_F_IOPOLL)))
                return false;
        /*
         * If ref is dying, we might be running poll reap from the exit work.
@@ -640,7 +640,7 @@ static inline void io_rw_done(struct io_kiocb *req, ssize_t ret)
                }
        }
 
-       if (req->ctx->flags & IORING_SETUP_IOPOLL)
+       if (req->flags & REQ_F_IOPOLL)
                io_complete_rw_iopoll(&rw->kiocb, ret);
        else
                io_complete_rw(&rw->kiocb, ret);
@@ -654,7 +654,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
 
        if (ret >= 0 && req->flags & REQ_F_CUR_POS)
                req->file->f_pos = rw->kiocb.ki_pos;
-       if (ret >= 0 && !(req->ctx->flags & IORING_SETUP_IOPOLL)) {
+       if (ret >= 0 && !(req->flags & REQ_F_IOPOLL)) {
                u32 cflags = 0;
 
                __io_complete_rw_common(req, ret);
@@ -876,6 +876,7 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
        if (ctx->flags & IORING_SETUP_IOPOLL) {
                if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
                        return -EOPNOTSUPP;
+               req->flags |= REQ_F_IOPOLL;
                kiocb->private = NULL;
                kiocb->ki_flags |= IOCB_HIPRI;
                req->iopoll_completed = 0;
@@ -963,7 +964,7 @@ static int __io_read(struct io_kiocb *req, struct io_br_sel *sel,
                if (io_file_can_poll(req))
                        return -EAGAIN;
                /* IOPOLL retry should happen for io-wq threads */
-               if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
+               if (!force_nonblock && !(req->flags & REQ_F_IOPOLL))
                        goto done;
                /* no retry on NONBLOCK nor RWF_NOWAIT */
                if (req->flags & REQ_F_NOWAIT)
@@ -1188,7 +1189,7 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
                goto done;
        if (!force_nonblock || ret2 != -EAGAIN) {
                /* IOPOLL retry should happen for io-wq threads */
-               if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
+               if (ret2 == -EAGAIN && (req->flags & REQ_F_IOPOLL))
                        goto ret_eagain;
 
                if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
index ee7b49f47cb552aad95f74c3a53e8cad467a433b..b651c63f6e2070fc986da5abb1cc88f07a6b6ba6 100644 (file)
@@ -110,7 +110,7 @@ void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
         * because iopoll completion data overlaps with the hash_node used
         * for tracking.
         */
-       if (ctx->flags & IORING_SETUP_IOPOLL)
+       if (req->flags & REQ_F_IOPOLL)
                return;
 
        if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
@@ -167,7 +167,7 @@ void __io_uring_cmd_done(struct io_uring_cmd *ioucmd, s32 ret, u64 res2,
                io_req_set_cqe32_extra(req, res2, 0);
        }
        io_req_uring_cleanup(req, issue_flags);
-       if (req->ctx->flags & IORING_SETUP_IOPOLL) {
+       if (req->flags & REQ_F_IOPOLL) {
                /* order with io_iopoll_req_issued() checking ->iopoll_complete */
                smp_store_release(&req->iopoll_completed, 1);
        } else if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
@@ -260,6 +260,7 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
        if (ctx->flags & IORING_SETUP_IOPOLL) {
                if (!file->f_op->uring_cmd_iopoll)
                        return -EOPNOTSUPP;
+               req->flags |= REQ_F_IOPOLL;
                issue_flags |= IO_URING_F_IOPOLL;
                req->iopoll_completed = 0;
                if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL) {