io_uring: fix IOPOLL with passthrough I/O
author Jens Axboe <axboe@kernel.dk>
	Wed, 14 Jan 2026 14:59:08 +0000 (07:59 -0700)
committer Jens Axboe <axboe@kernel.dk>
	Thu, 15 Jan 2026 05:03:49 +0000 (22:03 -0700)
A previous commit improving IOPOLL made an incorrect assumption that
task_work isn't used with IOPOLL. This can cause crashes when doing
passthrough I/O on nvme, where queueing the completion task_work will
trample on the same memory that holds the completed list of requests.

Fix it up by shuffling the members around, so we're not sharing any
parts that end up getting used in this path.

Fixes: 3c7d76d6128a ("io_uring: IOPOLL polling improvements")
Reported-by: Yi Zhang <yi.zhang@redhat.com>
Link: https://lore.kernel.org/linux-block/CAHj4cs_SLPj9v9w5MgfzHKy+983enPx3ZQY2kMuMJ1202DBefw@mail.gmail.com/
Tested-by: Yi Zhang <yi.zhang@redhat.com>
Cc: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
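
To see why the shared storage is unsafe, here is a minimal C sketch of the pre-fix union (the types below are illustrative stand-ins, not the kernel's definitions): once the IOPOLL completion path has linked the request into its list, queueing completion task_work reuses the same bytes and corrupts the list linkage.

#include <stdio.h>

/* Illustrative stand-ins for the kernel types; not the real layout. */
struct list_head { struct list_head *next, *prev; };
struct io_task_work { void *node; void (*func)(void *); };

struct req_sketch {
	union {				/* the pre-fix union: same bytes */
		struct io_task_work	io_task_work;
		struct list_head	iopoll_node;
	};
};

static void complete_cb(void *arg) { (void)arg; }

int main(void)
{
	struct req_sketch req;

	/* The IOPOLL path links the request into a completion list... */
	req.iopoll_node.next = req.iopoll_node.prev = &req.iopoll_node;

	/* ...then queueing completion task_work reuses the storage... */
	req.io_task_work.node = NULL;
	req.io_task_work.func = complete_cb;

	/* ...so the list pointers the poller still walks are now garbage. */
	printf("iopoll_node.next is now %p (was %p)\n",
	       (void *)req.iopoll_node.next, (void *)&req.iopoll_node);
	return 0;
}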
include/linux/io_uring_types.h
io_uring/rw.c

include/linux/io_uring_types.h
index e4c804f99c305ae69e67085934c6cd8eeaf57dfa..211686ad89fd263f534154467ae786c1220907b4 100644 (file)
@@ -713,13 +713,10 @@ struct io_kiocb {
        atomic_t                        refs;
        bool                            cancel_seq_set;
 
-       /*
-        * IOPOLL doesn't use task_work, so use the ->iopoll_node list
-        * entry to manage pending iopoll requests.
-        */
        union {
                struct io_task_work     io_task_work;
-               struct list_head        iopoll_node;
+               /* For IOPOLL setup queues, with hybrid polling */
+               u64                     iopoll_start;
        };
 
        union {
@@ -728,8 +725,8 @@ struct io_kiocb {
                 * poll
                 */
                struct hlist_node       hash_node;
-               /* For IOPOLL setup queues, with hybrid polling */
-               u64                     iopoll_start;
+               /* IOPOLL completion handling */
+               struct list_head        iopoll_node;
                /* for private io_kiocb freeing */
                struct rcu_head         rcu_head;
        };
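
Condensed, the post-fix layout from the hunk above pairs each union member with others whose lifetimes cannot overlap: iopoll_start is snapshotted before polling can complete the request (see the rw.c change below), and iopoll_node now shares space only with poll hashing and RCU freeing, which are never active during IOPOLL completion. A compilable sketch with stand-in types (not the full io_kiocb):

#include <stdint.h>

/* Stand-ins for the kernel types named in the hunk; not the real ones. */
struct io_task_work { void *node; void (*func)(void *); };
struct hlist_node   { struct hlist_node *next, **pprev; };
struct list_head    { struct list_head *next, *prev; };
struct rcu_head     { struct rcu_head *next; void (*func)(struct rcu_head *); };

struct io_kiocb_sketch {
	union {
		struct io_task_work	io_task_work;
		/* For IOPOLL setup queues, with hybrid polling */
		uint64_t		iopoll_start;
	};
	union {
		/* for polled requests and async armed poll */
		struct hlist_node	hash_node;
		/* IOPOLL completion handling */
		struct list_head	iopoll_node;
		/* for private io_kiocb freeing */
		struct rcu_head		rcu_head;
	};
};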
io_uring/rw.c
index 307f1f39d9f391b6b87ce1767c92f4653706a0e0..c33c533a267ee52237aa4f7c8f055cf25113706a 100644 (file)
@@ -1296,12 +1296,13 @@ static int io_uring_hybrid_poll(struct io_kiocb *req,
                                struct io_comp_batch *iob, unsigned int poll_flags)
 {
        struct io_ring_ctx *ctx = req->ctx;
-       u64 runtime, sleep_time;
+       u64 runtime, sleep_time, iopoll_start;
        int ret;
 
+       iopoll_start = READ_ONCE(req->iopoll_start);
        sleep_time = io_hybrid_iopoll_delay(ctx, req);
        ret = io_uring_classic_poll(req, iob, poll_flags);
-       runtime = ktime_get_ns() - req->iopoll_start - sleep_time;
+       runtime = ktime_get_ns() - iopoll_start - sleep_time;
 
        /*
         * Use minimum sleep time if we're polling devices with different
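
The rw.c change follows directly from the new layout: io_uring_classic_poll() may complete the request and queue task_work, which now reuses the bytes backing iopoll_start, so the start time must be read out first. A minimal sketch of the snapshot-before-poll pattern, with stand-in helpers (not the real io_uring functions):

#include <stdint.h>

struct fake_req {
	uint64_t iopoll_start;	/* unioned with task_work in the kernel */
};

/* Stand-ins: the kernel uses ktime_get_ns() and io_uring_classic_poll(). */
static uint64_t now_ns(void) { return 0; }
static int classic_poll(struct fake_req *req) { (void)req; return 0; }

static uint64_t poll_runtime(struct fake_req *req, uint64_t sleep_time)
{
	/* Snapshot first: polling may complete the request and queue
	 * task_work, overwriting the union that backs iopoll_start.
	 * The kernel reads it with READ_ONCE() here. */
	uint64_t start = req->iopoll_start;

	classic_poll(req);
	return now_ns() - start - sleep_time;
}

int main(void)
{
	struct fake_req req = { .iopoll_start = 0 };
	return (int)poll_runtime(&req, 0);
}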