nvme: always punt polled uring_cmd end_io work to task_work
author     Jens Axboe <axboe@kernel.dk>
           Fri, 13 Jun 2025 19:37:41 +0000 (13:37 -0600)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sun, 6 Jul 2025 09:00:16 +0000 (11:00 +0200)
Commit 9ce6c9875f3e995be5fd720b65835291f8a609b1 upstream.

Currently, NVMe uring_cmd completions will complete locally if they are
polled. This is done because those completions are always invoked from
task context. And while that is true, there's no guarantee that the
completion is invoked under the right ring context, or even the right
task. If someone does NVMe passthrough via multiple threads with a
limited number of poll queues, then ringA may find completions from
ringB. In that case, completing the request directly may not be sound.

Always just punt the passthrough completions via task_work, which will
redirect the completion, if needed.

Cc: stable@vger.kernel.org
Fixes: 585079b6e425 ("nvme: wire up async polling for io passthrough commands")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/nvme/host/ioctl.c

index 4ce31f9f069475bcce369d348932ac13987f191d..5cf050e562b73442e6dd8a8dd29d1ad4f01c9c8e 100644
@@ -526,16 +526,14 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
        pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64);
 
        /*
-        * For iopoll, complete it directly.
-        * Otherwise, move the completion to task work.
+        * IOPOLL could potentially complete this request directly, but
+        * if multiple rings are polling on the same queue, then it's possible
+        * for one ring to find completions for another ring. Punting the
+        * completion via task_work will always direct it to the right
+        * location, rather than potentially complete requests for ringA
+        * under iopoll invocations from ringB.
         */
-       if (blk_rq_is_poll(req)) {
-               WRITE_ONCE(ioucmd->cookie, NULL);
-               nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED);
-       } else {
-               io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
-       }
-
+       io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
        return RQ_END_IO_FREE;
 }
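
For context (not part of the commit), below is a minimal userspace sketch of
the scenario the commit message describes: two threads, each with its own
IOPOLL ring, issuing NVMe passthrough reads against the same generic char
device. The device path, namespace ID, and LBA size are assumptions, and it
presumes reasonably recent liburing and kernel headers; it only illustrates
how two independently polled rings can end up sharing device poll queues, not
the kernel-side fix itself.

/*
 * Illustrative sketch only: two threads, each with its own IOPOLL ring,
 * doing NVMe passthrough reads against the same generic char device.
 * With a limited number of poll queues, one ring's poll loop can find
 * the other ring's completions; the patch above keeps the completion
 * punted to task_work so it is redirected to the right ring.
 *
 * Assumptions: liburing is installed, /dev/ng0n1 exists, namespace 1
 * uses a 4KiB LBA format, and the nvme driver has poll queues enabled
 * (nvme.poll_queues > 0). Build: gcc -O2 sketch.c -luring -lpthread
 */
#include <fcntl.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <liburing.h>
#include <linux/nvme_ioctl.h>

#define LBA_SIZE	4096	/* assumption: 4KiB LBA format */
#define NR_IOS		256

static void *ring_worker(void *arg)
{
	unsigned long id = (unsigned long)arg;
	struct io_uring ring;
	void *buf;
	int fd, ret, i;

	fd = open("/dev/ng0n1", O_RDONLY);	/* NVMe generic char device */
	if (fd < 0 || posix_memalign(&buf, 4096, LBA_SIZE))
		return NULL;

	/* One ring per thread; big SQEs carry the command, big CQEs the result */
	ret = io_uring_queue_init(64, &ring, IORING_SETUP_IOPOLL |
				  IORING_SETUP_SQE128 | IORING_SETUP_CQE32);
	if (ret)
		return NULL;

	for (i = 0; i < NR_IOS; i++) {
		struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
		struct io_uring_cqe *cqe;
		struct nvme_uring_cmd *cmd;

		memset(sqe, 0, 2 * sizeof(*sqe));	/* big SQE: 128 bytes */
		sqe->opcode = IORING_OP_URING_CMD;
		sqe->fd = fd;
		sqe->cmd_op = NVME_URING_CMD_IO;
		sqe->user_data = (id << 32) | i;

		/* The NVMe command lives in the big-SQE payload area */
		cmd = (struct nvme_uring_cmd *)sqe->cmd;
		cmd->opcode = 0x02;			/* nvme_cmd_read */
		cmd->nsid = 1;				/* assumption: nsid 1 */
		cmd->addr = (__u64)(uintptr_t)buf;
		cmd->data_len = LBA_SIZE;
		cmd->cdw10 = i;				/* starting LBA (low 32 bits) */
		cmd->cdw12 = 0;				/* 0-based count: one block */

		io_uring_submit(&ring);

		/*
		 * Polling this ring may reap completions that a shared device
		 * poll queue finished for the *other* ring, which is exactly
		 * the case the commit message describes.
		 */
		if (io_uring_wait_cqe(&ring, &cqe))
			break;
		if (cqe->res < 0)
			fprintf(stderr, "ring %lu: io error %d\n", id, cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	free(buf);
	close(fd);
	return NULL;
}

int main(void)
{
	pthread_t t[2];
	unsigned long i;

	for (i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, ring_worker, (void *)i);
	for (i = 0; i < 2; i++)
		pthread_join(t[i], NULL);
	return 0;
}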