io_uring/rw: avoid punting to io-wq directly
author     Pavel Begunkov <asml.silence@gmail.com>
           Mon, 18 Mar 2024 22:00:28 +0000 (22:00 +0000)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Fri, 27 Dec 2024 12:53:02 +0000 (13:53 +0100)
Commit 6e6b8c62120a22acd8cb759304e4cd2e3215d488 upstream.

kiocb_done() shouldn't have to care about specifically redirecting requests
to io-wq. Remove the hop to task work that then queues the request to io-wq;
return -EAGAIN instead and let the core io_uring code handle the offloading.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Tested-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/413564e550fe23744a970e1783dfa566291b0e6f.1710799188.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
(cherry picked from commit 6e6b8c62120a22acd8cb759304e4cd2e3215d488)
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
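
As a rough illustration of the new flow, the sketch below is a minimal
user-space C model, not kernel code: the model_* names are hypothetical
stand-ins for kiocb_done(), io_queue_async() and io_queue_iowq(). It only
shows that the completion path now reports -EAGAIN and that the core issue
path, not kiocb_done(), decides whether to punt the request to io-wq.

/*
 * Simplified user-space model of the dispatch change; illustration only.
 * The model_* functions are hypothetical stand-ins, not the kernel
 * symbols touched by the diff below.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct model_req {
	bool reissue;		/* models REQ_F_REISSUE being set */
};

/* Completion path: no longer queues io-wq itself, just reports -EAGAIN. */
static int model_kiocb_done(struct model_req *req)
{
	if (req->reissue)
		return -EAGAIN;
	return 0;
}

/* Stand-in for punting a request to the io-wq worker pool. */
static void model_io_queue_iowq(struct model_req *req)
{
	(void)req;
	printf("request offloaded to io-wq by the core issue path\n");
}

/* Core issue path: owns the decision to offload when it sees -EAGAIN. */
static void model_io_queue_async(struct model_req *req, int ret)
{
	if (ret == -EAGAIN)
		model_io_queue_iowq(req);
	else
		printf("request completed inline, ret=%d\n", ret);
}

int main(void)
{
	struct model_req req = { .reissue = true };

	model_io_queue_async(&req, model_kiocb_done(&req));
	return 0;
}

Previously, kiocb_done() scheduled task work whose only job was to call
io_queue_iowq(); dropping that hop lets the existing -EAGAIN handling in
the core issue path perform the offload.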
io_uring/io_uring.c
io_uring/io_uring.h
io_uring/rw.c

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 10070cd867b4abff07bc21b7c7cd6edda3400485..9b58ba4616d40bc8c1f7c133fe3b8a52008dee36 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -434,7 +434,7 @@ static void io_prep_async_link(struct io_kiocb *req)
        }
 }
 
-void io_queue_iowq(struct io_kiocb *req, bool *dont_use)
+static void io_queue_iowq(struct io_kiocb *req)
 {
        struct io_kiocb *link = io_prep_linked_timeout(req);
        struct io_uring_task *tctx = req->task->io_uring;
@@ -1913,7 +1913,7 @@ static void io_queue_async(struct io_kiocb *req, int ret)
                break;
        case IO_APOLL_ABORTED:
                io_kbuf_recycle(req, 0);
-               io_queue_iowq(req, NULL);
+               io_queue_iowq(req);
                break;
        case IO_APOLL_OK:
                break;
@@ -1962,7 +1962,7 @@ static void io_queue_sqe_fallback(struct io_kiocb *req)
                if (unlikely(req->ctx->drain_active))
                        io_drain_req(req);
                else
-                       io_queue_iowq(req, NULL);
+                       io_queue_iowq(req);
        }
 }
 
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 3b87f5421eb628cf07f13bc60600ad6f060de00a..a1f679b8199ea05b251bcc27d3b819f7d8c46303 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -54,7 +54,6 @@ static inline bool io_req_ffs_set(struct io_kiocb *req)
 void __io_req_task_work_add(struct io_kiocb *req, bool allow_local);
 bool io_alloc_async_data(struct io_kiocb *req);
 void io_req_task_queue(struct io_kiocb *req);
-void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
 void io_req_task_complete(struct io_kiocb *req, bool *locked);
 void io_req_task_queue_fail(struct io_kiocb *req, int ret);
 void io_req_task_submit(struct io_kiocb *req, bool *locked);
diff --git a/io_uring/rw.c b/io_uring/rw.c
index b32395d872c6856d3f03434f66df5ee13f9fff61..692663bd864fbd01d813b11a615fb6219d45774a 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -167,12 +167,6 @@ static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
        return NULL;
 }
 
-static void io_req_task_queue_reissue(struct io_kiocb *req)
-{
-       req->io_task_work.func = io_queue_iowq;
-       io_req_task_work_add(req);
-}
-
 #ifdef CONFIG_BLOCK
 static bool io_resubmit_prep(struct io_kiocb *req)
 {
@@ -341,7 +335,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
        if (req->flags & REQ_F_REISSUE) {
                req->flags &= ~REQ_F_REISSUE;
                if (io_resubmit_prep(req))
-                       io_req_task_queue_reissue(req);
+                       return -EAGAIN;
                else
                        io_req_task_queue_fail(req, final_ret);
        }