io_uring/net: fix io_req_post_cqe abuse by send bundle
author	Pavel Begunkov <asml.silence@gmail.com>
	Thu, 27 Mar 2025 09:57:27 +0000 (09:57 +0000)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
	Sun, 20 Apr 2025 08:18:07 +0000 (10:18 +0200)
commit 6889ae1b4df1579bcdffef023e2ea9a982565dff upstream.

[  114.987980][ T5313] WARNING: CPU: 6 PID: 5313 at io_uring/io_uring.c:872 io_req_post_cqe+0x12e/0x4f0
[  114.991597][ T5313] RIP: 0010:io_req_post_cqe+0x12e/0x4f0
[  115.001880][ T5313] Call Trace:
[  115.002222][ T5313]  <TASK>
[  115.007813][ T5313]  io_send+0x4fe/0x10f0
[  115.009317][ T5313]  io_issue_sqe+0x1a6/0x1740
[  115.012094][ T5313]  io_wq_submit_work+0x38b/0xed0
[  115.013223][ T5313]  io_worker_handle_work+0x62a/0x1600
[  115.013876][ T5313]  io_wq_worker+0x34f/0xdf0

As the comment states, io_req_post_cqe() should only be used by
multishot requests, i.e. REQ_F_APOLL_MULTISHOT, which bundled sends are
not. Add a flag signifying whether a request wants to post multiple
CQEs. Eventually REQ_F_APOLL_MULTISHOT should imply the new flag, but
that's left out for simplicity.
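
For context, a minimal userspace sketch (not the original reproducer) of the
request shape that reaches this path: a provided-buffer send bundle forced to
io-wq via IOSQE_ASYNC, matching the io_wq_submit_work() frame in the trace
above. It assumes liburing >= 2.6 and a kernel with send-bundle support
(IORING_RECVSEND_BUNDLE); buffer count/size are arbitrary and error handling
is trimmed:

	/*
	 * Hedged sketch, not the syzbot reproducer: a bundled send pushed
	 * to io-wq. Assumes liburing >= 2.6 and send-bundle support.
	 */
	#include <liburing.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>

	#define BGID	0	/* arbitrary provided-buffer group id */
	#define NBUFS	4
	#define BUFSZ	256

	int main(void)
	{
		static char bufs[NBUFS][BUFSZ];
		struct io_uring_buf_ring *br;
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;
		struct io_uring ring;
		int sv[2], i, ret;

		if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) ||
		    io_uring_queue_init(8, &ring, 0))
			return 1;

		/* provided-buffer ring the bundle pulls its data from */
		br = io_uring_setup_buf_ring(&ring, NBUFS, BGID, 0, &ret);
		if (!br)
			return 1;
		for (i = 0; i < NBUFS; i++) {
			memset(bufs[i], 'a' + i, BUFSZ);
			io_uring_buf_ring_add(br, bufs[i], BUFSZ, i,
					      io_uring_buf_ring_mask(NBUFS), i);
		}
		io_uring_buf_ring_advance(br, NBUFS);

		/* bundled send: buffer select + IORING_RECVSEND_BUNDLE */
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_send(sqe, sv[0], NULL, 0, 0);
		/* IOSQE_ASYNC: force io-wq execution, as in the trace */
		sqe->flags |= IOSQE_BUFFER_SELECT | IOSQE_ASYNC;
		sqe->buf_group = BGID;
		sqe->ioprio |= IORING_RECVSEND_BUNDLE;

		io_uring_submit(&ring);
		if (!io_uring_wait_cqe(&ring, &cqe)) {
			printf("send res=%d flags=0x%x\n", cqe->res, cqe->flags);
			io_uring_cqe_seen(&ring, cqe);
		}
		io_uring_queue_exit(&ring);
		return 0;
	}

Before this patch, issuing such a request from io-wq let io_send() call
io_req_post_cqe() from a worker; with it, the widened io-wq gate in the
io_uring.c hunk below either arms poll or downgrades the request.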

Cc: stable@vger.kernel.org
Fixes: a05d1f625c7aa ("io_uring/net: support bundles for send")
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/8b611dbb54d1cd47a88681f5d38c84d0c02bc563.1743067183.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
include/linux/io_uring_types.h
io_uring/io_uring.c
io_uring/net.c

diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index fd4cdb0860a28abbf3f698feea49d6e4090b90bf..b6210c44346dee1b2b0c714c1d9f3890666532d7 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -470,6 +470,7 @@ enum {
        REQ_F_SKIP_LINK_CQES_BIT,
        REQ_F_SINGLE_POLL_BIT,
        REQ_F_DOUBLE_POLL_BIT,
+       REQ_F_MULTISHOT_BIT,
        REQ_F_APOLL_MULTISHOT_BIT,
        REQ_F_CLEAR_POLLIN_BIT,
        /* keep async read/write and isreg together and in order */
@@ -545,6 +546,8 @@ enum {
        REQ_F_SINGLE_POLL       = IO_REQ_FLAG(REQ_F_SINGLE_POLL_BIT),
        /* double poll may active */
        REQ_F_DOUBLE_POLL       = IO_REQ_FLAG(REQ_F_DOUBLE_POLL_BIT),
+       /* request posts multiple completions, should be set at prep time */
+       REQ_F_MULTISHOT         = IO_REQ_FLAG(REQ_F_MULTISHOT_BIT),
        /* fast poll multishot mode */
        REQ_F_APOLL_MULTISHOT   = IO_REQ_FLAG(REQ_F_APOLL_MULTISHOT_BIT),
        /* recvmsg special flag, clear EPOLLIN */
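
(Aside for readers outside the tree: each REQ_F_*_BIT entry above pairs with
a mask via IO_REQ_FLAG(), which wraps BIT_ULL(). A freestanding approximation
of that pattern, and of the widened two-flag test the next hunk adds, with a
stdint type standing in for the kernel's io_req_flags_t:)

	#include <stdint.h>
	#include <stdio.h>

	/* stand-in for the kernel's IO_REQ_FLAG()/BIT_ULL() pairing */
	#define IO_REQ_FLAG(bitno)	((uint64_t)1 << (bitno))

	enum {
		REQ_F_MULTISHOT_BIT,
		REQ_F_APOLL_MULTISHOT_BIT,
	};

	static const uint64_t REQ_F_MULTISHOT =
		IO_REQ_FLAG(REQ_F_MULTISHOT_BIT);
	static const uint64_t REQ_F_APOLL_MULTISHOT =
		IO_REQ_FLAG(REQ_F_APOLL_MULTISHOT_BIT);

	int main(void)
	{
		/* send bundle: set at prep time, long before issue */
		uint64_t flags = REQ_F_MULTISHOT;

		/* the io-wq gate: either flag means "posts many CQEs" */
		if (flags & (REQ_F_MULTISHOT | REQ_F_APOLL_MULTISHOT))
			puts("routed through the multishot path");
		return 0;
	}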
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 0b0dfef934803627448ff52a10f78f7379822a09..dcfdaf2686c0b35bad4a9fb23c34755be253965b 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1818,7 +1818,7 @@ fail:
         * Don't allow any multishot execution from io-wq. It's more restrictive
         * than necessary and also cleaner.
         */
-       if (req->flags & REQ_F_APOLL_MULTISHOT) {
+       if (req->flags & (REQ_F_MULTISHOT|REQ_F_APOLL_MULTISHOT)) {
                err = -EBADFD;
                if (!io_file_can_poll(req))
                        goto fail;
@@ -1829,7 +1829,7 @@ fail:
                                goto fail;
                        return;
                } else {
-                       req->flags &= ~REQ_F_APOLL_MULTISHOT;
+                       req->flags &= ~(REQ_F_APOLL_MULTISHOT|REQ_F_MULTISHOT);
                }
        }
 
diff --git a/io_uring/net.c b/io_uring/net.c
index b10afcc00b3b0e635480b4fd493e0daa18af86d7..f4835e14c77972b06f27a46274adcd1df476fc8e 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -441,6 +441,7 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                sr->msg_flags |= MSG_WAITALL;
                sr->buf_group = req->buf_index;
                req->buf_list = NULL;
+               req->flags |= REQ_F_MULTISHOT;
        }
 
 #ifdef CONFIG_COMPAT