]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
io_uring/kbuf: propagate BUF_MORE through early buffer commit path
authorJens Axboe <axboe@kernel.dk>
Thu, 19 Mar 2026 20:29:20 +0000 (14:29 -0600)
committerJens Axboe <axboe@kernel.dk>
Thu, 19 Mar 2026 21:09:48 +0000 (15:09 -0600)
When io_should_commit() returns true (e.g. for non-pollable files), buffer
commit happens at buffer selection time and sel->buf_list is set to
NULL. When __io_put_kbufs() generates CQE flags at completion time, it
calls __io_put_kbuf_ring() which finds a NULL buffer_list and hence
cannot determine whether the buffer was consumed or not. This means that
IORING_CQE_F_BUF_MORE is never set for non-pollable input with
incrementally consumed buffers.

Likewise for io_buffers_select(), which always commits upfront and
discards the return value of io_kbuf_commit().

Add REQ_F_BUF_MORE to store the result of io_kbuf_commit() during early
commit. Then __io_put_kbuf_ring() can check this flag and set
IORING_CQE_F_BUF_MORE accordingly.

Reported-by: Martin Michaelis <code@mgjm.de>
Cc: stable@vger.kernel.org
Fixes: ae98dbf43d75 ("io_uring/kbuf: add support for incremental buffer consumption")
Link: https://github.com/axboe/liburing/issues/1553
Signed-off-by: Jens Axboe <axboe@kernel.dk>
include/linux/io_uring_types.h
io_uring/kbuf.c

index dd1420bfcb7350478b0932962ec11ad18cb97798..214fdbd490522d0cc100083e6cd065dad4b7d3de 100644 (file)
@@ -541,6 +541,7 @@ enum {
        REQ_F_BL_NO_RECYCLE_BIT,
        REQ_F_BUFFERS_COMMIT_BIT,
        REQ_F_BUF_NODE_BIT,
+       REQ_F_BUF_MORE_BIT,
        REQ_F_HAS_METADATA_BIT,
        REQ_F_IMPORT_BUFFER_BIT,
        REQ_F_SQE_COPIED_BIT,
@@ -626,6 +627,8 @@ enum {
        REQ_F_BUFFERS_COMMIT    = IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT),
        /* buf node is valid */
        REQ_F_BUF_NODE          = IO_REQ_FLAG(REQ_F_BUF_NODE_BIT),
+       /* incremental buffer consumption, more space available */
+       REQ_F_BUF_MORE          = IO_REQ_FLAG(REQ_F_BUF_MORE_BIT),
        /* request has read/write metadata assigned */
        REQ_F_HAS_METADATA      = IO_REQ_FLAG(REQ_F_HAS_METADATA_BIT),
        /*
index a4cb6752b7aa6e99fc4b014d02a53f31c7aa5fdb..f72f38d22d2b38806c68469cea84eddd95388ab0 100644 (file)
@@ -216,7 +216,8 @@ static struct io_br_sel io_ring_buffer_select(struct io_kiocb *req, size_t *len,
        sel.addr = u64_to_user_ptr(READ_ONCE(buf->addr));
 
        if (io_should_commit(req, issue_flags)) {
-               io_kbuf_commit(req, sel.buf_list, *len, 1);
+               if (!io_kbuf_commit(req, sel.buf_list, *len, 1))
+                       req->flags |= REQ_F_BUF_MORE;
                sel.buf_list = NULL;
        }
        return sel;
@@ -349,7 +350,8 @@ int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
                 */
                if (ret > 0) {
                        req->flags |= REQ_F_BUFFERS_COMMIT | REQ_F_BL_NO_RECYCLE;
-                       io_kbuf_commit(req, sel->buf_list, arg->out_len, ret);
+                       if (!io_kbuf_commit(req, sel->buf_list, arg->out_len, ret))
+                               req->flags |= REQ_F_BUF_MORE;
                }
        } else {
                ret = io_provided_buffers_select(req, &arg->out_len, sel->buf_list, arg->iovs);
@@ -395,8 +397,10 @@ static inline bool __io_put_kbuf_ring(struct io_kiocb *req,
 
        if (bl)
                ret = io_kbuf_commit(req, bl, len, nr);
+       if (ret && (req->flags & REQ_F_BUF_MORE))
+               ret = false;
 
-       req->flags &= ~REQ_F_BUFFER_RING;
+       req->flags &= ~(REQ_F_BUFFER_RING | REQ_F_BUF_MORE);
        return ret;
 }