io_uring/kbuf: pass bgid to io_buffer_select()
author    Pavel Begunkov <asml.silence@gmail.com>
          Mon, 31 Mar 2025 16:18:01 +0000 (17:18 +0100)
committer Jens Axboe <axboe@kernel.dk>
          Mon, 21 Apr 2025 11:06:58 +0000 (05:06 -0600)
The current situation with buffer group id juggling is not ideal.
req->buf_index first stores the bgid, then it's overwritten by a buffer
id, and then it can get restored back on recycling / etc. It's not easy
to control, and it's not handled consistently across request types, with
receive requests saving and restoring the bgid by hand.

This is a prep patch that adds a buffer group id argument to
io_buffer_select(). The caller is now responsible for stashing a copy
of the bgid somewhere and passing it into the function.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/a210d6427cc3f4f42271a6853274cd5a50e56820.1743437358.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
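
As an illustration of that pattern, here is a minimal userspace sketch
(all names are hypothetical stand-ins, not code from the patch): the bgid
is stashed once at prep time and passed to the select helper explicitly,
so it survives buf_index being overwritten with the selected buffer id.

        /*
         * Illustrative userspace sketch, not kernel code: models the calling
         * pattern the patch moves to.
         */
        #include <stdio.h>

        struct req {
                unsigned int buf_index;         /* bgid at first, later the picked buffer id */
        };

        struct async_data {
                unsigned int buf_group;         /* stashed copy of the bgid, never overwritten */
        };

        /* stand-in for io_buffer_select(): the group id now comes from the caller */
        static unsigned int buffer_select(struct req *req, unsigned int bgid)
        {
                unsigned int bid = bgid * 100 + 1;      /* pretend lookup in group 'bgid' */

                req->buf_index = bid;                   /* the bgid in buf_index is gone now */
                return bid;
        }

        int main(void)
        {
                struct req req = { .buf_index = 7 };                    /* sqe->buf_index carries the bgid */
                struct async_data io = { .buf_group = req.buf_index };  /* prep: stash a copy */
                unsigned int bid = buffer_select(&req, io.buf_group);   /* issue: pass the copy */

                /* the bgid is still known for recycling, even though buf_index changed */
                printf("bgid=%u bid=%u buf_index=%u\n", io.buf_group, bid, req.buf_index);
                return 0;
        }

The diffs below apply the same idea in-tree: net.c stashes the bgid in
sr->buf_group at prep time, and rw.c adds io_async_rw::buf_group for the
read/write path.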
io_uring/kbuf.c
io_uring/kbuf.h
io_uring/net.c
io_uring/rw.c
io_uring/rw.h

io_uring/kbuf.c
index 953d5e74256916875e3497153fbc9a57ab958c41..f195876732be4bf50e14718b1dd082700f5e6817 100644 (file)
@@ -193,7 +193,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
 }
 
 void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
-                             unsigned int issue_flags)
+                             unsigned buf_group, unsigned int issue_flags)
 {
        struct io_ring_ctx *ctx = req->ctx;
        struct io_buffer_list *bl;
@@ -201,7 +201,7 @@ void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
 
        io_ring_submit_lock(req->ctx, issue_flags);
 
-       bl = io_buffer_get_list(ctx, req->buf_index);
+       bl = io_buffer_get_list(ctx, buf_group);
        if (likely(bl)) {
                if (bl->flags & IOBL_BUF_RING)
                        ret = io_ring_buffer_select(req, len, bl, issue_flags);
io_uring/kbuf.h
index 2ec0b983ce243c2b43abafd8c3ceebdbb0446059..09129115f3efd59dc46e0ab7e4aa269a49f2f9ee 100644 (file)
@@ -58,7 +58,7 @@ struct buf_sel_arg {
 };
 
 void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
-                             unsigned int issue_flags);
+                             unsigned buf_group, unsigned int issue_flags);
 int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
                      unsigned int issue_flags);
 int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg);
io_uring/net.c
index eb2130112e0e59c0c2185a8c13edc6751fe7bffe..6314b1583c8c476b8530a9f8cca8eb58a69df863 100644 (file)
@@ -407,13 +407,12 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
        if (sr->msg_flags & MSG_DONTWAIT)
                req->flags |= REQ_F_NOWAIT;
+       if (req->flags & REQ_F_BUFFER_SELECT)
+               sr->buf_group = req->buf_index;
        if (sr->flags & IORING_RECVSEND_BUNDLE) {
                if (req->opcode == IORING_OP_SENDMSG)
                        return -EINVAL;
-               if (!(req->flags & REQ_F_BUFFER_SELECT))
-                       return -EINVAL;
                sr->msg_flags |= MSG_WAITALL;
-               sr->buf_group = req->buf_index;
                req->buf_list = NULL;
                req->flags |= REQ_F_MULTISHOT;
        }
@@ -979,7 +978,7 @@ retry_multishot:
                void __user *buf;
                size_t len = sr->len;
 
-               buf = io_buffer_select(req, &len, issue_flags);
+               buf = io_buffer_select(req, &len, sr->buf_group, issue_flags);
                if (!buf)
                        return -ENOBUFS;
 
@@ -1089,7 +1088,7 @@ static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg
                void __user *buf;
 
                *len = sr->len;
-               buf = io_buffer_select(req, len, issue_flags);
+               buf = io_buffer_select(req, len, sr->buf_group, issue_flags);
                if (!buf)
                        return -ENOBUFS;
                sr->buf = buf;
io_uring/rw.c
index 039e063f7091ebb8c3ac2e1412785747ceadf208..17a12a1cf3a63012501a573aa9f6d3c09c6d8b9c 100644 (file)
@@ -119,7 +119,7 @@ static int __io_import_rw_buffer(int ddir, struct io_kiocb *req,
                return io_import_vec(ddir, req, io, buf, sqe_len);
 
        if (io_do_buffer_select(req)) {
-               buf = io_buffer_select(req, &sqe_len, issue_flags);
+               buf = io_buffer_select(req, &sqe_len, io->buf_group, issue_flags);
                if (!buf)
                        return -ENOBUFS;
                rw->addr = (unsigned long) buf;
@@ -253,16 +253,19 @@ static int __io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                        int ddir)
 {
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+       struct io_async_rw *io;
        unsigned ioprio;
        u64 attr_type_mask;
        int ret;
 
        if (io_rw_alloc_async(req))
                return -ENOMEM;
+       io = req->async_data;
 
        rw->kiocb.ki_pos = READ_ONCE(sqe->off);
        /* used for fixed read/write too - just read unconditionally */
        req->buf_index = READ_ONCE(sqe->buf_index);
+       io->buf_group = req->buf_index;
 
        ioprio = READ_ONCE(sqe->ioprio);
        if (ioprio) {
io_uring/rw.h
index 81d6d9a8cf6937ace677aaf1e27071858b728fc6..129a53fe54825d7ec35108627bf0bf5030943edb 100644 (file)
@@ -16,6 +16,8 @@ struct io_async_rw {
                struct iov_iter                 iter;
                struct iov_iter_state           iter_state;
                struct iovec                    fast_iov;
+               unsigned                        buf_group;
+
                /*
                 * wpq is for buffered io, while meta fields are used with
                 * direct io