git.ipfire.org Git - thirdparty/linux.git/commitdiff
io_uring/rsrc: add & apply io_req_assign_buf_node()
authorMing Lei <ming.lei@redhat.com>
Thu, 7 Nov 2024 11:01:36 +0000 (19:01 +0800)
committerJens Axboe <axboe@kernel.dk>
Thu, 7 Nov 2024 22:24:33 +0000 (15:24 -0700)
The following pattern is becoming more and more common:

+       io_req_assign_rsrc_node(&req->buf_node, node);
+       req->flags |= REQ_F_BUF_NODE;

so turn it into a helper, which is less fragile to use than the above
code; for example, the BUF_NODE flag is even missing in the current
io_uring_cmd_prep().

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20241107110149.890530-4-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
io_uring/net.c
io_uring/nop.c
io_uring/rsrc.h
io_uring/rw.c
io_uring/uring_cmd.c

index 2ccc2b4094310f93dcb732e77ca8eff5ddaab0ef..df1f7dc6f1c8f3059ae0737f743e9cdc93442e30 100644 (file)
@@ -1348,8 +1348,7 @@ static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
                io_ring_submit_lock(ctx, issue_flags);
                node = io_rsrc_node_lookup(&ctx->buf_table, sr->buf_index);
                if (node) {
-                       io_req_assign_rsrc_node(&sr->notif->buf_node, node);
-                       sr->notif->flags |= REQ_F_BUF_NODE;
+                       io_req_assign_buf_node(sr->notif, node);
                        ret = 0;
                }
                io_ring_submit_unlock(ctx, issue_flags);
index bc22bcc739f37d8e258ec77fbf0ab10c36048d15..6d470d4251eef96ecf9e3b77c44388d3112901f4 100644 (file)
@@ -67,8 +67,7 @@ int io_nop(struct io_kiocb *req, unsigned int issue_flags)
                io_ring_submit_lock(ctx, issue_flags);
                node = io_rsrc_node_lookup(&ctx->buf_table, nop->buffer);
                if (node) {
-                       io_req_assign_rsrc_node(&req->buf_node, node);
-                       req->flags |= REQ_F_BUF_NODE;
+                       io_req_assign_buf_node(req, node);
                        ret = 0;
                }
                io_ring_submit_unlock(ctx, issue_flags);
index c8a64a9ed5b9e3424c72978483f80b115b594d73..7a4668deaa1a9efb3728eaae53bcb8b84792898f 100644 (file)
@@ -111,6 +111,13 @@ static inline void io_req_assign_rsrc_node(struct io_rsrc_node **dst_node,
        *dst_node = node;
 }
 
+static inline void io_req_assign_buf_node(struct io_kiocb *req,
+                                         struct io_rsrc_node *node)
+{
+       io_req_assign_rsrc_node(&req->buf_node, node);
+       req->flags |= REQ_F_BUF_NODE;
+}
+
 int io_files_update(struct io_kiocb *req, unsigned int issue_flags);
 int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 
index e368b9afde03864a4ddd1459266254a27659841d..b62cdb5fc9368901768d6ccb083d742d5c6e1748 100644 (file)
@@ -341,8 +341,7 @@ static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe
        node = io_rsrc_node_lookup(&ctx->buf_table, req->buf_index);
        if (!node)
                return -EFAULT;
-       io_req_assign_rsrc_node(&req->buf_node, node);
-       req->flags |= REQ_F_BUF_NODE;
+       io_req_assign_buf_node(req, node);
 
        io = req->async_data;
        ret = io_import_fixed(ddir, &io->iter, node->buf, rw->addr, rw->len);
index 40b8b777ba120357ea1b87bc0a3a10a572ea7016..b62965f58f30be6abd15abe4fee91f23280770d4 100644 (file)
@@ -219,7 +219,7 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                 * being called. This prevents destruction of the mapped buffer
                 * we'll need at actual import time.
                 */
-               io_req_assign_rsrc_node(&req->buf_node, node);
+               io_req_assign_buf_node(req, node);
        }
        ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);