git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
selftests: ublk: add ublk_io_buf_idx() for returning io buffer index
authorMing Lei <ming.lei@redhat.com>
Fri, 16 Jan 2026 14:18:50 +0000 (22:18 +0800)
committerJens Axboe <axboe@kernel.dk>
Fri, 23 Jan 2026 03:05:41 +0000 (20:05 -0700)
Since UBLK_F_PER_IO_DAEMON was added, the io buffer index may depend on the
current thread, because the common approach is to use a per-pthread
io_ring_ctx for issuing ublk uring_cmds.

Add a helper that returns the io buffer index, so the buffer-index
implementation details are hidden from target code.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
tools/testing/selftests/ublk/file_backed.c
tools/testing/selftests/ublk/kublk.c
tools/testing/selftests/ublk/kublk.h
tools/testing/selftests/ublk/null.c
tools/testing/selftests/ublk/stripe.c

index 889047bd8fa3719723593b52f3decebe42956764..228af2580ac6845f1490395467cbe7c3c9315fd8 100644 (file)
@@ -39,6 +39,7 @@ static int loop_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q,
        __u32 len = iod->nr_sectors << 9;
        struct io_uring_sqe *sqe[3];
        void *addr = io->buf_addr;
+       unsigned short buf_index = ublk_io_buf_idx(t, q, tag);
 
        if (iod->op_flags & UBLK_IO_F_INTEGRITY) {
                ublk_io_alloc_sqes(t, sqe, 1);
@@ -62,7 +63,7 @@ static int loop_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q,
                                len,
                                offset);
                if (auto_zc)
-                       sqe[0]->buf_index = tag;
+                       sqe[0]->buf_index = buf_index;
                io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);
                /* bit63 marks us as tgt io */
                sqe[0]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);
@@ -71,7 +72,7 @@ static int loop_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q,
 
        ublk_io_alloc_sqes(t, sqe, 3);
 
-       io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, io->buf_index);
+       io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, buf_index);
        sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
        sqe[0]->user_data = build_user_data(tag,
                        ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1);
@@ -79,11 +80,11 @@ static int loop_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q,
        io_uring_prep_rw(op, sqe[1], ublk_get_registered_fd(q, 1) /*fds[1]*/, 0,
                        len,
                        offset);
-       sqe[1]->buf_index = tag;
+       sqe[1]->buf_index = buf_index;
        sqe[1]->flags |= IOSQE_FIXED_FILE | IOSQE_IO_HARDLINK;
        sqe[1]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);
 
-       io_uring_prep_buf_unregister(sqe[2], q, tag, q->q_id, io->buf_index);
+       io_uring_prep_buf_unregister(sqe[2], q, tag, q->q_id, buf_index);
        sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, q->q_id, 1);
 
        return !!(iod->op_flags & UBLK_IO_F_INTEGRITY) + 2;
index e98999bea9b1470343b3805e28acc7a2cc82ac27..9b6f1cd04dc44843c2aa5220b1c3ace806d0c4c5 100644 (file)
@@ -605,16 +605,17 @@ static void ublk_dev_unprep(struct ublk_dev *dev)
        close(dev->fds[0]);
 }
 
-static void ublk_set_auto_buf_reg(const struct ublk_queue *q,
+static void ublk_set_auto_buf_reg(const struct ublk_thread *t,
+                                 const struct ublk_queue *q,
                                  struct io_uring_sqe *sqe,
                                  unsigned short tag)
 {
        struct ublk_auto_buf_reg buf = {};
 
        if (q->tgt_ops->buf_index)
-               buf.index = q->tgt_ops->buf_index(q, tag);
+               buf.index = q->tgt_ops->buf_index(t, q, tag);
        else
-               buf.index = q->ios[tag].buf_index;
+               buf.index = ublk_io_buf_idx(t, q, tag);
 
        if (ublk_queue_auto_zc_fallback(q))
                buf.flags = UBLK_AUTO_BUF_REG_FALLBACK;
@@ -730,7 +731,7 @@ int ublk_queue_io_cmd(struct ublk_thread *t, struct ublk_io *io)
                cmd->addr       = 0;
 
        if (ublk_queue_use_auto_zc(q))
-               ublk_set_auto_buf_reg(q, sqe[0], io->tag);
+               ublk_set_auto_buf_reg(t, q, sqe[0], io->tag);
 
        user_data = build_user_data(io->tag, _IOC_NR(cmd_op), 0, q->q_id, 0);
        io_uring_sqe_set_data64(sqe[0], user_data);
index 48634d29c084f81fa032ce6dfcd9b8b6b095557e..311a75da9b2120f73dc087ec34357a54a7104228 100644 (file)
@@ -150,7 +150,8 @@ struct ublk_tgt_ops {
        void (*usage)(const struct ublk_tgt_ops *ops);
 
        /* return buffer index for UBLK_F_AUTO_BUF_REG */
-       unsigned short (*buf_index)(const struct ublk_queue *, int tag);
+       unsigned short (*buf_index)(const struct ublk_thread *t,
+                       const struct ublk_queue *, int tag);
 };
 
 struct ublk_tgt {
@@ -393,6 +394,13 @@ static inline void ublk_set_sqe_cmd_op(struct io_uring_sqe *sqe, __u32 cmd_op)
        addr[1] = 0;
 }
 
+static inline unsigned short ublk_io_buf_idx(const struct ublk_thread *t,
+                                            const struct ublk_queue *q,
+                                            unsigned tag)
+{
+       return q->ios[tag].buf_index;
+}
+
 static inline struct ublk_io *ublk_get_io(struct ublk_queue *q, unsigned tag)
 {
        return &q->ios[tag];
index 3aa162f08476faecbcfe7751e5215edfa43b4052..7656888f414961616b9b991e20ea2b420275121c 100644 (file)
@@ -44,12 +44,12 @@ static int ublk_null_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev)
 }
 
 static void __setup_nop_io(int tag, const struct ublksrv_io_desc *iod,
-               struct io_uring_sqe *sqe, int q_id)
+               struct io_uring_sqe *sqe, int q_id, unsigned buf_idx)
 {
        unsigned ublk_op = ublksrv_get_op(iod);
 
        io_uring_prep_nop(sqe);
-       sqe->buf_index = tag;
+       sqe->buf_index = buf_idx;
        sqe->flags |= IOSQE_FIXED_FILE;
        sqe->rw_flags = IORING_NOP_FIXED_BUFFER | IORING_NOP_INJECT_RESULT;
        sqe->len = iod->nr_sectors << 9;        /* injected result */
@@ -61,18 +61,19 @@ static int null_queue_zc_io(struct ublk_thread *t, struct ublk_queue *q,
 {
        const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
        struct io_uring_sqe *sqe[3];
+       unsigned short buf_idx = ublk_io_buf_idx(t, q, tag);
 
        ublk_io_alloc_sqes(t, sqe, 3);
 
-       io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
+       io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, buf_idx);
        sqe[0]->user_data = build_user_data(tag,
                        ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1);
        sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
 
-       __setup_nop_io(tag, iod, sqe[1], q->q_id);
+       __setup_nop_io(tag, iod, sqe[1], q->q_id, buf_idx);
        sqe[1]->flags |= IOSQE_IO_HARDLINK;
 
-       io_uring_prep_buf_unregister(sqe[2], q, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
+       io_uring_prep_buf_unregister(sqe[2], q, tag, q->q_id, buf_idx);
        sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, q->q_id, 1);
 
        // buf register is marked as IOSQE_CQE_SKIP_SUCCESS
@@ -86,7 +87,7 @@ static int null_queue_auto_zc_io(struct ublk_thread *t, struct ublk_queue *q,
        struct io_uring_sqe *sqe[1];
 
        ublk_io_alloc_sqes(t, sqe, 1);
-       __setup_nop_io(tag, iod, sqe[0], q->q_id);
+       __setup_nop_io(tag, iod, sqe[0], q->q_id, ublk_io_buf_idx(t, q, tag));
        return 1;
 }
 
@@ -137,11 +138,12 @@ static int ublk_null_queue_io(struct ublk_thread *t, struct ublk_queue *q,
  * return invalid buffer index for triggering auto buffer register failure,
  * then UBLK_IO_RES_NEED_REG_BUF handling is covered
  */
-static unsigned short ublk_null_buf_index(const struct ublk_queue *q, int tag)
+static unsigned short ublk_null_buf_index(const struct ublk_thread *t,
+               const struct ublk_queue *q, int tag)
 {
        if (ublk_queue_auto_zc_fallback(q))
                return (unsigned short)-1;
-       return q->ios[tag].buf_index;
+       return ublk_io_buf_idx(t, q, tag);
 }
 
 const struct ublk_tgt_ops null_tgt_ops = {
index b967447fe5913db1ccc931f5edac218e7e9dcc63..dca819f5366ed08f0fa4ffe10bc46b840f7e0a40 100644 (file)
@@ -135,6 +135,7 @@ static int stripe_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q,
        struct ublk_io *io = ublk_get_io(q, tag);
        int i, extra = zc ? 2 : 0;
        void *base = io->buf_addr;
+       unsigned short buf_idx = ublk_io_buf_idx(t, q, tag);
 
        io->private_data = s;
        calculate_stripe_array(conf, iod, s, base);
@@ -142,7 +143,7 @@ static int stripe_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q,
        ublk_io_alloc_sqes(t, sqe, s->nr + extra);
 
        if (zc) {
-               io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, io->buf_index);
+               io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, buf_idx);
                sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
                sqe[0]->user_data = build_user_data(tag,
                        ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1);
@@ -158,7 +159,7 @@ static int stripe_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q,
                                t->start << 9);
                io_uring_sqe_set_flags(sqe[i], IOSQE_FIXED_FILE);
                if (auto_zc || zc) {
-                       sqe[i]->buf_index = tag;
+                       sqe[i]->buf_index = buf_idx;
                        if (zc)
                                sqe[i]->flags |= IOSQE_IO_HARDLINK;
                }
@@ -168,7 +169,7 @@ static int stripe_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q,
        if (zc) {
                struct io_uring_sqe *unreg = sqe[s->nr + 1];
 
-               io_uring_prep_buf_unregister(unreg, q, tag, q->q_id, io->buf_index);
+               io_uring_prep_buf_unregister(unreg, q, tag, q->q_id, buf_idx);
                unreg->user_data = build_user_data(
                        tag, ublk_cmd_op_nr(unreg->cmd_op), 0, q->q_id, 1);
        }