git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
selftests: ublk: handle UBLK_U_IO_COMMIT_IO_CMDS
author: Ming Lei <ming.lei@redhat.com>
Fri, 16 Jan 2026 14:18:53 +0000 (22:18 +0800)
committer: Jens Axboe <axboe@kernel.dk>
Fri, 23 Jan 2026 03:05:41 +0000 (20:05 -0700)
Implement UBLK_U_IO_COMMIT_IO_CMDS to enable efficient batched
completion of I/O operations in the batch I/O framework.

This completes the batch I/O infrastructure by adding the commit
phase that notifies the kernel about completed I/O operations:

Key features:
- Batch multiple I/O completions into a single UBLK_U_IO_COMMIT_IO_CMDS
- Dynamic commit buffer allocation and management per thread
- Automatic commit buffer preparation before processing events
- Commit buffer submission after processing completed I/Os
- Integration with existing completion workflows

Implementation details:
- ublk_batch_prep_commit() allocates and initializes commit buffers
- ublk_batch_complete_io() adds completed I/Os to current batch
- ublk_batch_commit_io_cmds() submits batched completions to kernel
- Modified ublk_process_io() to handle batch commit lifecycle
- Enhanced ublk_complete_io() to route to batch or legacy completion

The commit buffer stores completion information (tag, result, buffer
details) for multiple I/Os, then submits them all at once, significantly
reducing syscall overhead compared to individual I/O completions.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
tools/testing/selftests/ublk/batch.c
tools/testing/selftests/ublk/kublk.c
tools/testing/selftests/ublk/kublk.h

index 079cae77add131f1d6b8f4af30a3017a7dae357d..9c4db7335d44988d05d12470666bee00e1b66db8 100644 (file)
@@ -174,7 +174,7 @@ static void ublk_init_batch_cmd(struct ublk_thread *t, __u16 q_id,
        cmd->elem_bytes = elem_bytes;
        cmd->nr_elem    = nr_elem;
 
-       user_data = build_user_data(buf_idx, _IOC_NR(op), 0, q_id, 0);
+       user_data = build_user_data(buf_idx, _IOC_NR(op), nr_elem, q_id, 0);
        io_uring_sqe_set_data64(sqe, user_data);
 
        t->cmd_inflight += 1;
@@ -244,9 +244,11 @@ static void ublk_batch_compl_commit_cmd(struct ublk_thread *t,
 
        if (op == _IOC_NR(UBLK_U_IO_PREP_IO_CMDS))
                ublk_assert(cqe->res == 0);
-       else if (op == _IOC_NR(UBLK_U_IO_COMMIT_IO_CMDS))
-               ;//assert(cqe->res == t->commit_buf_size);
-       else
+       else if (op == _IOC_NR(UBLK_U_IO_COMMIT_IO_CMDS)) {
+               int nr_elem = user_data_to_tgt_data(cqe->user_data);
+
+               ublk_assert(cqe->res == t->commit_buf_elem_size * nr_elem);
+       } else
                ublk_assert(0);
 
        ublk_free_commit_buf(t, buf_idx);
@@ -264,3 +266,67 @@ void ublk_batch_compl_cmd(struct ublk_thread *t,
                return;
        }
 }
+
+void ublk_batch_commit_io_cmds(struct ublk_thread *t)
+{
+       struct io_uring_sqe *sqe;
+       unsigned short buf_idx;
+       unsigned short nr_elem = t->commit.done;
+
+       /* nothing to commit */
+       if (!nr_elem) {
+               ublk_free_commit_buf(t, t->commit.buf_idx);
+               return;
+       }
+
+       ublk_io_alloc_sqes(t, &sqe, 1);
+       buf_idx = t->commit.buf_idx;
+       sqe->addr = (__u64)t->commit.elem;
+       sqe->len = nr_elem * t->commit_buf_elem_size;
+
+       /* commit isn't per-queue command */
+       ublk_init_batch_cmd(t, t->commit.q_id, sqe, UBLK_U_IO_COMMIT_IO_CMDS,
+                       t->commit_buf_elem_size, nr_elem, buf_idx);
+       ublk_setup_commit_sqe(t, sqe, buf_idx);
+}
+
+static void ublk_batch_init_commit(struct ublk_thread *t,
+                                  unsigned short buf_idx)
+{
+       /* so far only support 1:1 queue/thread mapping */
+       t->commit.q_id = t->idx;
+       t->commit.buf_idx = buf_idx;
+       t->commit.elem = ublk_get_commit_buf(t, buf_idx);
+       t->commit.done = 0;
+       t->commit.count = t->commit_buf_size /
+               t->commit_buf_elem_size;
+}
+
+void ublk_batch_prep_commit(struct ublk_thread *t)
+{
+       unsigned short buf_idx = ublk_alloc_commit_buf(t);
+
+       ublk_assert(buf_idx != UBLKS_T_COMMIT_BUF_INV_IDX);
+       ublk_batch_init_commit(t, buf_idx);
+}
+
+void ublk_batch_complete_io(struct ublk_thread *t, struct ublk_queue *q,
+                           unsigned tag, int res)
+{
+       struct batch_commit_buf *cb = &t->commit;
+       struct ublk_batch_elem *elem = (struct ublk_batch_elem *)(cb->elem +
+                       cb->done * t->commit_buf_elem_size);
+       struct ublk_io *io = &q->ios[tag];
+
+       ublk_assert(q->q_id == t->commit.q_id);
+
+       elem->tag = tag;
+       elem->buf_index = ublk_batch_io_buf_idx(t, q, tag);
+       elem->result = res;
+
+       if (!ublk_queue_no_buf(q))
+               elem->buf_addr  = (__u64) (uintptr_t) io->buf_addr;
+
+       cb->done += 1;
+       ublk_assert(cb->done <= cb->count);
+}
index dba912a44eb3aee670182a433c80d61603707b42..bf217d30c15f9ab9a394a8bbcb9277fe64e4ba09 100644 (file)
@@ -931,7 +931,13 @@ static int ublk_process_io(struct ublk_thread *t)
                return -ENODEV;
 
        ret = io_uring_submit_and_wait(&t->ring, 1);
-       reapped = ublk_reap_events_uring(t);
+       if (ublk_thread_batch_io(t)) {
+               ublk_batch_prep_commit(t);
+               reapped = ublk_reap_events_uring(t);
+               ublk_batch_commit_io_cmds(t);
+       } else {
+               reapped = ublk_reap_events_uring(t);
+       }
 
        ublk_dbg(UBLK_DBG_THREAD, "submit result %d, reapped %d stop %d idle %d\n",
                        ret, reapped, (t->state & UBLKS_T_STOPPING),
index 08320d44c7c29b4dc775aa97e6c9c4dbc253862a..5b05f6d7d808348ab272c91e10031e7007ca005b 100644 (file)
@@ -190,6 +190,14 @@ struct ublk_batch_elem {
        __u64 buf_addr;
 };
 
+struct batch_commit_buf {
+       unsigned short q_id;
+       unsigned short buf_idx;
+       void *elem;
+       unsigned short done;
+       unsigned short count;
+};
+
 struct ublk_thread {
        struct ublk_dev *dev;
        unsigned idx;
@@ -215,6 +223,7 @@ struct ublk_thread {
        void *commit_buf;
 #define UBLKS_T_COMMIT_BUF_INV_IDX  ((unsigned short)-1)
        struct allocator commit_buf_alloc;
+       struct batch_commit_buf commit;
 
        struct io_uring ring;
 };
@@ -458,30 +467,6 @@ static inline struct ublk_io *ublk_get_io(struct ublk_queue *q, unsigned tag)
        return &q->ios[tag];
 }
 
-static inline int ublk_complete_io(struct ublk_thread *t, struct ublk_queue *q,
-                                  unsigned tag, int res)
-{
-       struct ublk_io *io = &q->ios[tag];
-
-       ublk_mark_io_done(io, res);
-
-       return ublk_queue_io_cmd(t, io);
-}
-
-static inline void ublk_queued_tgt_io(struct ublk_thread *t, struct ublk_queue *q,
-                                     unsigned tag, int queued)
-{
-       if (queued < 0)
-               ublk_complete_io(t, q, tag, queued);
-       else {
-               struct ublk_io *io = ublk_get_io(q, tag);
-
-               t->io_inflight += queued;
-               io->tgt_ios = queued;
-               io->result = 0;
-       }
-}
-
 static inline int ublk_completed_tgt_io(struct ublk_thread *t,
                                        struct ublk_queue *q, unsigned tag)
 {
@@ -540,6 +525,42 @@ int ublk_batch_alloc_buf(struct ublk_thread *t);
 /* Free commit buffers and cleanup batch allocator */
 void ublk_batch_free_buf(struct ublk_thread *t);
 
+/* Prepare a new commit buffer for batching completed I/O operations */
+void ublk_batch_prep_commit(struct ublk_thread *t);
+/* Submit UBLK_U_IO_COMMIT_IO_CMDS with batched completed I/O operations */
+void ublk_batch_commit_io_cmds(struct ublk_thread *t);
+/* Add a completed I/O operation to the current batch commit buffer */
+void ublk_batch_complete_io(struct ublk_thread *t, struct ublk_queue *q,
+                           unsigned tag, int res);
+
+static inline int ublk_complete_io(struct ublk_thread *t, struct ublk_queue *q,
+                                  unsigned tag, int res)
+{
+       if (ublk_queue_batch_io(q)) {
+               ublk_batch_complete_io(t, q, tag, res);
+               return 0;
+       } else {
+               struct ublk_io *io = &q->ios[tag];
+
+               ublk_mark_io_done(io, res);
+               return ublk_queue_io_cmd(t, io);
+       }
+}
+
+static inline void ublk_queued_tgt_io(struct ublk_thread *t, struct ublk_queue *q,
+                                     unsigned tag, int queued)
+{
+       if (queued < 0)
+               ublk_complete_io(t, q, tag, queued);
+       else {
+               struct ublk_io *io = ublk_get_io(q, tag);
+
+               t->io_inflight += queued;
+               io->tgt_ios = queued;
+               io->result = 0;
+       }
+}
+
 extern const struct ublk_tgt_ops null_tgt_ops;
 extern const struct ublk_tgt_ops loop_tgt_ops;
 extern const struct ublk_tgt_ops stripe_tgt_ops;