git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
ublk: add new feature UBLK_F_BATCH_IO
author Ming Lei <ming.lei@redhat.com>
Fri, 16 Jan 2026 14:18:44 +0000 (22:18 +0800)
committer Jens Axboe <axboe@kernel.dk>
Fri, 23 Jan 2026 03:05:40 +0000 (20:05 -0700)
Add new feature UBLK_F_BATCH_IO which replaces the following two
per-io commands:

- UBLK_U_IO_FETCH_REQ

- UBLK_U_IO_COMMIT_AND_FETCH_REQ

with three per-queue batch io uring_cmd:

- UBLK_U_IO_PREP_IO_CMDS

- UBLK_U_IO_COMMIT_IO_CMDS

- UBLK_U_IO_FETCH_IO_CMDS

Then ublk can deliver batch io commands to the ublk server in a single
multishot uring_cmd, and also allows preparing & committing multiple
commands in batch style via a single uring_cmd, so communication cost is
reduced a lot.

This feature also no longer restricts the task context for any supported
command, so any allowed uring_cmd can be issued from any task context.
This makes the ublk server implementation much easier.

Meanwhile, load balancing becomes much easier to support with this feature.
The command `UBLK_U_IO_FETCH_IO_CMDS` can be issued from multiple task
contexts, so each task can adjust this command's buffer length or number
of inflight commands to control how much load is handled by the current
task.

Later, priority parameter will be added to command `UBLK_U_IO_FETCH_IO_CMDS`
for improving load balance support.

UBLK_U_IO_NEED_GET_DATA isn't supported in batch io yet, but it may be
enabled in the future via its batch pair.

Reviewed-by: Caleb Sander Mateos <csander@purestorage.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/block/ublk_drv.c
include/uapi/linux/ublk_cmd.h

index 564cf44c238f420342137d152e43da7de22249d2..bec34b5ab5ab183e32328cd61cdea19c3a5bdcae 100644 (file)
@@ -79,7 +79,8 @@
                | UBLK_F_PER_IO_DAEMON \
                | UBLK_F_BUF_REG_OFF_DAEMON \
                | (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) ? UBLK_F_INTEGRITY : 0) \
-               | UBLK_F_SAFE_STOP_DEV)
+               | UBLK_F_SAFE_STOP_DEV \
+               | UBLK_F_BATCH_IO)
 
 #define UBLK_F_ALL_RECOVERY_FLAGS (UBLK_F_USER_RECOVERY \
                | UBLK_F_USER_RECOVERY_REISSUE \
@@ -340,12 +341,12 @@ static void ublk_batch_dispatch(struct ublk_queue *ubq,
 
 static inline bool ublk_dev_support_batch_io(const struct ublk_device *ub)
 {
-       return false;
+       return ub->dev_info.flags & UBLK_F_BATCH_IO;
 }
 
 static inline bool ublk_support_batch_io(const struct ublk_queue *ubq)
 {
-       return false;
+       return ubq->flags & UBLK_F_BATCH_IO;
 }
 
 static inline void ublk_io_lock(struct ublk_io *io)
@@ -3573,9 +3574,11 @@ static int ublk_check_batch_cmd_flags(const struct ublk_batch_io *uc)
 
 static int ublk_check_batch_cmd(const struct ublk_batch_io_data *data)
 {
-
        const struct ublk_batch_io *uc = &data->header;
 
+       if (uc->q_id >= data->ub->dev_info.nr_hw_queues)
+               return -EINVAL;
+
        if (uc->nr_elem > data->ub->dev_info.queue_depth)
                return -E2BIG;
 
@@ -3655,6 +3658,9 @@ static int ublk_validate_batch_fetch_cmd(struct ublk_batch_io_data *data)
 {
        const struct ublk_batch_io *uc = &data->header;
 
+       if (uc->q_id >= data->ub->dev_info.nr_hw_queues)
+               return -EINVAL;
+
        if (!(data->cmd->flags & IORING_URING_CMD_MULTISHOT))
                return -EINVAL;
 
@@ -3667,6 +3673,35 @@ static int ublk_validate_batch_fetch_cmd(struct ublk_batch_io_data *data)
        return 0;
 }
 
+static int ublk_handle_non_batch_cmd(struct io_uring_cmd *cmd,
+                                    unsigned int issue_flags)
+{
+       const struct ublksrv_io_cmd *ub_cmd = io_uring_sqe_cmd(cmd->sqe);
+       struct ublk_device *ub = cmd->file->private_data;
+       unsigned tag = READ_ONCE(ub_cmd->tag);
+       unsigned q_id = READ_ONCE(ub_cmd->q_id);
+       unsigned index = READ_ONCE(ub_cmd->addr);
+       struct ublk_queue *ubq;
+       struct ublk_io *io;
+
+       if (cmd->cmd_op == UBLK_U_IO_UNREGISTER_IO_BUF)
+               return ublk_unregister_io_buf(cmd, ub, index, issue_flags);
+
+       if (q_id >= ub->dev_info.nr_hw_queues)
+               return -EINVAL;
+
+       if (tag >= ub->dev_info.queue_depth)
+               return -EINVAL;
+
+       if (cmd->cmd_op != UBLK_U_IO_REGISTER_IO_BUF)
+               return -EOPNOTSUPP;
+
+       ubq = ublk_get_queue(ub, q_id);
+       io = &ubq->ios[tag];
+       return ublk_register_io_buf(cmd, ub, q_id, tag, io, index,
+                       issue_flags);
+}
+
 static int ublk_ch_batch_io_uring_cmd(struct io_uring_cmd *cmd,
                                       unsigned int issue_flags)
 {
@@ -3691,9 +3726,6 @@ static int ublk_ch_batch_io_uring_cmd(struct io_uring_cmd *cmd,
                return 0;
        }
 
-       if (data.header.q_id >= ub->dev_info.nr_hw_queues)
-               goto out;
-
        switch (cmd_op) {
        case UBLK_U_IO_PREP_IO_CMDS:
                ret = ublk_check_batch_cmd(&data);
@@ -3714,7 +3746,8 @@ static int ublk_ch_batch_io_uring_cmd(struct io_uring_cmd *cmd,
                ret = ublk_handle_batch_fetch_cmd(&data);
                break;
        default:
-               ret = -EOPNOTSUPP;
+               ret = ublk_handle_non_batch_cmd(cmd, issue_flags);
+               break;
        }
 out:
        return ret;
@@ -4437,6 +4470,10 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
                UBLK_F_BUF_REG_OFF_DAEMON |
                UBLK_F_SAFE_STOP_DEV;
 
+       /* So far, UBLK_F_PER_IO_DAEMON won't be exposed for BATCH_IO */
+       if (ublk_dev_support_batch_io(ub))
+               ub->dev_info.flags &= ~UBLK_F_PER_IO_DAEMON;
+
        /* GET_DATA isn't needed any more with USER_COPY or ZERO COPY */
        if (ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY |
                                UBLK_F_AUTO_BUF_REG))
@@ -4820,6 +4857,13 @@ static int ublk_wait_for_idle_io(struct ublk_device *ub,
        unsigned int elapsed = 0;
        int ret;
 
+       /*
+        * For UBLK_F_BATCH_IO ublk server can get notified with existing
+        * or new fetch command, so needn't wait any more
+        */
+       if (ublk_dev_support_batch_io(ub))
+               return 0;
+
        while (elapsed < timeout_ms && !signal_pending(current)) {
                unsigned int queues_cancelable = 0;
                int i;
index 70d8ebbf43266d439f86b1a02158304919c69e38..743d314913875a1b1da37fcc13872202d053c875 100644 (file)
  */
 #define UBLK_F_BUF_REG_OFF_DAEMON (1ULL << 14)
 
+/*
+ * Support the following commands for delivering & committing io command
+ * in batch.
+ *
+ *     - UBLK_U_IO_PREP_IO_CMDS
+ *     - UBLK_U_IO_COMMIT_IO_CMDS
+ *     - UBLK_U_IO_FETCH_IO_CMDS
+ *     - UBLK_U_IO_REGISTER_IO_BUF
+ *     - UBLK_U_IO_UNREGISTER_IO_BUF
+ *
+ * The existing UBLK_U_IO_FETCH_REQ, UBLK_U_IO_COMMIT_AND_FETCH_REQ and
+ * UBLK_U_IO_NEED_GET_DATA uring_cmd are not supported for this feature.
+ */
+#define UBLK_F_BATCH_IO                (1ULL << 15)
+
 /*
  * ublk device supports requests with integrity/metadata buffer.
  * Requires UBLK_F_USER_COPY.