ublk: add new batch command UBLK_U_IO_PREP_IO_CMDS & UBLK_U_IO_COMMIT_IO_CMDS
author Ming Lei <ming.lei@redhat.com>
Fri, 16 Jan 2026 14:18:36 +0000 (22:18 +0800)
committer Jens Axboe <axboe@kernel.dk>
Fri, 23 Jan 2026 03:05:40 +0000 (20:05 -0700)
Add new command UBLK_U_IO_PREP_IO_CMDS, which is the batch version of
UBLK_IO_FETCH_REQ.

Add new command UBLK_U_IO_COMMIT_IO_CMDS, which commits io command results
only; it is likewise a batch command.

The new command header type is `struct ublk_batch_io`.

This patch doesn't implement the two commands yet; it only validates their
SQE fields.
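
For illustration only (not part of this patch): a minimal liburing-based
sketch of how a ublk server might fill the 16-byte `struct ublk_batch_io`
header into a uring_cmd SQE for UBLK_U_IO_PREP_IO_CMDS. The helper name is
hypothetical, and how the per-tag element payload is handed to the kernel is
not defined here, so it is omitted.

    #include <liburing.h>
    #include <linux/ioctl.h>
    #include <linux/ublk_cmd.h>
    #include <string.h>

    /* hypothetical helper: queue one UBLK_U_IO_PREP_IO_CMDS uring_cmd */
    static void ublk_queue_prep_io_cmds(struct io_uring *ring, int cdev_fd,
                                        __u16 q_id, __u16 nr_elem)
    {
            struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
            struct ublk_batch_io uc = {
                    .q_id = q_id,
                    .flags = 0,             /* no optional per-element fields */
                    .nr_elem = nr_elem,     /* must not exceed queue_depth */
                    .elem_bytes = sizeof(struct ublk_elem_header),
            };

            memset(sqe, 0, sizeof(*sqe));
            sqe->opcode = IORING_OP_URING_CMD;
            sqe->fd = cdev_fd;              /* /dev/ublkcN char device fd */
            sqe->cmd_op = UBLK_U_IO_PREP_IO_CMDS;
            /* the header fits in the default 16-byte SQE command area */
            memcpy(sqe->cmd, &uc, sizeof(uc));
            /* io_uring_submit() is left to the caller */
    }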

Reviewed-by: Caleb Sander Mateos <csander@purestorage.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/block/ublk_drv.c
include/uapi/linux/ublk_cmd.h

index 0f9fcd16258b2bf09d268e828f8a00ad7a12e695..22c7296d90f39d755a71e489ab5ac8d3748f3b42 100644 (file)
         UBLK_PARAM_TYPE_DMA_ALIGN | UBLK_PARAM_TYPE_SEGMENT | \
         UBLK_PARAM_TYPE_INTEGRITY)
 
+#define UBLK_BATCH_F_ALL  \
+       (UBLK_BATCH_F_HAS_ZONE_LBA | \
+        UBLK_BATCH_F_HAS_BUF_ADDR | \
+        UBLK_BATCH_F_AUTO_BUF_REG_FALLBACK)
+
 struct ublk_uring_cmd_pdu {
        /*
         * Store requests in same batch temporarily for queuing them to
@@ -114,6 +119,13 @@ struct ublk_uring_cmd_pdu {
        u16 tag;
 };
 
+struct ublk_batch_io_data {
+       struct ublk_device *ub;
+       struct io_uring_cmd *cmd;
+       struct ublk_batch_io header;
+       unsigned int issue_flags;
+};
+
 /*
  * io command is active: sqe cmd is received, and its cqe isn't done
  *
@@ -2687,10 +2699,83 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
        return ublk_ch_uring_cmd_local(cmd, issue_flags);
 }
 
+static int ublk_check_batch_cmd_flags(const struct ublk_batch_io *uc)
+{
+       unsigned elem_bytes = sizeof(struct ublk_elem_header);
+
+       if (uc->flags & ~UBLK_BATCH_F_ALL)
+               return -EINVAL;
+
+       /* auto buf reg fallback uses a buffer index, not a buffer address */
+       if ((uc->flags & UBLK_BATCH_F_AUTO_BUF_REG_FALLBACK) &&
+                       (uc->flags & UBLK_BATCH_F_HAS_BUF_ADDR))
+               return -EINVAL;
+
+       elem_bytes += (uc->flags & UBLK_BATCH_F_HAS_ZONE_LBA ? sizeof(u64) : 0) +
+               (uc->flags & UBLK_BATCH_F_HAS_BUF_ADDR ? sizeof(u64) : 0);
+       if (uc->elem_bytes != elem_bytes)
+               return -EINVAL;
+       return 0;
+}
+
+static int ublk_check_batch_cmd(const struct ublk_batch_io_data *data)
+{
+       const struct ublk_batch_io *uc = &data->header;
+
+       if (uc->nr_elem > data->ub->dev_info.queue_depth)
+               return -E2BIG;
+
+       if ((uc->flags & UBLK_BATCH_F_HAS_ZONE_LBA) &&
+                       !ublk_dev_is_zoned(data->ub))
+               return -EINVAL;
+
+       if ((uc->flags & UBLK_BATCH_F_HAS_BUF_ADDR) &&
+                       !ublk_dev_need_map_io(data->ub))
+               return -EINVAL;
+
+       if ((uc->flags & UBLK_BATCH_F_AUTO_BUF_REG_FALLBACK) &&
+                       !ublk_dev_support_auto_buf_reg(data->ub))
+               return -EINVAL;
+
+       return ublk_check_batch_cmd_flags(uc);
+}
+
 static int ublk_ch_batch_io_uring_cmd(struct io_uring_cmd *cmd,
                                       unsigned int issue_flags)
 {
-       return -EOPNOTSUPP;
+       const struct ublk_batch_io *uc = io_uring_sqe_cmd(cmd->sqe);
+       struct ublk_device *ub = cmd->file->private_data;
+       struct ublk_batch_io_data data = {
+               .ub  = ub,
+               .cmd = cmd,
+               .header = (struct ublk_batch_io) {
+                       .q_id = READ_ONCE(uc->q_id),
+                       .flags = READ_ONCE(uc->flags),
+                       .nr_elem = READ_ONCE(uc->nr_elem),
+                       .elem_bytes = READ_ONCE(uc->elem_bytes),
+               },
+               .issue_flags = issue_flags,
+       };
+       u32 cmd_op = cmd->cmd_op;
+       int ret = -EINVAL;
+
+       if (data.header.q_id >= ub->dev_info.nr_hw_queues)
+               goto out;
+
+       switch (cmd_op) {
+       case UBLK_U_IO_PREP_IO_CMDS:
+       case UBLK_U_IO_COMMIT_IO_CMDS:
+               ret = ublk_check_batch_cmd(&data);
+               if (ret)
+                       goto out;
+               ret = -EOPNOTSUPP;
+               break;
+       default:
+               ret = -EOPNOTSUPP;
+       }
+out:
+       return ret;
 }
 
 static inline bool ublk_check_ubuf_dir(const struct request *req,
index 90f47da4f435f0e06dbbd769d9405c327241c560..0cc58e19d401c74d877627a0d92e555ec4259d7c 100644 (file)
        _IOWR('u', 0x23, struct ublksrv_io_cmd)
 #define        UBLK_U_IO_UNREGISTER_IO_BUF     \
        _IOWR('u', 0x24, struct ublksrv_io_cmd)
+#define        UBLK_U_IO_PREP_IO_CMDS  \
+       _IOWR('u', 0x25, struct ublk_batch_io)
+#define        UBLK_U_IO_COMMIT_IO_CMDS        \
+       _IOWR('u', 0x26, struct ublk_batch_io)
 
 /* only ABORT means that no re-fetch */
 #define UBLK_IO_RES_OK                 0
@@ -544,6 +548,51 @@ struct ublksrv_io_cmd {
        };
 };
 
+struct ublk_elem_header {
+       __u16 tag;      /* IO tag */
+
+       /*
+        * Buffer index for incoming io command, only valid iff
+        * UBLK_F_AUTO_BUF_REG is set
+        */
+       __u16 buf_index;
+       __s32 result;   /* I/O completion result (commit only) */
+};
+
+/*
+ * uring_cmd buffer structure for batch commands
+ *
+ * The buffer holds multiple elements; their number is given by
+ * `nr_elem`. Each element is laid out in the following order:
+ *
+ * struct ublk_elem_buffer {
+ *     // Mandatory fields (8 bytes)
+ *     struct ublk_elem_header header;
+ *
+ *     // Optional fields (8 bytes each, included based on flags)
+ *
+ *     // Buffer address (if UBLK_BATCH_F_HAS_BUF_ADDR) for copying data
+ *     // between ublk request and ublk server buffer
+ *     __u64 buf_addr;
+ *
+ *     // Returned zone append LBA (if UBLK_BATCH_F_HAS_ZONE_LBA)
+ *     __u64 zone_lba;
+ * }
+ *
+ * Used for `UBLK_U_IO_PREP_IO_CMDS` and `UBLK_U_IO_COMMIT_IO_CMDS`
+ */
+struct ublk_batch_io {
+       __u16  q_id;
+#define UBLK_BATCH_F_HAS_ZONE_LBA      (1 << 0)
+#define UBLK_BATCH_F_HAS_BUF_ADDR      (1 << 1)
+#define UBLK_BATCH_F_AUTO_BUF_REG_FALLBACK     (1 << 2)
+       __u16   flags;
+       __u16   nr_elem;
+       __u8    elem_bytes;
+       __u8    reserved;
+       __u64   reserved2;
+};
+
 struct ublk_param_basic {
 #define UBLK_ATTR_READ_ONLY            (1 << 0)
 #define UBLK_ATTR_ROTATIONAL           (1 << 1)
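
Addendum (illustration only, not part of the patch): the element layout
documented in the `ublk_batch_io` comment above can be packed from userspace
roughly as below. The helper name is made up; both optional fields are
enabled purely to show every documented field, matching the elem_bytes
arithmetic in ublk_check_batch_cmd_flags() (8-byte header + 8 + 8 = 24).

    #include <linux/ublk_cmd.h>
    #include <stddef.h>
    #include <string.h>

    /*
     * Hypothetical helper: pack one commit element in the documented order
     * (header, then buf_addr, then zone_lba). Returns the element size,
     * which must equal the elem_bytes value set in struct ublk_batch_io.
     */
    static size_t ublk_pack_commit_elem(void *buf, __u16 tag, __u16 buf_index,
                                        __s32 result, __u64 buf_addr,
                                        __u64 zone_lba)
    {
            struct ublk_elem_header hdr = {
                    .tag = tag,
                    .buf_index = buf_index,
                    .result = result,
            };
            __u8 *p = buf;

            memcpy(p, &hdr, sizeof(hdr));              /* mandatory 8 bytes */
            p += sizeof(hdr);
            memcpy(p, &buf_addr, sizeof(buf_addr));    /* UBLK_BATCH_F_HAS_BUF_ADDR */
            p += sizeof(buf_addr);
            memcpy(p, &zone_lba, sizeof(zone_lba));    /* UBLK_BATCH_F_HAS_ZONE_LBA */
            p += sizeof(zone_lba);

            return p - (__u8 *)buf;                    /* 24 == elem_bytes */
    }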