return 0;
}
-static int ublk_handle_auto_buf_reg(struct ublk_io *io,
+static void ublk_clear_auto_buf_reg(struct ublk_io *io,
struct io_uring_cmd *cmd,
u16 *buf_idx)
{
if (io->buf_ctx_handle == io_uring_cmd_ctx_handle(cmd))
*buf_idx = io->buf.auto_reg.index;
}
+}
+
+static int ublk_handle_auto_buf_reg(struct ublk_io *io,
+ struct io_uring_cmd *cmd,
+ u16 *buf_idx)
+{
+ ublk_clear_auto_buf_reg(io, cmd, buf_idx);
return ublk_set_auto_buf_reg(io, cmd);
}
return 0;
}
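+/*
+ * The zone LBA, when UBLK_BATCH_F_HAS_ZONE_LBA is set, follows the element
+ * header and the optional 8-byte buffer address; return -1 when it is absent.
+ */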
+static inline __u64 ublk_batch_zone_lba(const struct ublk_batch_io *uc,
+ const struct ublk_elem_header *elem)
+{
+ const void *buf = elem;
+
+ if (uc->flags & UBLK_BATCH_F_HAS_ZONE_LBA)
+ return *(const __u64 *)(buf + sizeof(*elem) +
+ 8 * !!(uc->flags & UBLK_BATCH_F_HAS_BUF_ADDR));
+ return -1;
+}
+
static struct ublk_auto_buf_reg
ublk_batch_auto_buf_reg(const struct ublk_batch_io *uc,
const struct ublk_elem_header *elem)
return ret;
}
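+/* Validate one commit element before completing its io */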
+static int ublk_batch_commit_io_check(const struct ublk_queue *ubq,
+ struct ublk_io *io,
+ union ublk_io_buf *buf)
+{
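+	/* only an io currently owned by the server may be committed */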
+ if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
+ return -EBUSY;
+
+	/*
+	 * BATCH_IO doesn't support UBLK_F_NEED_GET_DATA, so the buffer
+	 * address has to be provided together with the commit
+	 */
+ if (ublk_need_map_io(ubq) && !buf->addr)
+ return -EINVAL;
+ return 0;
+}
+
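+/*
+ * Commit one io from the commit buffer: record the result and buffer info
+ * under the io lock, then unregister any auto-registered buffer and complete
+ * the request after the lock is dropped.
+ */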
+static int ublk_batch_commit_io(struct ublk_queue *ubq,
+ const struct ublk_batch_io_data *data,
+ const struct ublk_elem_header *elem)
+{
+ struct ublk_io *io = &ubq->ios[elem->tag];
+ const struct ublk_batch_io *uc = &data->header;
+ u16 buf_idx = UBLK_INVALID_BUF_IDX;
+ union ublk_io_buf buf = { 0 };
+ struct request *req = NULL;
+ bool auto_reg = false;
+ bool compl = false;
+ int ret;
+
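+	/* element carries auto buf registration data or a buffer address */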
+ if (ublk_dev_support_auto_buf_reg(data->ub)) {
+ buf.auto_reg = ublk_batch_auto_buf_reg(uc, elem);
+ auto_reg = true;
+	} else if (ublk_dev_need_map_io(data->ub)) {
+		buf.addr = ublk_batch_buf_addr(uc, elem);
+	}
+
+ ublk_io_lock(io);
+ ret = ublk_batch_commit_io_check(ubq, io, &buf);
+ if (!ret) {
+ io->res = elem->result;
+ io->buf = buf;
+ req = ublk_fill_io_cmd(io, data->cmd);
+
+ if (auto_reg)
+ ublk_clear_auto_buf_reg(io, data->cmd, &buf_idx);
+ compl = ublk_need_complete_req(data->ub, io);
+ }
+ ublk_io_unlock(io);
+
+ if (unlikely(ret)) {
+ pr_warn_ratelimited("%s: dev %u queue %u io %u: commit failure %d\n",
+ __func__, data->ub->dev_info.dev_id, ubq->q_id,
+ elem->tag, ret);
+ return ret;
+ }
+
+ /* can't touch 'ublk_io' any more */
+ if (buf_idx != UBLK_INVALID_BUF_IDX)
+ io_buffer_unregister_bvec(data->cmd, buf_idx, data->issue_flags);
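+	/* for zone append, record the written LBA reported by the server */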
+ if (req_op(req) == REQ_OP_ZONE_APPEND)
+ req->__sector = ublk_batch_zone_lba(uc, elem);
+ if (compl)
+ __ublk_complete_rq(req, io, ublk_dev_need_map_io(data->ub));
+ return 0;
+}
+
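+/*
+ * Walk the user commit buffer and commit each element's io.  Return the
+ * progress made (iter.done) if any element was handled, otherwise the
+ * error from walking the buffer.
+ */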
+static int ublk_handle_batch_commit_cmd(const struct ublk_batch_io_data *data)
+{
+ const struct ublk_batch_io *uc = &data->header;
+ struct io_uring_cmd *cmd = data->cmd;
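+	/* commit buffer lives in user memory: nr_elem elements of elem_bytes each */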
+ struct ublk_batch_io_iter iter = {
+ .uaddr = u64_to_user_ptr(READ_ONCE(cmd->sqe->addr)),
+ .total = uc->nr_elem * uc->elem_bytes,
+ .elem_bytes = uc->elem_bytes,
+ };
+ int ret;
+
+ ret = ublk_walk_cmd_buf(&iter, data, ublk_batch_commit_io);
+
+ return iter.done == 0 ? ret : iter.done;
+}
+
static int ublk_check_batch_cmd_flags(const struct ublk_batch_io *uc)
{
unsigned elem_bytes = sizeof(struct ublk_elem_header);
ret = ublk_check_batch_cmd(&data);
if (ret)
goto out;
- ret = -EOPNOTSUPP;
+ ret = ublk_handle_batch_commit_cmd(&data);
break;
default:
ret = -EOPNOTSUPP;
UBLK_F_AUTO_BUF_REG))
ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA;
+ /* UBLK_F_BATCH_IO doesn't support GET_DATA */
+ if (ublk_dev_support_batch_io(ub))
+ ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA;
+
/*
* Zoned storage support requires reuse `ublksrv_io_cmd->addr` for
* returning write_append_lba, which is only allowed in case of