__u32 len = iod->nr_sectors << 9;
struct io_uring_sqe *sqe[3];
void *addr = io->buf_addr;
+ unsigned short buf_index = ublk_io_buf_idx(t, q, tag);
if (iod->op_flags & UBLK_IO_F_INTEGRITY) {
ublk_io_alloc_sqes(t, sqe, 1);
len,
offset);
if (auto_zc)
- sqe[0]->buf_index = tag;
+ sqe[0]->buf_index = buf_index;
io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);
/* bit63 marks us as tgt io */
sqe[0]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);
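+ /* zc: bracket the rw sqe with buf register/unregister in one hardlinked chain */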
ublk_io_alloc_sqes(t, sqe, 3);
- io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, io->buf_index);
+ io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, buf_index);
sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
sqe[0]->user_data = build_user_data(tag,
ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1);
io_uring_prep_rw(op, sqe[1], ublk_get_registered_fd(q, 1) /*fds[1]*/, 0,
len,
offset);
- sqe[1]->buf_index = tag;
+ sqe[1]->buf_index = buf_index;
sqe[1]->flags |= IOSQE_FIXED_FILE | IOSQE_IO_HARDLINK;
sqe[1]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);
- io_uring_prep_buf_unregister(sqe[2], q, tag, q->q_id, io->buf_index);
+ io_uring_prep_buf_unregister(sqe[2], q, tag, q->q_id, buf_index);
sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, q->q_id, 1);
return !!(iod->op_flags & UBLK_IO_F_INTEGRITY) + 2;
close(dev->fds[0]);
}
-static void ublk_set_auto_buf_reg(const struct ublk_queue *q,
+static void ublk_set_auto_buf_reg(const struct ublk_thread *t,
+ const struct ublk_queue *q,
struct io_uring_sqe *sqe,
unsigned short tag)
{
struct ublk_auto_buf_reg buf = {};
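+ /* a target may supply its own index, e.g. null returns -1 to exercise fallback */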
if (q->tgt_ops->buf_index)
- buf.index = q->tgt_ops->buf_index(q, tag);
+ buf.index = q->tgt_ops->buf_index(t, q, tag);
else
- buf.index = q->ios[tag].buf_index;
+ buf.index = ublk_io_buf_idx(t, q, tag);
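+ /* with FALLBACK, a failed auto registration still delivers the io (UBLK_IO_RES_NEED_REG_BUF) */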
if (ublk_queue_auto_zc_fallback(q))
buf.flags = UBLK_AUTO_BUF_REG_FALLBACK;
cmd->addr = 0;
if (ublk_queue_use_auto_zc(q))
- ublk_set_auto_buf_reg(q, sqe[0], io->tag);
+ ublk_set_auto_buf_reg(t, q, sqe[0], io->tag);
user_data = build_user_data(io->tag, _IOC_NR(cmd_op), 0, q->q_id, 0);
io_uring_sqe_set_data64(sqe[0], user_data);
void (*usage)(const struct ublk_tgt_ops *ops);
/* return buffer index for UBLK_F_AUTO_BUF_REG */
- unsigned short (*buf_index)(const struct ublk_queue *, int tag);
+ unsigned short (*buf_index)(const struct ublk_thread *t,
+ const struct ublk_queue *q, int tag);
};
struct ublk_tgt {
addr[1] = 0;
}
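+/*
+ * Map a tag to its fixed buffer index. The ublk_thread argument is
+ * currently unused; threading it through lets the mapping become
+ * per-thread later without touching any caller.
+ */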
+static inline unsigned short ublk_io_buf_idx(const struct ublk_thread *t,
+ const struct ublk_queue *q,
+ unsigned tag)
+{
+ return q->ios[tag].buf_index;
+}
+
static inline struct ublk_io *ublk_get_io(struct ublk_queue *q, unsigned tag)
{
return &q->ios[tag];
}
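+/* buf_idx selects the fixed buffer slot consumed via IORING_NOP_FIXED_BUFFER */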
static void __setup_nop_io(int tag, const struct ublksrv_io_desc *iod,
- struct io_uring_sqe *sqe, int q_id)
+ struct io_uring_sqe *sqe, int q_id, unsigned buf_idx)
{
unsigned ublk_op = ublksrv_get_op(iod);
io_uring_prep_nop(sqe);
- sqe->buf_index = tag;
+ sqe->buf_index = buf_idx;
sqe->flags |= IOSQE_FIXED_FILE;
sqe->rw_flags = IORING_NOP_FIXED_BUFFER | IORING_NOP_INJECT_RESULT;
sqe->len = iod->nr_sectors << 9; /* injected result */
{
const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
struct io_uring_sqe *sqe[3];
+ unsigned short buf_idx = ublk_io_buf_idx(t, q, tag);
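+ /* register, nop on the fixed buffer, then unregister, as one hardlinked chain */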
ublk_io_alloc_sqes(t, sqe, 3);
- io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
+ io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, buf_idx);
sqe[0]->user_data = build_user_data(tag,
ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1);
sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
- __setup_nop_io(tag, iod, sqe[1], q->q_id);
+ __setup_nop_io(tag, iod, sqe[1], q->q_id, buf_idx);
sqe[1]->flags |= IOSQE_IO_HARDLINK;
- io_uring_prep_buf_unregister(sqe[2], q, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
+ io_uring_prep_buf_unregister(sqe[2], q, tag, q->q_id, buf_idx);
sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, q->q_id, 1);
/* buf register is marked as IOSQE_CQE_SKIP_SUCCESS */
struct io_uring_sqe *sqe[1];
ublk_io_alloc_sqes(t, sqe, 1);
- __setup_nop_io(tag, iod, sqe[0], q->q_id);
+ __setup_nop_io(tag, iod, sqe[0], q->q_id, ublk_io_buf_idx(t, q, tag));
return 1;
}
* Return an invalid buffer index to trigger auto buffer register failure,
* so that UBLK_IO_RES_NEED_REG_BUF handling is covered
*/
-static unsigned short ublk_null_buf_index(const struct ublk_queue *q, int tag)
+static unsigned short ublk_null_buf_index(const struct ublk_thread *t,
+ const struct ublk_queue *q, int tag)
{
if (ublk_queue_auto_zc_fallback(q))
return (unsigned short)-1;
- return q->ios[tag].buf_index;
+ return ublk_io_buf_idx(t, q, tag);
}
const struct ublk_tgt_ops null_tgt_ops = {
struct ublk_io *io = ublk_get_io(q, tag);
int i, extra = zc ? 2 : 0;
void *base = io->buf_addr;
+ unsigned short buf_idx = ublk_io_buf_idx(t, q, tag);
io->private_data = s;
calculate_stripe_array(conf, iod, s, base);
ublk_io_alloc_sqes(t, sqe, s->nr + extra);
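+ /* zc wraps the s->nr stripe rw sqes with a register/unregister pair */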
if (zc) {
- io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, io->buf_index);
+ io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, buf_idx);
sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
sqe[0]->user_data = build_user_data(tag,
ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1);
t->start << 9);
io_uring_sqe_set_flags(sqe[i], IOSQE_FIXED_FILE);
if (auto_zc || zc) {
- sqe[i]->buf_index = tag;
+ sqe[i]->buf_index = buf_idx;
if (zc)
sqe[i]->flags |= IOSQE_IO_HARDLINK;
}
if (zc) {
struct io_uring_sqe *unreg = sqe[s->nr + 1];
- io_uring_prep_buf_unregister(unreg, q, tag, q->q_id, io->buf_index);
+ io_uring_prep_buf_unregister(unreg, q, tag, q->q_id, buf_idx);
unreg->user_data = build_user_data(
tag, ublk_cmd_op_nr(unreg->cmd_op), 0, q->q_id, 1);
}