return 0;
}
-static int ublk_fault_inject_queue_io(struct ublk_queue *q, int tag)
+static int ublk_fault_inject_queue_io(struct ublk_thread *t,
+ struct ublk_queue *q, int tag)
{
const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
struct io_uring_sqe *sqe;
	struct __kernel_timespec ts = {
		.tv_nsec = (long long)q->dev->private_data,
};
- ublk_io_alloc_sqes(ublk_get_io(q, tag), &sqe, 1);
+ ublk_io_alloc_sqes(t, &sqe, 1);
io_uring_prep_timeout(sqe, &ts, 1, 0);
sqe->user_data = build_user_data(tag, ublksrv_get_op(iod), 0, q->q_id, 1);
return 0;
}
-static void ublk_fault_inject_tgt_io_done(struct ublk_queue *q,
+static void ublk_fault_inject_tgt_io_done(struct ublk_thread *t,
+ struct ublk_queue *q,
const struct io_uring_cqe *cqe)
{
unsigned tag = user_data_to_tag(cqe->user_data);
assert(0);
}
-static int loop_queue_flush_io(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag)
+static int loop_queue_flush_io(struct ublk_thread *t, struct ublk_queue *q,
+ const struct ublksrv_io_desc *iod, int tag)
{
unsigned ublk_op = ublksrv_get_op(iod);
struct io_uring_sqe *sqe[1];
- ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 1);
+ ublk_io_alloc_sqes(t, sqe, 1);
io_uring_prep_fsync(sqe[0], 1 /*fds[1]*/, IORING_FSYNC_DATASYNC);
io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);
/* bit63 marks us as tgt io */
return 1;
}
-static int loop_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag)
+static int loop_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q,
+ const struct ublksrv_io_desc *iod, int tag)
{
unsigned ublk_op = ublksrv_get_op(iod);
unsigned zc = ublk_queue_use_zc(q);
void *addr = (zc | auto_zc) ? NULL : (void *)iod->addr;
if (!zc || auto_zc) {
- ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 1);
+ ublk_io_alloc_sqes(t, sqe, 1);
if (!sqe[0])
return -ENOMEM;
return 1;
}
- ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 3);
+ ublk_io_alloc_sqes(t, sqe, 3);
io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
return 2;
}
-static int loop_queue_tgt_io(struct ublk_queue *q, int tag)
+static int loop_queue_tgt_io(struct ublk_thread *t, struct ublk_queue *q, int tag)
{
const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
unsigned ublk_op = ublksrv_get_op(iod);
switch (ublk_op) {
case UBLK_IO_OP_FLUSH:
- ret = loop_queue_flush_io(q, iod, tag);
+ ret = loop_queue_flush_io(t, q, iod, tag);
break;
case UBLK_IO_OP_WRITE_ZEROES:
case UBLK_IO_OP_DISCARD:
break;
case UBLK_IO_OP_READ:
case UBLK_IO_OP_WRITE:
- ret = loop_queue_tgt_rw_io(q, iod, tag);
+ ret = loop_queue_tgt_rw_io(t, q, iod, tag);
break;
default:
ret = -EINVAL;
return ret;
}
-static int ublk_loop_queue_io(struct ublk_queue *q, int tag)
+static int ublk_loop_queue_io(struct ublk_thread *t, struct ublk_queue *q,
+ int tag)
{
- int queued = loop_queue_tgt_io(q, tag);
+ int queued = loop_queue_tgt_io(t, q, tag);
ublk_queued_tgt_io(q, tag, queued);
return 0;
}
-static void ublk_loop_io_done(struct ublk_queue *q,
+static void ublk_loop_io_done(struct ublk_thread *t, struct ublk_queue *q,
const struct io_uring_cqe *cqe)
{
unsigned tag = user_data_to_tag(cqe->user_data);
if (io_uring_sq_space_left(&t->ring) < 1)
io_uring_submit(&t->ring);
- ublk_io_alloc_sqes(io, sqe, 1);
+ ublk_io_alloc_sqes(t, sqe, 1);
if (!sqe[0]) {
ublk_err("%s: run out of sqe. thread %u, tag %d\n",
__func__, t->idx, io->tag);
return (t->state & UBLKSRV_THREAD_STOPPING) && ublk_thread_is_idle(t);
}
-static inline void ublksrv_handle_tgt_cqe(struct ublk_queue *q,
- struct io_uring_cqe *cqe)
+static inline void ublksrv_handle_tgt_cqe(struct ublk_thread *t,
+ struct ublk_queue *q,
+ struct io_uring_cqe *cqe)
{
if (cqe->res < 0 && cqe->res != -EAGAIN)
ublk_err("%s: failed tgt io: res %d qid %u tag %u, cmd_op %u\n",
user_data_to_op(cqe->user_data));
if (q->tgt_ops->tgt_io_done)
- q->tgt_ops->tgt_io_done(q, cqe);
+ q->tgt_ops->tgt_io_done(t, q, cqe);
}
static void ublk_handle_cqe(struct ublk_thread *t,
/* Don't retrieve io in case of target io */
if (is_target_io(cqe->user_data)) {
- ublksrv_handle_tgt_cqe(q, cqe);
+ ublksrv_handle_tgt_cqe(t, q, cqe);
return;
}
if (cqe->res == UBLK_IO_RES_OK) {
assert(tag < q->q_depth);
if (q->tgt_ops->queue_io)
- q->tgt_ops->queue_io(q, tag);
+ q->tgt_ops->queue_io(t, q, tag);
} else if (cqe->res == UBLK_IO_RES_NEED_GET_DATA) {
io->flags |= UBLKSRV_NEED_GET_DATA | UBLKSRV_IO_FREE;
ublk_queue_io_cmd(io);
int (*init_tgt)(const struct dev_ctx *ctx, struct ublk_dev *);
void (*deinit_tgt)(struct ublk_dev *);
- int (*queue_io)(struct ublk_queue *, int tag);
- void (*tgt_io_done)(struct ublk_queue *, const struct io_uring_cqe *);
+ int (*queue_io)(struct ublk_thread *, struct ublk_queue *, int tag);
+ void (*tgt_io_done)(struct ublk_thread *, struct ublk_queue *,
+ const struct io_uring_cqe *);
/*
* Target specific command line handling
return container_of(io, struct ublk_queue, ios[io->tag]);
}
-static inline int ublk_io_alloc_sqes(struct ublk_io *io,
+static inline int ublk_io_alloc_sqes(struct ublk_thread *t,
struct io_uring_sqe *sqes[], int nr_sqes)
{
- struct io_uring *ring = &io->t->ring;
+ struct io_uring *ring = &t->ring;
unsigned left = io_uring_sq_space_left(ring);
int i;
sqe->user_data = build_user_data(tag, ublk_op, 0, q_id, 1);
}
-static int null_queue_zc_io(struct ublk_queue *q, int tag)
+static int null_queue_zc_io(struct ublk_thread *t, struct ublk_queue *q,
+ int tag)
{
const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
struct io_uring_sqe *sqe[3];
- ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 3);
+ ublk_io_alloc_sqes(t, sqe, 3);
io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
sqe[0]->user_data = build_user_data(tag,
return 2;
}
-static int null_queue_auto_zc_io(struct ublk_queue *q, int tag)
+static int null_queue_auto_zc_io(struct ublk_thread *t, struct ublk_queue *q,
+ int tag)
{
const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
struct io_uring_sqe *sqe[1];
- ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 1);
+ ublk_io_alloc_sqes(t, sqe, 1);
__setup_nop_io(tag, iod, sqe[0], q->q_id);
return 1;
}
-static void ublk_null_io_done(struct ublk_queue *q,
- const struct io_uring_cqe *cqe)
+static void ublk_null_io_done(struct ublk_thread *t, struct ublk_queue *q,
+ const struct io_uring_cqe *cqe)
{
unsigned tag = user_data_to_tag(cqe->user_data);
unsigned op = user_data_to_op(cqe->user_data);
ublk_complete_io(q, tag, io->result);
}
-static int ublk_null_queue_io(struct ublk_queue *q, int tag)
+static int ublk_null_queue_io(struct ublk_thread *t, struct ublk_queue *q,
+ int tag)
{
const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
unsigned auto_zc = ublk_queue_use_auto_zc(q);
int queued;
if (auto_zc && !ublk_io_auto_zc_fallback(iod))
- queued = null_queue_auto_zc_io(q, tag);
+ queued = null_queue_auto_zc_io(t, q, tag);
else if (zc)
- queued = null_queue_zc_io(q, tag);
+ queued = null_queue_zc_io(t, q, tag);
else {
ublk_complete_io(q, tag, iod->nr_sectors << 9);
return 0;
assert(0);
}
-static int stripe_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag)
+static int stripe_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q,
+ const struct ublksrv_io_desc *iod, int tag)
{
const struct stripe_conf *conf = get_chunk_shift(q);
unsigned auto_zc = (ublk_queue_use_auto_zc(q) != 0);
io->private_data = s;
calculate_stripe_array(conf, iod, s, base);
- ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, s->nr + extra);
+ ublk_io_alloc_sqes(t, sqe, s->nr + extra);
if (zc) {
io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, io->buf_index);
return s->nr + zc;
}
-static int handle_flush(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag)
+static int handle_flush(struct ublk_thread *t, struct ublk_queue *q,
+ const struct ublksrv_io_desc *iod, int tag)
{
const struct stripe_conf *conf = get_chunk_shift(q);
struct io_uring_sqe *sqe[NR_STRIPE];
int i;
- ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, conf->nr_files);
+ ublk_io_alloc_sqes(t, sqe, conf->nr_files);
for (i = 0; i < conf->nr_files; i++) {
io_uring_prep_fsync(sqe[i], i + 1, IORING_FSYNC_DATASYNC);
io_uring_sqe_set_flags(sqe[i], IOSQE_FIXED_FILE);
return conf->nr_files;
}
-static int stripe_queue_tgt_io(struct ublk_queue *q, int tag)
+static int stripe_queue_tgt_io(struct ublk_thread *t, struct ublk_queue *q,
+ int tag)
{
const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
unsigned ublk_op = ublksrv_get_op(iod);
switch (ublk_op) {
case UBLK_IO_OP_FLUSH:
- ret = handle_flush(q, iod, tag);
+ ret = handle_flush(t, q, iod, tag);
break;
case UBLK_IO_OP_WRITE_ZEROES:
case UBLK_IO_OP_DISCARD:
break;
case UBLK_IO_OP_READ:
case UBLK_IO_OP_WRITE:
- ret = stripe_queue_tgt_rw_io(q, iod, tag);
+ ret = stripe_queue_tgt_rw_io(t, q, iod, tag);
break;
default:
ret = -EINVAL;
return ret;
}
-static int ublk_stripe_queue_io(struct ublk_queue *q, int tag)
+static int ublk_stripe_queue_io(struct ublk_thread *t, struct ublk_queue *q,
+ int tag)
{
- int queued = stripe_queue_tgt_io(q, tag);
+ int queued = stripe_queue_tgt_io(t, q, tag);
ublk_queued_tgt_io(q, tag, queued);
return 0;
}
-static void ublk_stripe_io_done(struct ublk_queue *q,
- const struct io_uring_cqe *cqe)
+static void ublk_stripe_io_done(struct ublk_thread *t, struct ublk_queue *q,
+ const struct io_uring_cqe *cqe)
{
unsigned tag = user_data_to_tag(cqe->user_data);
const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);