We currently have a helper ublk_queue_alloc_sqes which the ublk targets
use to allocate SQEs for their own operations. However, as we move
towards decoupled ublk_queues and ublk server threads, this helper does
not make sense anymore. SQEs are allocated from rings, and we will have
one ring per thread to avoid locking. Rename the helper to
ublk_io_alloc_sqes and have it take a struct ublk_io instead of a
struct ublk_queue. For now it still allocates SQEs from the io's
queue's ring, but once threads and queues are fully decoupled, it will
allocate from the io's thread's ring instead.

Signed-off-by: Uday Shankar <ushankar@purestorage.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20250529-ublk_task_per_io-v8-3-e9d3b119336a@purestorage.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
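
The renamed helper recovers the owning queue from the io itself, which is
why the patch also adds a tag field to struct ublk_io: each io lives at
q->ios[io->tag], so container_of() can walk back from the io to its queue
(and, later, to whichever ring the io's thread owns). Below is a minimal,
self-contained sketch of that relationship; the structs and the
container_of() definition are simplified stand-ins for the real kublk
selftest code, not the actual definitions from kublk.h.

	#include <stddef.h>
	#include <stdio.h>

	/* Simplified userspace container_of(); relies on the GCC/Clang
	 * offsetof extension that accepts a runtime array index. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* Stand-ins for the kublk selftest structs, trimmed to the
	 * fields this sketch needs. */
	struct ublk_io {
		int tag;	/* index of this io in its queue's ios[] */
	};

	struct ublk_queue {
		int q_id;
		struct ublk_io ios[4];
	};

	/* Same trick as the patch: io sits at q->ios[io->tag], so
	 * subtracting that member's offset from the io pointer yields
	 * the queue. */
	static inline struct ublk_queue *ublk_io_to_queue(const struct ublk_io *io)
	{
		return container_of(io, struct ublk_queue, ios[io->tag]);
	}

	int main(void)
	{
		struct ublk_queue q = { .q_id = 2 };
		int tag;

		for (tag = 0; tag < 4; tag++)
			q.ios[tag].tag = tag;

		/* Given only an io, recover the queue whose ring it
		 * should currently allocate SQEs from. */
		printf("io with tag 3 belongs to queue %d\n",
		       ublk_io_to_queue(&q.ios[3])->q_id);
		return 0;
	}
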
.tv_nsec = (long long)q->dev->private_data,
};
- ublk_queue_alloc_sqes(q, &sqe, 1);
+ ublk_io_alloc_sqes(ublk_get_io(q, tag), &sqe, 1);
io_uring_prep_timeout(sqe, &ts, 1, 0);
sqe->user_data = build_user_data(tag, ublksrv_get_op(iod), 0, q->q_id, 1);
unsigned ublk_op = ublksrv_get_op(iod);
struct io_uring_sqe *sqe[1];
- ublk_queue_alloc_sqes(q, sqe, 1);
+ ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 1);
io_uring_prep_fsync(sqe[0], 1 /*fds[1]*/, IORING_FSYNC_DATASYNC);
io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);
/* bit63 marks us as tgt io */
void *addr = (zc | auto_zc) ? NULL : (void *)iod->addr;
if (!zc || auto_zc) {
- ublk_queue_alloc_sqes(q, sqe, 1);
+ ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 1);
if (!sqe[0])
return -ENOMEM;
return 1;
}
- ublk_queue_alloc_sqes(q, sqe, 3);
+ ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 3);
io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, tag);
sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
if (io_uring_sq_space_left(&q->ring) < 1)
io_uring_submit(&q->ring);
- ublk_queue_alloc_sqes(q, sqe, 1);
+ ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 1);
if (!sqe[0]) {
ublk_err("%s: run out of sqe %d, tag %d\n",
__func__, q->q_id, tag);
unsigned short flags;
unsigned short refs; /* used by target code only */
+ int tag;
+
int result;
unsigned short tgt_ios;
}
}
-static inline int ublk_queue_alloc_sqes(struct ublk_queue *q,
+static inline struct ublk_queue *ublk_io_to_queue(const struct ublk_io *io)
+{
+ return container_of(io, struct ublk_queue, ios[io->tag]);
+}
+
+static inline int ublk_io_alloc_sqes(struct ublk_io *io,
struct io_uring_sqe *sqes[], int nr_sqes)
{
- unsigned left = io_uring_sq_space_left(&q->ring);
+ struct io_uring *ring = &ublk_io_to_queue(io)->ring;
+ unsigned left = io_uring_sq_space_left(ring);
int i;
if (left < nr_sqes)
- io_uring_submit(&q->ring);
+ io_uring_submit(ring);
for (i = 0; i < nr_sqes; i++) {
- sqes[i] = io_uring_get_sqe(&q->ring);
+ sqes[i] = io_uring_get_sqe(ring);
if (!sqes[i])
return i;
}
const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
struct io_uring_sqe *sqe[3];
- ublk_queue_alloc_sqes(q, sqe, 3);
+ ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 3);
io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, tag);
sqe[0]->user_data = build_user_data(tag,
const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
struct io_uring_sqe *sqe[1];
- ublk_queue_alloc_sqes(q, sqe, 1);
+ ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 1);
__setup_nop_io(tag, iod, sqe[0], q->q_id);
return 1;
}
io->private_data = s;
calculate_stripe_array(conf, iod, s, base);
- ublk_queue_alloc_sqes(q, sqe, s->nr + extra);
+ ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, s->nr + extra);
if (zc) {
io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, tag);
struct io_uring_sqe *sqe[NR_STRIPE];
int i;
- ublk_queue_alloc_sqes(q, sqe, conf->nr_files);
+ ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, conf->nr_files);
for (i = 0; i < conf->nr_files; i++) {
io_uring_prep_fsync(sqe[i], i + 1, IORING_FSYNC_DATASYNC);
io_uring_sqe_set_flags(sqe[i], IOSQE_FIXED_FILE);