From: Ming Lei Date: Tue, 31 Mar 2026 15:31:57 +0000 (+0800) Subject: selftests/ublk: add UBLK_F_SHMEM_ZC support for loop target X-Git-Tag: v7.1-rc1~233^2~20 X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=ec20aa44ac2629943c9b2b5524bcb55d778f746c;p=thirdparty%2Fkernel%2Flinux.git selftests/ublk: add UBLK_F_SHMEM_ZC support for loop target Add loop_queue_shmem_zc_io() which handles I/O requests marked with UBLK_IO_F_SHMEM_ZC. When the kernel sets this flag, the request data lives in a registered shared memory buffer — decode index + offset from iod->addr and use the server's mmap as the I/O buffer. The dispatch check in loop_queue_tgt_rw_io() routes SHMEM_ZC requests to this new function, bypassing the normal buffer registration path. Signed-off-by: Ming Lei Link: https://patch.msgid.link/20260331153207.3635125-7-ming.lei@redhat.com Signed-off-by: Jens Axboe --- diff --git a/tools/testing/selftests/ublk/file_backed.c b/tools/testing/selftests/ublk/file_backed.c index 228af2580ac68..d28da98f917a6 100644 --- a/tools/testing/selftests/ublk/file_backed.c +++ b/tools/testing/selftests/ublk/file_backed.c @@ -27,6 +27,40 @@ static int loop_queue_flush_io(struct ublk_thread *t, struct ublk_queue *q, return 1; } +/* + * Shared memory zero-copy I/O: when UBLK_IO_F_SHMEM_ZC is set, the + * request's data lives in a registered shared memory buffer. Decode + * index + offset from iod->addr and use the server's mmap of that + * buffer as the I/O buffer for the backing file. 
+ */
+static int loop_queue_shmem_zc_io(struct ublk_thread *t, struct ublk_queue *q,
+				  const struct ublksrv_io_desc *iod, int tag)
+{
+	unsigned ublk_op = ublksrv_get_op(iod);
+	enum io_uring_op op = ublk_to_uring_op(iod, 0);	/* plain (non-fixed-buffer) read/write op */
+	__u64 file_offset = iod->start_sector << 9;	/* sectors -> bytes */
+	__u32 len = iod->nr_sectors << 9;	/* sectors -> bytes */
+	__u32 shmem_idx = ublk_shmem_zc_index(iod->addr);	/* which registered shmem buffer */
+	__u32 shmem_off = ublk_shmem_zc_offset(iod->addr);	/* byte offset inside that buffer */
+	struct io_uring_sqe *sqe[1];
+	void *addr;
+
+	if (shmem_idx >= UBLK_BUF_MAX || !shmem_table[shmem_idx].mmap_base)	/* reject bad index or unmapped buffer */
+		return -EINVAL;
+
+	addr = shmem_table[shmem_idx].mmap_base + shmem_off;	/* server-side view of the kernel's shmem buffer */
+
+	ublk_io_alloc_sqes(t, sqe, 1);
+	if (!sqe[0])
+		return -ENOMEM;
+
+	io_uring_prep_rw(op, sqe[0], ublk_get_registered_fd(q, 1),	/* fd slot 1: backing file */
+			 addr, len, file_offset);
+	io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);	/* fd above is a registered (fixed) file */
+	sqe[0]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);	/* NOTE(review): trailing 1 appears to mirror the registered-fd slot used above — confirm */
+	return 1;	/* one SQE queued */
+}
+
 static int loop_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q,
 			   const struct ublksrv_io_desc *iod, int tag)
 {
@@ -41,6 +75,10 @@ static int loop_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q,
 	void *addr = io->buf_addr;
 	unsigned short buf_index = ublk_io_buf_idx(t, q, tag);
 
+	/* shared memory zero-copy path */
+	if (iod->op_flags & UBLK_IO_F_SHMEM_ZC)
+		return loop_queue_shmem_zc_io(t, q, iod, tag);
+
 	if (iod->op_flags & UBLK_IO_F_INTEGRITY) {
 		ublk_io_alloc_sqes(t, sqe, 1);
 		/* Use second backing file for integrity data */