git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
selftests/ublk: add UBLK_F_SHMEM_ZC support for loop target
authorMing Lei <ming.lei@redhat.com>
Tue, 31 Mar 2026 15:31:57 +0000 (23:31 +0800)
committerJens Axboe <axboe@kernel.dk>
Tue, 7 Apr 2026 13:42:23 +0000 (07:42 -0600)
Add loop_queue_shmem_zc_io() which handles I/O requests marked with
UBLK_IO_F_SHMEM_ZC. When the kernel sets this flag, the request data
lives in a registered shared memory buffer — decode index + offset
from iod->addr and use the server's mmap as the I/O buffer.

The dispatch check in loop_queue_tgt_rw_io() routes SHMEM_ZC requests
to this new function, bypassing the normal buffer registration path.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://patch.msgid.link/20260331153207.3635125-7-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
tools/testing/selftests/ublk/file_backed.c

index 228af2580ac6845f1490395467cbe7c3c9315fd8..d28da98f917a64bc31b835cdfad9611ff7eaf611 100644 (file)
@@ -27,6 +27,40 @@ static int loop_queue_flush_io(struct ublk_thread *t, struct ublk_queue *q,
        return 1;
 }
 
+/*
+ * Shared memory zero-copy I/O: when UBLK_IO_F_SHMEM_ZC is set, the
+ * request's data lives in a registered shared memory buffer. Decode
+ * index + offset from iod->addr and use the server's mmap of that
+ * buffer as the I/O buffer for the backing file.
+ */
+static int loop_queue_shmem_zc_io(struct ublk_thread *t, struct ublk_queue *q,
+                                 const struct ublksrv_io_desc *iod, int tag)
+{
+       unsigned ublk_op = ublksrv_get_op(iod);
+       enum io_uring_op op = ublk_to_uring_op(iod, 0);
+       __u64 file_offset = iod->start_sector << 9;	/* 512-byte sectors -> byte offset */
+       __u32 len = iod->nr_sectors << 9;	/* transfer length in bytes */
+       __u32 shmem_idx = ublk_shmem_zc_index(iod->addr);	/* which registered shmem buffer */
+       __u32 shmem_off = ublk_shmem_zc_offset(iod->addr);	/* offset within that buffer */
+       struct io_uring_sqe *sqe[1];
+       void *addr;
+
+       if (shmem_idx >= UBLK_BUF_MAX || !shmem_table[shmem_idx].mmap_base)	/* reject bad index or unmapped buffer */
+               return -EINVAL;
+
+       addr = shmem_table[shmem_idx].mmap_base + shmem_off;	/* server-side view of the request data */
+
+       ublk_io_alloc_sqes(t, sqe, 1);
+       if (!sqe[0])
+               return -ENOMEM;
+
+       io_uring_prep_rw(op, sqe[0], ublk_get_registered_fd(q, 1),
+                        addr, len, file_offset);	/* fd index 1: backing file — TODO confirm against fd registration order */
+       io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);	/* fd above is a registered-file index, not a raw fd */
+       sqe[0]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);
+       return 1;	/* one SQE queued for this request */
+}
+
 static int loop_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q,
                                const struct ublksrv_io_desc *iod, int tag)
 {
@@ -41,6 +75,10 @@ static int loop_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q,
        void *addr = io->buf_addr;
        unsigned short buf_index = ublk_io_buf_idx(t, q, tag);
 
+       /* shared memory zero-copy path */
+       if (iod->op_flags & UBLK_IO_F_SHMEM_ZC)
+               return loop_queue_shmem_zc_io(t, q, iod, tag);
+
        if (iod->op_flags & UBLK_IO_F_INTEGRITY) {
                ublk_io_alloc_sqes(t, sqe, 1);
                /* Use second backing file for integrity data */