git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
io_uring/register: fix ring resizing with mixed/large SQEs/CQEs
author: Jens Axboe <axboe@kernel.dk>
Mon, 20 Apr 2026 19:41:38 +0000 (13:41 -0600)
committer: Jens Axboe <axboe@kernel.dk>
Tue, 21 Apr 2026 18:19:08 +0000 (12:19 -0600)
The ring resizing only properly handles "normal" sized SQEs or CQEs when
there are pending entries around a resize. This should not normally be
the case, but the code is supposed to handle it regardless.

For the mixed SQE/CQE cases, the current copying works fine as they
are indexed in the same way. Each half is just copied separately. But
for fixed large SQEs and CQEs, the iteration and copy need to take that
into account.

Cc: stable@kernel.org
Fixes: 79cfe9e59c2a ("io_uring/register: add IORING_REGISTER_RESIZE_RINGS")
Reviewed-by: Gabriel Krisman Bertazi <krisman@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
io_uring/register.c

index 24e593332d1adf654e2c324386de979c63cb8de9..dce5e2f9cf770222341e0dd944d24e3dc132c12b 100644 (file)
@@ -599,10 +599,20 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
        if (tail - old_head > p->sq_entries)
                goto overflow;
        for (i = old_head; i < tail; i++) {
-               unsigned src_head = i & (ctx->sq_entries - 1);
-               unsigned dst_head = i & (p->sq_entries - 1);
-
-               n.sq_sqes[dst_head] = o.sq_sqes[src_head];
+               unsigned index, dst_mask, src_mask;
+               size_t sq_size;
+
+               index = i;
+               sq_size = sizeof(struct io_uring_sqe);
+               src_mask = ctx->sq_entries - 1;
+               dst_mask = p->sq_entries - 1;
+               if (ctx->flags & IORING_SETUP_SQE128) {
+                       index <<= 1;
+                       sq_size <<= 1;
+                       src_mask = (ctx->sq_entries << 1) - 1;
+                       dst_mask = (p->sq_entries << 1) - 1;
+               }
+               memcpy(&n.sq_sqes[index & dst_mask], &o.sq_sqes[index & src_mask], sq_size);
        }
        WRITE_ONCE(n.rings->sq.head, old_head);
        WRITE_ONCE(n.rings->sq.tail, tail);
@@ -619,10 +629,20 @@ overflow:
                goto out;
        }
        for (i = old_head; i < tail; i++) {
-               unsigned src_head = i & (ctx->cq_entries - 1);
-               unsigned dst_head = i & (p->cq_entries - 1);
-
-               n.rings->cqes[dst_head] = o.rings->cqes[src_head];
+               unsigned index, dst_mask, src_mask;
+               size_t cq_size;
+
+               index = i;
+               cq_size = sizeof(struct io_uring_cqe);
+               src_mask = ctx->cq_entries - 1;
+               dst_mask = p->cq_entries - 1;
+               if (ctx->flags & IORING_SETUP_CQE32) {
+                       index <<= 1;
+                       cq_size <<= 1;
+                       src_mask = (ctx->cq_entries << 1) - 1;
+                       dst_mask = (p->cq_entries << 1) - 1;
+               }
+               memcpy(&n.rings->cqes[index & dst_mask], &o.rings->cqes[index & src_mask], cq_size);
        }
        WRITE_ONCE(n.rings->cq.head, old_head);
        WRITE_ONCE(n.rings->cq.tail, tail);