io_uring: fix drain stalls by invalid SQE
author Pavel Begunkov <asml.silence@gmail.com>
Wed, 9 Aug 2023 12:21:41 +0000 (13:21 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 13 Sep 2023 07:48:17 +0000 (09:48 +0200)
[ Upstream commit cfdbaa3a291d6fd2cb4a1a70d74e63b4abc2f5ec ]

cq_extra is protected by ->completion_lock, which io_get_sqe() misses.
The bug is harmless as it doesn't happen in real life: it requires an
invalid SQ index array and a race with submission, and it only affects
userspace, i.e. request execution stalls, but everything is cleaned up
on ring destruction.
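
For illustration only, a minimal userspace sketch of this race class
(completion_lock and cq_extra are reused purely as stand-in names, the
pthread code below is not kernel code): one thread updates a shared
counter under the lock that all other users rely on, the other updates
it without the lock, so locked updates can be lost and the counter ends
up skewed, which is the kind of accounting error that throws off drain
sequencing.

/* build with: cc -O2 -pthread race-sketch.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t completion_lock = PTHREAD_MUTEX_INITIALIZER;
static long cq_extra;			/* stand-in for ctx->cq_extra */

/* Locked update, as the rest of the completion path does it. */
static void *locked_inc(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000000; i++) {
		pthread_mutex_lock(&completion_lock);
		cq_extra++;
		pthread_mutex_unlock(&completion_lock);
	}
	return NULL;
}

/* Unlocked update, mimicking the pre-fix io_get_sqe() decrement. */
static void *unlocked_dec(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000000; i++)
		cq_extra--;		/* racy read-modify-write */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, locked_inc, NULL);
	pthread_create(&b, NULL, unlocked_dec, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* With both sides serialised this would always print 0. */
	printf("cq_extra = %ld\n", cq_extra);
	return 0;
}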

Fixes: 15641e427070f ("io_uring: don't cache number of dropped SQEs")
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/66096d54651b1a60534bb2023f2947f09f50ef73.1691538547.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
io_uring/io_uring.c

index a57bdf336ca8a898bda090d4afe9f886ebbe12e2..d029e578bdfe1f8d927d0563fccc8f419d015904 100644
@@ -2468,7 +2468,9 @@ static bool io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe)
        }
 
        /* drop invalid entries */
+       spin_lock(&ctx->completion_lock);
        ctx->cq_extra--;
+       spin_unlock(&ctx->completion_lock);
        WRITE_ONCE(ctx->rings->sq_dropped,
                   READ_ONCE(ctx->rings->sq_dropped) + 1);
        return false;
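
The consumer of this accounting is the drain sequencing: req_need_defer()
compares the request's drain sequence plus cq_extra against cached_cq_tail
while the caller holds ->completion_lock, which is why the decrement above
has to take the same lock. The check looks roughly like the sketch below;
it is quoted from memory of io_uring/io_uring.c rather than from this
commit, so details may differ between stable branches.

static bool req_need_defer(struct io_kiocb *req, u32 seq)
{
	if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
		struct io_ring_ctx *ctx = req->ctx;

		/* read by the drain path with ->completion_lock held */
		return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
	}

	return false;
}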