git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
io_uring: fix drain stalls by invalid SQE
Author: Pavel Begunkov <asml.silence@gmail.com>
Wed, 9 Aug 2023 12:21:41 +0000 (13:21 +0100)
Committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 19 Sep 2023 10:22:39 +0000 (12:22 +0200)
[ Upstream commit cfdbaa3a291d6fd2cb4a1a70d74e63b4abc2f5ec ]

cq_extra is protected by ->completion_lock, which io_get_sqe() misses.
The bug is harmless as it doesn't happen in real life, requires invalid
SQ index array and racing with submission, and only messes up the
userspace, i.e. stall requests execution but will be cleaned up on
ring destruction.

Fixes: 15641e427070f ("io_uring: don't cache number of dropped SQEs")
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/66096d54651b1a60534bb2023f2947f09f50ef73.1691538547.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
io_uring/io_uring.c

index 7c98a820c8dd0972bb6804ef5af628f52df2fda0..e4de493bcff436647b17d54187c5909b58a6b1df 100644 (file)
@@ -7531,7 +7531,9 @@ static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
                return &ctx->sq_sqes[head];
 
        /* drop invalid entries */
+       spin_lock(&ctx->completion_lock);
        ctx->cq_extra--;
+       spin_unlock(&ctx->completion_lock);
        WRITE_ONCE(ctx->rings->sq_dropped,
                   READ_ONCE(ctx->rings->sq_dropped) + 1);
        return NULL;