io_uring/sqpoll: fix sqpoll error handling races
author Pavel Begunkov <asml.silence@gmail.com>
Thu, 26 Dec 2024 16:49:23 +0000 (16:49 +0000)
committer Jens Axboe <axboe@kernel.dk>
Thu, 26 Dec 2024 17:02:40 +0000 (10:02 -0700)
BUG: KASAN: slab-use-after-free in __lock_acquire+0x370b/0x4a10 kernel/locking/lockdep.c:5089
Call Trace:
<TASK>
...
_raw_spin_lock_irqsave+0x3d/0x60 kernel/locking/spinlock.c:162
class_raw_spinlock_irqsave_constructor include/linux/spinlock.h:551 [inline]
try_to_wake_up+0xb5/0x23c0 kernel/sched/core.c:4205
io_sq_thread_park+0xac/0xe0 io_uring/sqpoll.c:55
io_sq_thread_finish+0x6b/0x310 io_uring/sqpoll.c:96
io_sq_offload_create+0x162/0x11d0 io_uring/sqpoll.c:497
io_uring_create io_uring/io_uring.c:3724 [inline]
io_uring_setup+0x1728/0x3230 io_uring/io_uring.c:3806
...

Kun Hu reports a use-after-free in the SQPOLL creation error path: if
io_uring_alloc_task_context() fails and io_sq_thread() manages to run
and complete before the rest of the error handling code, then
io_sq_thread_finish() ends up looking at an already-killed task.

Note that this is mostly theoretical, requiring fault injection on
the allocation side to trigger in practice.

Cc: stable@vger.kernel.org
Reported-by: Kun Hu <huk23@m.fudan.edu.cn>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/0f2f1aa5729332612bd01fe0f2f385fd1f06ce7c.1735231717.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
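
The patch below pins the newly created task with get_task_struct() before it
is woken, so the error path can still safely park/finish it even if the SQPOLL
thread has already exited, and drops that extra reference on both the success
and error paths. The following is a minimal userspace sketch of the same
"creator holds its own reference across the window" pattern; it is not the
kernel code, and the names (struct task_obj, task_get, task_put, worker) are
illustrative only, loosely standing in for task_struct refcounting.

/* Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct task_obj {
	atomic_int refs;	/* stands in for the task_struct refcount */
	atomic_int exited;
};

static struct task_obj *task_alloc(void)
{
	struct task_obj *t = calloc(1, sizeof(*t));

	if (t) {
		atomic_init(&t->refs, 1);	/* reference owned by the worker */
		atomic_init(&t->exited, 0);
	}
	return t;
}

static void task_get(struct task_obj *t)
{
	atomic_fetch_add(&t->refs, 1);
}

static void task_put(struct task_obj *t)
{
	/* Free once the last reference is dropped. */
	if (atomic_fetch_sub(&t->refs, 1) == 1)
		free(t);
}

/* Worker may finish and drop its reference before the creator's error path runs. */
static void *worker(void *arg)
{
	struct task_obj *t = arg;

	atomic_store(&t->exited, 1);
	task_put(t);
	return NULL;
}

int main(void)
{
	struct task_obj *t = task_alloc();
	pthread_t thr;

	if (!t)
		return 1;

	/*
	 * Extra reference held by the creator, mirroring task_to_put in the
	 * patch: without it the worker could run, drop the only reference and
	 * free the object before the creator's error path looks at it.
	 */
	task_get(t);

	if (pthread_create(&thr, NULL, worker, t) != 0) {
		task_put(t);	/* worker never ran: drop its reference too */
		task_put(t);
		return 1;
	}

	/* Simulated error path: t is still valid even if the worker already exited. */
	printf("worker exited yet? %d\n", atomic_load(&t->exited));

	pthread_join(thr, NULL);
	task_put(t);		/* drop the creator's reference on all paths */
	return 0;
}

The design choice in the patch is the same: sqd->thread alone does not keep
the task alive, so the extra reference taken before wake_up_new_task() is what
makes it safe for io_sq_thread_finish() to dereference the task in the error
path, and it is released with put_task_struct() once the function is done with
it.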
io_uring/sqpoll.c

index 6df5e649c413e39e36db6cde2a8c6745e533bea9..9e5bd79fd2b5f0e094882dfb87f5b24240e43f1d 100644 (file)
@@ -405,6 +405,7 @@ void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
 __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
                                struct io_uring_params *p)
 {
+       struct task_struct *task_to_put = NULL;
        int ret;
 
        /* Retain compatibility with failing for an invalid attach attempt */
@@ -480,6 +481,7 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
                }
 
                sqd->thread = tsk;
+               task_to_put = get_task_struct(tsk);
                ret = io_uring_alloc_task_context(tsk, ctx);
                wake_up_new_task(tsk);
                if (ret)
@@ -490,11 +492,15 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
                goto err;
        }
 
+       if (task_to_put)
+               put_task_struct(task_to_put);
        return 0;
 err_sqpoll:
        complete(&ctx->sq_data->exited);
 err:
        io_sq_thread_finish(ctx);
+       if (task_to_put)
+               put_task_struct(task_to_put);
        return ret;
 }