From: Jens Axboe
Date: Wed, 8 Apr 2026 17:31:38 +0000 (-0600)
Subject: io_uring/tctx: clean up __io_uring_add_tctx_node() error handling
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=7880174e1e5e88944ea75cf871efd77ec5e3ef51;p=thirdparty%2Fkernel%2Flinux.git

io_uring/tctx: clean up __io_uring_add_tctx_node() error handling

Refactor __io_uring_add_tctx_node() so that on error it never leaves
current->io_uring pointing at a half-setup tctx. This moves the
assignment of current->io_uring to the end of the function post any
failure points.

Separate out the node installation into io_tctx_install_node() to
further clean this up.

Signed-off-by: Jens Axboe
---

diff --git a/io_uring/tctx.c b/io_uring/tctx.c
index e5cef6a8dde00..61533f30494f5 100644
--- a/io_uring/tctx.c
+++ b/io_uring/tctx.c
@@ -108,10 +108,37 @@ __cold struct io_uring_task *io_uring_alloc_task_context(struct task_struct *tas
 	return tctx;
 }
 
+static int io_tctx_install_node(struct io_ring_ctx *ctx,
+				struct io_uring_task *tctx)
+{
+	struct io_tctx_node *node;
+	int ret;
+
+	if (xa_load(&tctx->xa, (unsigned long)ctx))
+		return 0;
+
+	node = kmalloc_obj(*node);
+	if (!node)
+		return -ENOMEM;
+	node->ctx = ctx;
+	node->task = current;
+
+	ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
+			      node, GFP_KERNEL));
+	if (ret) {
+		kfree(node);
+		return ret;
+	}
+
+	mutex_lock(&ctx->tctx_lock);
+	list_add(&node->ctx_node, &ctx->tctx_list);
+	mutex_unlock(&ctx->tctx_lock);
+	return 0;
+}
+
 int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
 {
 	struct io_uring_task *tctx = current->io_uring;
-	struct io_tctx_node *node;
 	int ret;
 
 	if (unlikely(!tctx)) {
@@ -119,14 +146,13 @@ int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
 		if (IS_ERR(tctx))
 			return PTR_ERR(tctx);
 
-		current->io_uring = tctx;
 		if (ctx->int_flags & IO_RING_F_IOWQ_LIMITS_SET) {
 			unsigned int limits[2] = {
 				ctx->iowq_limits[0],
 				ctx->iowq_limits[1],
 			};
 			ret = io_wq_max_workers(tctx->io_wq, limits);
 			if (ret)
-				return ret;
+				goto err_free;
 		}
 	}
@@ -137,25 +163,19 @@ int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
 	 */
 	if (tctx->io_wq)
 		io_wq_set_exit_on_idle(tctx->io_wq, false);
 
-	if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
-		node = kmalloc_obj(*node);
-		if (!node)
-			return -ENOMEM;
-		node->ctx = ctx;
-		node->task = current;
-
-		ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
-					node, GFP_KERNEL));
-		if (ret) {
-			kfree(node);
-			return ret;
-		}
-		mutex_lock(&ctx->tctx_lock);
-		list_add(&node->ctx_node, &ctx->tctx_list);
-		mutex_unlock(&ctx->tctx_lock);
+	ret = io_tctx_install_node(ctx, tctx);
+	if (!ret) {
+		current->io_uring = tctx;
+		return 0;
 	}
-	return 0;
+	if (!current->io_uring) {
+err_free:
+		io_wq_put_and_exit(tctx->io_wq);
+		percpu_counter_destroy(&tctx->inflight);
+		kfree(tctx);
+	}
+	return ret;
 }
 
 int __io_uring_add_tctx_node_from_submit(struct io_ring_ctx *ctx)