io_uring: open code io_account_cq_overflow()
author Pavel Begunkov <asml.silence@gmail.com>
Fri, 9 May 2025 11:12:52 +0000 (12:12 +0100)
committer Jens Axboe <axboe@kernel.dk>
Fri, 9 May 2025 14:01:02 +0000 (08:01 -0600)
io_account_cq_overflow() doesn't help explain what's going on in
there, and it'll become even smaller with the following patches, so open
code it.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/e4333fa0d371f519e52a71148ebdffed4b8d3aa9.1746788718.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
io_uring/io_uring.c

index 3d1f4b2e4536df1cda20538ceeffd428c076cb0a..6efecb46c828c012a13fbc2590b27590e9c62c15 100644 (file)
@@ -381,14 +381,6 @@ err:
        return NULL;
 }
 
-static void io_account_cq_overflow(struct io_ring_ctx *ctx)
-{
-       struct io_rings *r = ctx->rings;
-
-       WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
-       ctx->cq_extra--;
-}
-
 static void io_clean_op(struct io_kiocb *req)
 {
        if (unlikely(req->flags & REQ_F_BUFFER_SELECTED))
@@ -742,12 +734,15 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
        ocqe = kmalloc(ocq_size, GFP_ATOMIC | __GFP_ACCOUNT);
        trace_io_uring_cqe_overflow(ctx, user_data, res, cflags, ocqe);
        if (!ocqe) {
+               struct io_rings *r = ctx->rings;
+
                /*
                 * If we're in ring overflow flush mode, or in task cancel mode,
                 * or cannot allocate an overflow entry, then we need to drop it
                 * on the floor.
                 */
-               io_account_cq_overflow(ctx);
+               WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
+               ctx->cq_extra--;
                set_bit(IO_CHECK_CQ_DROPPED_BIT, &ctx->check_cq);
                return false;
        }
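
For reference, a hedged sketch of how the counter touched above is observed from
userspace: cq_overflow lives in the CQ ring memory that the application mmap()s,
and its offset is published by io_uring_setup() in io_uring_params->cq_off.overflow.
The helper name check_cq_overflow() and the cq_ring_ptr argument below are
illustrative only, not part of the kernel or liburing API; ring setup and error
handling are omitted.

#include <linux/io_uring.h>
#include <stdio.h>

/*
 * Read the CQ overflow counter from the mmap'd CQ ring. The kernel bumps
 * this counter (via the WRITE_ONCE in the hunk above) when a CQE has to be
 * dropped because no overflow entry could be allocated.
 */
static void check_cq_overflow(const void *cq_ring_ptr,
			      const struct io_uring_params *p)
{
	const unsigned *overflow =
		(const unsigned *)((const char *)cq_ring_ptr + p->cq_off.overflow);
	unsigned dropped = __atomic_load_n(overflow, __ATOMIC_RELAXED);

	if (dropped)
		fprintf(stderr, "CQEs dropped due to overflow: %u\n", dropped);
}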