git.ipfire.org Git - thirdparty/linux.git/commitdiff
io_uring: open code io_req_cqe_overflow()
author: Pavel Begunkov <asml.silence@gmail.com>
Wed, 14 May 2025 08:07:20 +0000 (09:07 +0100)
committer: Jens Axboe <axboe@kernel.dk>
Fri, 16 May 2025 18:38:36 +0000 (12:38 -0600)
A preparation patch, just open code io_req_cqe_overflow().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Reviewed-by: Caleb Sander Mateos <csander@purestorage.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
io_uring/io_uring.c

index 43c285cd229472b10349821d5030c30d54757703..e4d6e572eabc5aaa25d6130467c4630b368849c2 100644 (file)
@@ -739,14 +739,6 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
        return true;
 }
 
-static void io_req_cqe_overflow(struct io_kiocb *req)
-{
-       io_cqring_event_overflow(req->ctx, req->cqe.user_data,
-                               req->cqe.res, req->cqe.flags,
-                               req->big_cqe.extra1, req->big_cqe.extra2);
-       memset(&req->big_cqe, 0, sizeof(req->big_cqe));
-}
-
 /*
  * writes to the cq entry need to come after reading head; the
  * control dependency is enough as we're using WRITE_ONCE to
@@ -1435,11 +1427,19 @@ void __io_submit_flush_completions(struct io_ring_ctx *ctx)
                    unlikely(!io_fill_cqe_req(ctx, req))) {
                        if (ctx->lockless_cq) {
                                spin_lock(&ctx->completion_lock);
-                               io_req_cqe_overflow(req);
+                               io_cqring_event_overflow(req->ctx, req->cqe.user_data,
+                                                       req->cqe.res, req->cqe.flags,
+                                                       req->big_cqe.extra1,
+                                                       req->big_cqe.extra2);
                                spin_unlock(&ctx->completion_lock);
                        } else {
-                               io_req_cqe_overflow(req);
+                               io_cqring_event_overflow(req->ctx, req->cqe.user_data,
+                                                       req->cqe.res, req->cqe.flags,
+                                                       req->big_cqe.extra1,
+                                                       req->big_cqe.extra2);
                        }
+
+                       memset(&req->big_cqe, 0, sizeof(req->big_cqe));
                }
        }
        __io_cq_unlock_post(ctx);