io_uring: clean up cqe trace points
author     Pavel Begunkov <asml.silence@gmail.com>
           Fri, 18 Oct 2024 16:14:00 +0000 (17:14 +0100)
committer  Jens Axboe <axboe@kernel.dk>
           Tue, 29 Oct 2024 19:43:27 +0000 (13:43 -0600)
We have too many helpers posting CQEs. Instead of tracing completion
events before filling in a CQE, and thus having to pass all the data
separately, set the CQE first, pass it to the tracing helper, and let
it extract everything it needs.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/b83c1ca9ee5aed2df0f3bb743bf5ed699cce4c86.1729267437.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
include/linux/io_uring_types.h
include/trace/events/io_uring.h
io_uring/io_uring.c
io_uring/io_uring.h
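
The patch reverses the order of operations: the CQE is written first, and
the finished CQE is handed to the tracepoint instead of every field being
passed separately. A minimal before/after sketch of the call shape,
condensed from the diffs below (illustrative, not a complete kernel excerpt):

	/* Before: each piece of completion data passed as its own argument. */
	trace_io_uring_complete(ctx, req, user_data, res, cflags, extra1, extra2);

	/* After: fill in the CQE, then let the tracepoint extract what it needs. */
	WRITE_ONCE(cqe->user_data, user_data);
	WRITE_ONCE(cqe->res, res);
	WRITE_ONCE(cqe->flags, cflags);
	trace_io_uring_complete(ctx, req, cqe);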

include/linux/io_uring_types.h
index 9c7e1d3f06e5230b323062e598ea9f981bdb4058..39108714466639415f2d22979a37e55abbfbab9f 100644 (file)
@@ -662,4 +662,9 @@ struct io_overflow_cqe {
        struct io_uring_cqe cqe;
 };
 
+static inline bool io_ctx_cqe32(struct io_ring_ctx *ctx)
+{
+       return ctx->flags & IORING_SETUP_CQE32;
+}
+
 #endif
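
io_ctx_cqe32() is a small predicate for rings created with
IORING_SETUP_CQE32, where each CQE carries two extra 64-bit words in
big_cqe[]. The updated trace event below uses it to decide whether those
words are valid; the same pattern in a hedged, standalone form:

	/* Illustrative only: big_cqe[] holds meaningful data solely on CQE32 rings. */
	u64 extra1 = io_ctx_cqe32(ctx) ? cqe->big_cqe[0] : 0;
	u64 extra2 = io_ctx_cqe32(ctx) ? cqe->big_cqe[1] : 0;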
include/trace/events/io_uring.h
index 412c9c210a329a2675d88278f2753d80755b8136..fb81c533b3106554cc7f1d21b5aba8124925fb58 100644 (file)
@@ -315,20 +315,14 @@ TRACE_EVENT(io_uring_fail_link,
  * io_uring_complete - called when completing an SQE
  *
  * @ctx:               pointer to a ring context structure
- * @req:               pointer to a submitted request
- * @user_data:         user data associated with the request
- * @res:               result of the request
- * @cflags:            completion flags
- * @extra1:            extra 64-bit data for CQE32
- * @extra2:            extra 64-bit data for CQE32
- *
+ * @req:               (optional) pointer to a submitted request
+ * @cqe:               pointer to the filled in CQE being posted
  */
 TRACE_EVENT(io_uring_complete,
 
-       TP_PROTO(void *ctx, void *req, u64 user_data, int res, unsigned cflags,
-                u64 extra1, u64 extra2),
+       TP_PROTO(struct io_ring_ctx *ctx, void *req, struct io_uring_cqe *cqe),
 
-       TP_ARGS(ctx, req, user_data, res, cflags, extra1, extra2),
+       TP_ARGS(ctx, req, cqe),
 
        TP_STRUCT__entry (
                __field(  void *,       ctx             )
                __field(  void *,       req             )
@@ -343,11 +337,11 @@ TRACE_EVENT(io_uring_complete,
        TP_fast_assign(
                __entry->ctx            = ctx;
                __entry->req            = req;
-               __entry->user_data      = user_data;
-               __entry->res            = res;
-               __entry->cflags         = cflags;
-               __entry->extra1         = extra1;
-               __entry->extra2         = extra2;
+               __entry->user_data      = cqe->user_data;
+               __entry->res            = cqe->res;
+               __entry->cflags         = cqe->flags;
+               __entry->extra1         = io_ctx_cqe32(ctx) ? cqe->big_cqe[0] : 0;
+               __entry->extra2         = io_ctx_cqe32(ctx) ? cqe->big_cqe[1] : 0;
        ),
 
        TP_printk("ring %p, req %p, user_data 0x%llx, result %d, cflags 0x%x "
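
The event's rendered output keeps its old shape; only where the values come
from changes. A hypothetical trace line (all values made up) might look like:

	io_uring_complete: ring 00000000b8a1f2c3, req 0000000045d6e7f8, user_data 0x42, result 0, cflags 0x0 extra1 0 extra2 0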
io_uring/io_uring.c
index fa9d31034c626e74f07ff7d1a27d194adb1a7d38..58b401900b4143773c7d3df345c658521ac3ca90 100644 (file)
@@ -828,8 +828,6 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
         * the ring.
         */
        if (likely(io_get_cqe(ctx, &cqe))) {
-               trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0);
-
                WRITE_ONCE(cqe->user_data, user_data);
                WRITE_ONCE(cqe->res, res);
                WRITE_ONCE(cqe->flags, cflags);
@@ -838,6 +836,8 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
                        WRITE_ONCE(cqe->big_cqe[0], 0);
                        WRITE_ONCE(cqe->big_cqe[1], 0);
                }
+
+               trace_io_uring_complete(ctx, NULL, cqe);
                return true;
        }
        return false;
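
Note the ordering in io_fill_cqe_aux(): the tracepoint moves below the
WRITE_ONCE() stores, including the zeroing of big_cqe[] on CQE32 rings,
because the event now reads its data back out of the CQE rather than
receiving it as arguments.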
io_uring/io_uring.h
index 70b6675941ff765076a4cf33e6a41a8e69babda1..9cd9a127e9ed5c04b905efa2f0ec0ac6b1fb4820 100644 (file)
@@ -189,16 +189,15 @@ static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
        if (unlikely(!io_get_cqe(ctx, &cqe)))
                return false;
 
-       if (trace_io_uring_complete_enabled())
-               trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
-                                       req->cqe.res, req->cqe.flags,
-                                       req->big_cqe.extra1, req->big_cqe.extra2);
 
        memcpy(cqe, &req->cqe, sizeof(*cqe));
        if (ctx->flags & IORING_SETUP_CQE32) {
                memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
                memset(&req->big_cqe, 0, sizeof(req->big_cqe));
        }
+
+       if (trace_io_uring_complete_enabled())
+               trace_io_uring_complete(req->ctx, req, cqe);
        return true;
 }
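
io_fill_cqe_req() keeps the trace_io_uring_complete_enabled() guard: every
tracepoint generates a trace_<event>_enabled() helper that compiles down to
a static-branch test, so callers can skip argument setup entirely while the
event is off. With the new prototype the guarded region shrinks to a single
call, and because it runs after the memcpy() it records exactly the CQE
contents that were posted:

	/* Sketch of the guard pattern; cqe already holds the posted completion. */
	if (trace_io_uring_complete_enabled())
		trace_io_uring_complete(req->ctx, req, cqe);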