blktrace: only calculate trace length once
author     Johannes Thumshirn <johannes.thumshirn@wdc.com>
           Wed, 22 Oct 2025 11:41:00 +0000 (13:41 +0200)
committer  Jens Axboe <axboe@kernel.dk>
           Wed, 22 Oct 2025 17:14:05 +0000 (11:14 -0600)
De-duplicate the calculation of the trace length: compute it once into a
local variable and pass the result to both trace_buffer_lock_reserve()
and relay_reserve(), instead of repeating the expression at each call
site.
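
For illustration, a minimal userspace sketch of the same pattern.
reserve_a()/reserve_b() are hypothetical stand-ins for
trace_buffer_lock_reserve() and relay_reserve(), and struct trace_rec is
a trimmed illustration, not the kernel's struct blk_io_trace:

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  /* Trimmed illustration only; not the kernel's struct blk_io_trace. */
  struct trace_rec {
          unsigned int magic;
          unsigned short pdu_len;
  };

  /* Hypothetical stand-ins for the two reservation paths; each hands
   * back a buffer of the requested length. */
  static void *reserve_a(size_t len) { return malloc(len); }
  static void *reserve_b(size_t len) { return malloc(len); }

  static void emit(int fast_path, const void *pdu, unsigned short pdu_len,
                   size_t cgid_len)
  {
          struct trace_rec *t;
          /* Compute the total record length once ... */
          size_t trace_len = sizeof(*t) + pdu_len + cgid_len;

          /* ... and reuse it at both reservation sites, instead of
           * spelling out sizeof(*t) + pdu_len + cgid_len in each call. */
          t = fast_path ? reserve_a(trace_len) : reserve_b(trace_len);
          if (!t)
                  return;
          t->magic = 0x65617400;          /* arbitrary marker value */
          t->pdu_len = pdu_len;
          memcpy(t + 1, pdu, pdu_len);    /* payload follows the header */
          printf("reserved %zu bytes\n", trace_len);
          free(t);
  }

  int main(void)
  {
          const char pdu[] = "payload";

          emit(1, pdu, sizeof(pdu), 0);
          emit(0, pdu, sizeof(pdu), sizeof(unsigned long long));
          return 0;
  }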

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
kernel/trace/blktrace.c

index 6941145b50589c2ab467193c7dca9db759c7eeb7..bc4b885f2ceca7c842b632cebf46df9117987355 100644 (file)
@@ -76,13 +76,14 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
        int cpu = smp_processor_id();
        bool blk_tracer = blk_tracer_enabled;
        ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
+       size_t trace_len;
 
+       trace_len = sizeof(*t) + cgid_len + len;
        if (blk_tracer) {
                buffer = blk_tr->array_buffer.buffer;
                trace_ctx = tracing_gen_ctx_flags(0);
                event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
-                                                 sizeof(*t) + len + cgid_len,
-                                                 trace_ctx);
+                                                 trace_len, trace_ctx);
                if (!event)
                        return;
                t = ring_buffer_event_data(event);
@@ -92,7 +93,7 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
        if (!bt->rchan)
                return;
 
-       t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len);
+       t = relay_reserve(bt->rchan, trace_len);
        if (t) {
                t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
                t->time = ktime_to_ns(ktime_get());
@@ -228,6 +229,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
        bool blk_tracer = blk_tracer_enabled;
        ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
        const enum req_op op = opf & REQ_OP_MASK;
+       size_t trace_len;
 
        if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
                return;
@@ -250,14 +252,14 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
                return;
        cpu = raw_smp_processor_id();
 
+       trace_len = sizeof(*t) + pdu_len + cgid_len;
        if (blk_tracer) {
                tracing_record_cmdline(current);
 
                buffer = blk_tr->array_buffer.buffer;
                trace_ctx = tracing_gen_ctx_flags(0);
                event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
-                                                 sizeof(*t) + pdu_len + cgid_len,
-                                                 trace_ctx);
+                                                 trace_len, trace_ctx);
                if (!event)
                        return;
                t = ring_buffer_event_data(event);
@@ -273,7 +275,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
         * from coming in and stepping on our toes.
         */
        local_irq_save(flags);
-       t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len);
+       t = relay_reserve(bt->rchan, trace_len);
        if (t) {
                sequence = per_cpu_ptr(bt->sequence, cpu);