xhci: show DMA address of TRB when tracing TRBs
author    Mathias Nyman <mathias.nyman@linux.intel.com>
          Wed, 6 Nov 2024 10:14:31 +0000 (12:14 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 6 Nov 2024 12:26:14 +0000 (13:26 +0100)
The DMA address of a queued TRB is essential when looking at traces, as
both transfer events and command completion events refer to the command
or transfer TRB by its DMA address.

Previously the TRB address had to be inferred from the xhci_inc_enq and
xhci_inc_deq trace entries seen after queuing or handling a TRB.

Now that the DMA address is shown in TRB tracing, most of the
xhci_inc_enq and xhci_inc_deq traces can be removed, decreasing trace
size.

Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Link: https://lore.kernel.org/r/20241106101459.775897-6-mathias.nyman@linux.intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/usb/host/xhci-dbgcap.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci-trace.h
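
The DMA address passed to the new trace calls comes from
xhci_trb_virt_to_dma(), the existing helper in
drivers/usb/host/xhci-ring.c. As a rough sketch of what that helper does
(simplified; field names follow the kernel's struct xhci_segment), it
translates the ring's virtual enqueue/dequeue pointer into the bus
address the controller actually sees:

/*
 * Simplified sketch of the existing helper in xhci-ring.c: a TRB's DMA
 * address is the segment's DMA base plus the TRB's offset within the
 * segment.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
				union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset within the segment, counted in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}

Because TRBs within a segment are physically contiguous, the address is
simply the segment's DMA base plus the TRB index times
sizeof(union xhci_trb).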

diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index 241d7aa1fbc20f7e556ada31033bf23734637c42..408082372be1e2eea0702db17858923bad658428 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -248,8 +248,9 @@ xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
        trb->generic.field[2]   = cpu_to_le32(field3);
        trb->generic.field[3]   = cpu_to_le32(field4);
 
-       trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);
-
+       trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic,
+                                      xhci_trb_virt_to_dma(ring->enq_seg,
+                                                           ring->enqueue));
        ring->num_trbs_free--;
        next = ++(ring->enqueue);
        if (TRB_TYPE_LINK_LE32(next->link.control)) {
@@ -747,7 +748,7 @@ static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
                return;
        }
 
-       trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);
+       trace_xhci_dbc_handle_transfer(ring, &req->trb->generic, req->trb_dma);
 
        switch (comp_code) {
        case COMP_SUCCESS:
@@ -898,7 +899,9 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
                 */
                rmb();
 
-               trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);
+               trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic,
+                                           xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
+                                                                dbc->ring_evt->dequeue));
 
                switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
                case TRB_TYPE(TRB_PORT_STATUS):
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index f1176c270c43a89254712c372825f77570caf391..3c19f58fcefd7e9d30816cfd691eed1fa20a6954 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1714,7 +1714,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
        cmd_dma = le64_to_cpu(event->cmd_trb);
        cmd_trb = xhci->cmd_ring->dequeue;
 
-       trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic);
+       trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic, cmd_dma);
 
        cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
 
@@ -2886,7 +2886,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                ep_ring->last_td_was_short = false;
 
        ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) / sizeof(*ep_trb)];
-       trace_xhci_handle_transfer(ep_ring, (struct xhci_generic_trb *) ep_trb);
+       trace_xhci_handle_transfer(ep_ring, (struct xhci_generic_trb *) ep_trb, ep_trb_dma);
 
        /*
         * No-op TRB could trigger interrupts in a case where a URB was killed
@@ -2936,7 +2936,9 @@ static int xhci_handle_event_trb(struct xhci_hcd *xhci, struct xhci_interrupter
 {
        u32 trb_type;
 
-       trace_xhci_handle_event(ir->event_ring, &event->generic);
+       trace_xhci_handle_event(ir->event_ring, &event->generic,
+                               xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
+                                                    ir->event_ring->dequeue));
 
        /*
         * Barrier between reading the TRB_CYCLE (valid) flag before, and any
@@ -3159,7 +3161,8 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
        wmb();
        trb->field[3] = cpu_to_le32(field4);
 
-       trace_xhci_queue_trb(ring, trb);
+       trace_xhci_queue_trb(ring, trb,
+                            xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue));
 
        inc_enq(xhci, ring, more_trbs_coming);
 }
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index 24405315ffe6d9de943f6db88f1184702705442c..cc916e58a3295415200e17917120ccb386a54767 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -108,9 +108,10 @@ DEFINE_EVENT(xhci_log_ctx, xhci_address_ctx,
 );
 
 DECLARE_EVENT_CLASS(xhci_log_trb,
-       TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
-       TP_ARGS(ring, trb),
+       TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
+       TP_ARGS(ring, trb, dma),
        TP_STRUCT__entry(
+               __field(dma_addr_t, dma)
                __field(u32, type)
                __field(u32, field0)
                __field(u32, field1)
@@ -118,51 +119,54 @@ DECLARE_EVENT_CLASS(xhci_log_trb,
                __field(u32, field3)
        ),
        TP_fast_assign(
+               __entry->dma = dma;
                __entry->type = ring->type;
                __entry->field0 = le32_to_cpu(trb->field[0]);
                __entry->field1 = le32_to_cpu(trb->field[1]);
                __entry->field2 = le32_to_cpu(trb->field[2]);
                __entry->field3 = le32_to_cpu(trb->field[3]);
        ),
-       TP_printk("%s: %s", xhci_ring_type_string(__entry->type),
+       TP_printk("%s: @%pad %s",
+                 xhci_ring_type_string(__entry->type), &__entry->dma,
                  xhci_decode_trb(__get_buf(XHCI_MSG_MAX), XHCI_MSG_MAX, __entry->field0,
                                  __entry->field1, __entry->field2, __entry->field3)
        )
 );
 
 DEFINE_EVENT(xhci_log_trb, xhci_handle_event,
-       TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
-       TP_ARGS(ring, trb)
+       TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
+       TP_ARGS(ring, trb, dma)
 );
 
 DEFINE_EVENT(xhci_log_trb, xhci_handle_command,
-       TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
-       TP_ARGS(ring, trb)
+       TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
+       TP_ARGS(ring, trb, dma)
 );
 
 DEFINE_EVENT(xhci_log_trb, xhci_handle_transfer,
-       TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
-       TP_ARGS(ring, trb)
+       TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
+       TP_ARGS(ring, trb, dma)
 );
 
 DEFINE_EVENT(xhci_log_trb, xhci_queue_trb,
-       TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
-       TP_ARGS(ring, trb)
+       TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
+       TP_ARGS(ring, trb, dma)
 );
 
 DEFINE_EVENT(xhci_log_trb, xhci_dbc_handle_event,
-       TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
-       TP_ARGS(ring, trb)
+       TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
+       TP_ARGS(ring, trb, dma)
 );
 
 DEFINE_EVENT(xhci_log_trb, xhci_dbc_handle_transfer,
-       TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
-       TP_ARGS(ring, trb)
+       TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
+       TP_ARGS(ring, trb, dma)
 );
 
 DEFINE_EVENT(xhci_log_trb, xhci_dbc_gadget_ep_queue,
-       TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
-       TP_ARGS(ring, trb)
+       TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
+       TP_ARGS(ring, trb, dma)
 );
 
 DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
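
With the updated TP_printk format ("%s: @%pad %s"), each TRB trace entry
now prints the ring type, the TRB's DMA address, and the decoded TRB. A
hypothetical excerpt (addresses and the <decoded ...> placeholders are
illustrative only, not real output) showing how a command completion is
matched back to the queued command by address:

	xhci_queue_trb:      COMMAND: @00000000fffd5000 <decoded command TRB>
	xhci_handle_event:   EVENT: @00000000fffd7010 <decoded command completion event>
	xhci_handle_command: COMMAND: @00000000fffd5000 <decoded command TRB>

The completion event's own @address is its slot on the event ring, while
handle_cmd_completion() traces the original command TRB using cmd_dma
taken from the event, so its @address matches the earlier xhci_queue_trb
line.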