git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
usb: xhci: improve TR Dequeue Pointer mask
Author: Niklas Neronin <niklas.neronin@linux.intel.com>
Author date: Wed, 17 Sep 2025 21:07:23 +0000 (00:07 +0300)
Committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Commit date: Thu, 18 Sep 2025 07:53:11 +0000 (09:53 +0200)
Address the naming and usage of the TR Dequeue Pointer mask in the xhci
driver. The Endpoint Context Field at offset 0x08 is defined as follows:
 Bit 0 Dequeue Cycle State (DCS)
 Bits 3:1 RsvdZ (Reserved and Zero)
 Bits 63:4 TR Dequeue Pointer

When extracting the TR Dequeue Pointer for an Endpoint without Streams,
in xhci_handle_cmd_set_deq(), the inverted Dequeue Cycle State mask
(~EP_CTX_CYCLE_MASK) is used, inadvertently including the Reserved bits.
Although bits 3:1 are typically zero, using the incorrect mask could cause
issues.

The existing mask, named "SCTX_DEQ_MASK," is misleading because "SCTX"
implies exclusivity to Stream Contexts, whereas the TR Dequeue Pointer is
applicable to both Stream and non-Stream Contexts.

Rename the mask to "TR_DEQ_PTR_MASK", utilize GENMASK_ULL() macro and use
the mask when handling the TR Dequeue Pointer field.

Function xhci_get_hw_deq() returns the Endpoint Context Field 0x08, either
directly from the Endpoint context or a Stream.

Signed-off-by: Niklas Neronin <niklas.neronin@linux.intel.com>
Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Link: https://lore.kernel.org/r/20250917210726.97100-5-mathias.nyman@linux.intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.h

index 543cbec560c55ec6c30f6db0509ac49560598cf9..8e209aa33ea75796629dae81a8aa6267ba54f76f 100644 (file)
@@ -711,7 +711,7 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
                return -ENODEV;
        }
 
-       hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
+       hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id) & TR_DEQ_PTR_MASK;
        new_seg = ep_ring->deq_seg;
        new_deq = ep_ring->dequeue;
        new_cycle = le32_to_cpu(td->end_trb->generic.field[3]) & TRB_CYCLE;
@@ -723,7 +723,7 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
         */
        do {
                if (!hw_dequeue_found && xhci_trb_virt_to_dma(new_seg, new_deq)
-                   == (dma_addr_t)(hw_dequeue & ~0xf)) {
+                   == (dma_addr_t)hw_dequeue) {
                        hw_dequeue_found = true;
                        if (td_last_trb_found)
                                break;
@@ -1066,7 +1066,7 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
                 */
                hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index,
                                         td->urb->stream_id);
-               hw_deq &= ~0xf;
+               hw_deq &= TR_DEQ_PTR_MASK;
 
                if (td->cancel_status == TD_HALTED || trb_in_td(td, hw_deq)) {
                        switch (td->cancel_status) {
@@ -1156,7 +1156,7 @@ static struct xhci_td *find_halted_td(struct xhci_virt_ep *ep)
 
        if (!list_empty(&ep->ring->td_list)) { /* Not streams compatible */
                hw_deq = xhci_get_hw_deq(ep->xhci, ep->vdev, ep->ep_index, 0);
-               hw_deq &= ~0xf;
+               hw_deq &= TR_DEQ_PTR_MASK;
                td = list_first_entry(&ep->ring->td_list, struct xhci_td, td_list);
                if (trb_in_td(td, hw_deq))
                        return td;
@@ -1479,7 +1479,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
                u64 deq;
                /* 4.6.10 deq ptr is written to the stream ctx for streams */
                if (ep->ep_state & EP_HAS_STREAMS) {
-                       deq = le64_to_cpu(stream_ctx->stream_ring) & SCTX_DEQ_MASK;
+                       deq = le64_to_cpu(stream_ctx->stream_ring) & TR_DEQ_PTR_MASK;
 
                        /*
                         * Cadence xHCI controllers store some endpoint state
@@ -1495,7 +1495,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
                                stream_ctx->reserved[1] = 0;
                        }
                } else {
-                       deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
+                       deq = le64_to_cpu(ep_ctx->deq) & TR_DEQ_PTR_MASK;
                }
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
index e09bd9a5a996cdc81d67045ca4d23aa926018aee..58a51f09cceb8f0c11c9f94a5744a133c6ee03ad 100644 (file)
@@ -500,7 +500,8 @@ struct xhci_ep_ctx {
 
 /* deq bitmasks */
 #define EP_CTX_CYCLE_MASK              (1 << 0)
-#define SCTX_DEQ_MASK                  (~0xfL)
+/* bits 63:4 - TR Dequeue Pointer */
+#define TR_DEQ_PTR_MASK                        GENMASK_ULL(63, 4)
 
 
 /**