]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/xe: Do not forward invalid TLB invalidation seqnos to upper layers
authorMatthew Brost <matthew.brost@intel.com>
Fri, 12 Dec 2025 21:32:27 +0000 (05:32 +0800)
committerMatthew Brost <matthew.brost@intel.com>
Sat, 13 Dec 2025 00:59:01 +0000 (16:59 -0800)
Certain TLB invalidation operations send multiple H2G messages per seqno
with only the final H2G containing the valid seqno - the others carry an
invalid seqno. The G2H handler drops these invalid seqnos to avoid
prematurely signaling a TLB invalidation fence.

With TLB_INVALIDATION_SEQNO_INVALID used to indicate in-progress
multi-step TLB invalidations, reset tdr to ensure that timeout
won't prematurely trigger when G2H actions are still ongoing.

v2: Remove lock from xe_tlb_inval_reset_timeout. (Matthew B)

v3: Squash with dependent patch from Matthew Brost's series.

Signed-off-by: Brian Nguyen <brian3.nguyen@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patch.msgid.link/20251212213225.3564537-13-brian3.nguyen@intel.com
drivers/gpu/drm/xe/xe_tlb_inval.c
drivers/gpu/drm/xe/xe_tlb_inval_types.h

index 918a59e686ea72b711b4e0741165604e93d9af3d..a122fbb9fc4acc5fa56c94727caf0f4c75569a18 100644 (file)
@@ -199,6 +199,20 @@ void xe_tlb_inval_reset(struct xe_tlb_inval *tlb_inval)
        mutex_unlock(&tlb_inval->seqno_lock);
 }
 
+/**
+ * xe_tlb_inval_reset_timeout() - Reset TLB inval fence timeout
+ * @tlb_inval: TLB invalidation client
+ *
+ * Reset the TLB invalidation timeout timer.
+ */
+static void xe_tlb_inval_reset_timeout(struct xe_tlb_inval *tlb_inval)
+{
+       lockdep_assert_held(&tlb_inval->pending_lock);
+
+       mod_delayed_work(system_wq, &tlb_inval->fence_tdr,
+                        tlb_inval->ops->timeout_delay(tlb_inval));
+}
+
 static bool xe_tlb_inval_seqno_past(struct xe_tlb_inval *tlb_inval, int seqno)
 {
        int seqno_recv = READ_ONCE(tlb_inval->seqno_recv);
@@ -360,6 +374,12 @@ void xe_tlb_inval_done_handler(struct xe_tlb_inval *tlb_inval, int seqno)
         * process_g2h_msg().
         */
        spin_lock_irqsave(&tlb_inval->pending_lock, flags);
+       if (seqno == TLB_INVALIDATION_SEQNO_INVALID) {
+               xe_tlb_inval_reset_timeout(tlb_inval);
+               spin_unlock_irqrestore(&tlb_inval->pending_lock, flags);
+               return;
+       }
+
        if (xe_tlb_inval_seqno_past(tlb_inval, seqno)) {
                spin_unlock_irqrestore(&tlb_inval->pending_lock, flags);
                return;
index 8f8b060e9005a608b4de851c993ee2be495e0c05..7a6967ce3b767a9a27027c8f3f5d42b990a7419e 100644 (file)
@@ -80,6 +80,7 @@ struct xe_tlb_inval {
        const struct xe_tlb_inval_ops *ops;
        /** @tlb_inval.seqno: TLB invalidation seqno, protected by CT lock */
 #define TLB_INVALIDATION_SEQNO_MAX     0x100000
+#define TLB_INVALIDATION_SEQNO_INVALID TLB_INVALIDATION_SEQNO_MAX
        int seqno;
        /** @tlb_invalidation.seqno_lock: protects @tlb_invalidation.seqno */
        struct mutex seqno_lock;