]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/xe: Add xe_tlb_inval_idle helper
authorMatthew Brost <matthew.brost@intel.com>
Fri, 16 Jan 2026 22:17:29 +0000 (14:17 -0800)
committerMatthew Brost <matthew.brost@intel.com>
Sat, 17 Jan 2026 02:24:54 +0000 (18:24 -0800)
Introduce the xe_tlb_inval_idle helper to detect whether any TLB
invalidations are currently in flight. This is used in context-based TLB
invalidations to determine whether dummy TLB invalidations need to be
sent to maintain proper TLB invalidation fence ordering.

v2:
 - Implement xe_tlb_inval_idle based on pending list

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Stuart Summers <stuart.summers@intel.com>
Tested-by: Stuart Summers <stuart.summers@intel.com>
Link: https://patch.msgid.link/20260116221731.868657-10-matthew.brost@intel.com
drivers/gpu/drm/xe/xe_tlb_inval.c
drivers/gpu/drm/xe/xe_tlb_inval.h

index 21fef337f29c474cc2125fb265698f86c957f22e..989fe0e7f8eeb1b469a971722b7be0219f947937 100644 (file)
@@ -41,11 +41,14 @@ static void xe_tlb_inval_fence_fini(struct xe_tlb_inval_fence *fence)
 static void
 xe_tlb_inval_fence_signal(struct xe_tlb_inval_fence *fence)
 {
+       struct xe_tlb_inval *tlb_inval = fence->tlb_inval;
        bool stack = test_bit(FENCE_STACK_BIT, &fence->base.flags);
 
        lockdep_assert_held(&fence->tlb_inval->pending_lock);
 
        list_del(&fence->link);
+       if (list_empty(&tlb_inval->pending_fences))
+               cancel_delayed_work(&tlb_inval->fence_tdr);
        trace_xe_tlb_inval_fence_signal(fence->tlb_inval->xe, fence);
        xe_tlb_inval_fence_fini(fence);
        dma_fence_signal(&fence->base);
@@ -465,3 +468,21 @@ void xe_tlb_inval_fence_init(struct xe_tlb_inval *tlb_inval,
                dma_fence_get(&fence->base);
        fence->tlb_inval = tlb_inval;
 }
+
+/**
+ * xe_tlb_inval_idle() - Determine whether TLB invalidation is idle
+ * @tlb_inval: TLB invalidation client
+ *
+ * Check the TLB invalidation pending-fence list to determine if it is idle
+ * (i.e., no TLB invalidations are in flight). Expected to be called in the
+ * backend after the fence has been added to the pending list, and takes this
+ * into account.
+ *
+ * Return: True if TLB invalidation client is idle, False otherwise
+ */
+bool xe_tlb_inval_idle(struct xe_tlb_inval *tlb_inval)
+{
+       lockdep_assert_held(&tlb_inval->seqno_lock);
+
+       guard(spinlock_irq)(&tlb_inval->pending_lock);
+       return list_is_singular(&tlb_inval->pending_fences);
+}
index 858d0690f995f7e16373b9e9a3825f93b4a4c729..62089254fa23995f90a043f678b84a491556c590 100644 (file)
@@ -43,4 +43,6 @@ xe_tlb_inval_fence_wait(struct xe_tlb_inval_fence *fence)
 
 void xe_tlb_inval_done_handler(struct xe_tlb_inval *tlb_inval, int seqno);
 
+bool xe_tlb_inval_idle(struct xe_tlb_inval *tlb_inval);
+
 #endif /* _XE_TLB_INVAL_ */