From 44ece22518594ec9ffd9ab8c4c500b522278289e Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Sat, 13 Dec 2025 05:32:27 +0800 Subject: [PATCH] drm/xe: Do not forward invalid TLB invalidation seqnos to upper layers Certain TLB invalidation operations send multiple H2G messages per seqno with only the final H2G containing the valid seqno - the others carry an invalid seqno. The G2H handler drops these invalid seqnos to avoid prematurely signaling a TLB invalidation fence. With TLB_INVALIDATION_SEQNO_INVALID used to indicate in-progress multi-step TLB invalidations, reset tdr to ensure that timeout won't prematurely trigger when G2H actions are still ongoing. v2: Remove lock from xe_tlb_inval_reset_timeout. (Matthew B) v3: Squash with dependent patch from Matthew Brost's series. Signed-off-by: Brian Nguyen Reviewed-by: Matthew Brost Signed-off-by: Matthew Brost Link: https://patch.msgid.link/20251212213225.3564537-13-brian3.nguyen@intel.com --- drivers/gpu/drm/xe/xe_tlb_inval.c | 20 ++++++++++++++++++++ drivers/gpu/drm/xe/xe_tlb_inval_types.h | 1 + 2 files changed, 21 insertions(+) diff --git a/drivers/gpu/drm/xe/xe_tlb_inval.c b/drivers/gpu/drm/xe/xe_tlb_inval.c index 918a59e686ea7..a122fbb9fc4ac 100644 --- a/drivers/gpu/drm/xe/xe_tlb_inval.c +++ b/drivers/gpu/drm/xe/xe_tlb_inval.c @@ -199,6 +199,20 @@ void xe_tlb_inval_reset(struct xe_tlb_inval *tlb_inval) mutex_unlock(&tlb_inval->seqno_lock); } +/** + * xe_tlb_inval_reset_timeout() - Reset TLB inval fence timeout + * @tlb_inval: TLB invalidation client + * + * Reset the TLB invalidation timeout timer. 
+ */ +static void xe_tlb_inval_reset_timeout(struct xe_tlb_inval *tlb_inval) +{ + lockdep_assert_held(&tlb_inval->pending_lock); + + mod_delayed_work(system_wq, &tlb_inval->fence_tdr, + tlb_inval->ops->timeout_delay(tlb_inval)); +} + static bool xe_tlb_inval_seqno_past(struct xe_tlb_inval *tlb_inval, int seqno) { int seqno_recv = READ_ONCE(tlb_inval->seqno_recv); @@ -360,6 +374,12 @@ void xe_tlb_inval_done_handler(struct xe_tlb_inval *tlb_inval, int seqno) * process_g2h_msg(). */ spin_lock_irqsave(&tlb_inval->pending_lock, flags); + if (seqno == TLB_INVALIDATION_SEQNO_INVALID) { + xe_tlb_inval_reset_timeout(tlb_inval); + spin_unlock_irqrestore(&tlb_inval->pending_lock, flags); + return; + } + if (xe_tlb_inval_seqno_past(tlb_inval, seqno)) { spin_unlock_irqrestore(&tlb_inval->pending_lock, flags); return; diff --git a/drivers/gpu/drm/xe/xe_tlb_inval_types.h b/drivers/gpu/drm/xe/xe_tlb_inval_types.h index 8f8b060e9005a..7a6967ce3b767 100644 --- a/drivers/gpu/drm/xe/xe_tlb_inval_types.h +++ b/drivers/gpu/drm/xe/xe_tlb_inval_types.h @@ -80,6 +80,7 @@ struct xe_tlb_inval { const struct xe_tlb_inval_ops *ops; /** @tlb_inval.seqno: TLB invalidation seqno, protected by CT lock */ #define TLB_INVALIDATION_SEQNO_MAX 0x100000 +#define TLB_INVALIDATION_SEQNO_INVALID TLB_INVALIDATION_SEQNO_MAX int seqno; /** @tlb_invalidation.seqno_lock: protects @tlb_invalidation.seqno */ struct mutex seqno_lock; -- 2.47.3