mutex_unlock(&tlb_inval->seqno_lock);
}
+/**
+ * xe_tlb_inval_reset_timeout() - Reset the TLB invalidation fence timeout
+ * @tlb_inval: TLB invalidation client
+ *
+ * Re-arm the TLB invalidation timeout worker. The caller must hold
+ * @tlb_inval->pending_lock.
+ */
+static void xe_tlb_inval_reset_timeout(struct xe_tlb_inval *tlb_inval)
+{
+ lockdep_assert_held(&tlb_inval->pending_lock);
+
+ mod_delayed_work(system_wq, &tlb_inval->fence_tdr,
+ tlb_inval->ops->timeout_delay(tlb_inval));
+}
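For reference, ops->timeout_delay() is a backend hook that returns the re-arm
delay in jiffies. A minimal sketch of such a hook, assuming a fixed hardware
processing budget plus a transport margin (the function name and constants
below are illustrative, not taken from the driver):

static long example_timeout_delay(struct xe_tlb_inval *tlb_inval)
{
	/* Budget for the hardware to process one invalidation request */
	const long hw_tlb_timeout = HZ / 4;

	/* Illustrative margin for command-transport latency */
	return hw_tlb_timeout + HZ / 8;
}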
+
static bool xe_tlb_inval_seqno_past(struct xe_tlb_inval *tlb_inval, int seqno)
{
int seqno_recv = READ_ONCE(tlb_inval->seqno_recv);
* process_g2h_msg().
*/
spin_lock_irqsave(&tlb_inval->pending_lock, flags);
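+	/* Sentinel seqno: nothing to signal, just re-arm the timeout */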
+ if (seqno == TLB_INVALIDATION_SEQNO_INVALID) {
+ xe_tlb_inval_reset_timeout(tlb_inval);
+ spin_unlock_irqrestore(&tlb_inval->pending_lock, flags);
+ return;
+ }
+
if (xe_tlb_inval_seqno_past(tlb_inval, seqno)) {
spin_unlock_irqrestore(&tlb_inval->pending_lock, flags);
return;
const struct xe_tlb_inval_ops *ops;
/** @tlb_inval.seqno: TLB invalidation seqno, protected by @tlb_inval.seqno_lock */
#define TLB_INVALIDATION_SEQNO_MAX 0x100000
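+/* Sentinel seqno: re-arms the timeout without signaling any fence */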
+#define TLB_INVALIDATION_SEQNO_INVALID TLB_INVALIDATION_SEQNO_MAX
int seqno;
/** @tlb_inval.seqno_lock: protects @tlb_inval.seqno */
struct mutex seqno_lock;
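The sentinel is only safe if a live seqno can never equal
TLB_INVALIDATION_SEQNO_MAX. A minimal sketch of a seqno allocator that
guarantees this, assuming seqnos advance under @seqno_lock and wrap modulo
TLB_INVALIDATION_SEQNO_MAX (the helper name is hypothetical):

static int example_next_seqno(struct xe_tlb_inval *tlb_inval)
{
	lockdep_assert_held(&tlb_inval->seqno_lock);

	/*
	 * Live seqnos stay in [1, TLB_INVALIDATION_SEQNO_MAX - 1], so
	 * TLB_INVALIDATION_SEQNO_INVALID (== TLB_INVALIDATION_SEQNO_MAX)
	 * never collides with one.
	 */
	tlb_inval->seqno = (tlb_inval->seqno + 1) % TLB_INVALIDATION_SEQNO_MAX;
	if (!tlb_inval->seqno)
		tlb_inval->seqno = 1;

	return tlb_inval->seqno;
}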