/** @irq: IRQ number. */
int irq;
- /** @mask: Current mask being applied to xxx_INT_MASK. */
+ /** @mask: Value to write to xxx_INT_MASK when the IRQ is active. */
u32 mask;
+ /**
+ * @mask_lock: protects modifications to _INT_MASK and @mask.
+ *
+ * In paths where _INT_MASK is updated based on a state
+ * transition/check, it's crucial for the state update/check to be
+ * inside the locked section, otherwise it introduces a race window
+ * leading to potential _INT_MASK inconsistencies.
+ */
+ spinlock_t mask_lock;
+
/** @state: one of &enum panthor_irq_state reflecting the current state. */
atomic_t state;
};
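A minimal sketch of the locking pattern the @mask_lock documentation above mandates (illustrative only, not part of the patch; XXX_INT_MASK and the helper name are placeholders): the state check and the _INT_MASK write both sit inside the locked section, so a concurrent state transition cannot slip in between them.

static void example_unmask_events(struct panthor_irq *pirq, u32 events)
{
	/* Illustrative only: XXX_INT_MASK is a placeholder register name. */
	guard(spinlock_irqsave)(&pirq->mask_lock);

	pirq->mask |= events;

	/* Checking the state outside the locked section would let a concurrent
	 * suspend clear _INT_MASK right before this write, leaving the register
	 * inconsistent with @mask.
	 */
	if (atomic_read(&pirq->state) == PANTHOR_IRQ_STATE_ACTIVE)
		gpu_write(pirq->ptdev, XXX_INT_MASK, pirq->mask);
}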
if (!gpu_read(ptdev, __reg_prefix ## _INT_STAT)) \
return IRQ_NONE; \
\
+ guard(spinlock_irqsave)(&pirq->mask_lock); \
old_state = atomic_cmpxchg(&pirq->state, \
PANTHOR_IRQ_STATE_ACTIVE, \
PANTHOR_IRQ_STATE_PROCESSING); \
{ \
struct panthor_irq *pirq = data; \
struct panthor_device *ptdev = pirq->ptdev; \
- enum panthor_irq_state old_state; \
irqreturn_t ret = IRQ_NONE; \
\
while (true) { \
+ /* It's safe to access pirq->mask without the lock held here. If a new \
+ * event gets added to the mask and the corresponding IRQ is pending, \
+ * we'll process it right away instead of adding an extra raw -> threaded \
+ * round trip. If an event is removed and the status bit is set, it will \
+ * be ignored, just like it would have been if the mask had been adjusted \
+ * right before the HW event kicked in. In short, these are all expected \
+ * races that we're covered against. \
+ */ \
u32 status = gpu_read(ptdev, __reg_prefix ## _INT_RAWSTAT) & pirq->mask; \
\
if (!status) \
ret = IRQ_HANDLED; \
} \
\
- old_state = atomic_cmpxchg(&pirq->state, \
- PANTHOR_IRQ_STATE_PROCESSING, \
- PANTHOR_IRQ_STATE_ACTIVE); \
- if (old_state == PANTHOR_IRQ_STATE_PROCESSING) \
- gpu_write(ptdev, __reg_prefix ## _INT_MASK, pirq->mask); \
+ scoped_guard(spinlock_irqsave, &pirq->mask_lock) { \
+ enum panthor_irq_state old_state; \
+ \
+ old_state = atomic_cmpxchg(&pirq->state, \
+ PANTHOR_IRQ_STATE_PROCESSING, \
+ PANTHOR_IRQ_STATE_ACTIVE); \
+ if (old_state == PANTHOR_IRQ_STATE_PROCESSING) \
+ gpu_write(ptdev, __reg_prefix ## _INT_MASK, pirq->mask); \
+ } \
\
return ret; \
} \
\
static inline void panthor_ ## __name ## _irq_suspend(struct panthor_irq *pirq) \
{ \
- pirq->mask = 0; \
- gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, 0); \
- atomic_set(&pirq->state, PANTHOR_IRQ_STATE_SUSPENDING); \
+ scoped_guard(spinlock_irqsave, &pirq->mask_lock) { \
+ atomic_set(&pirq->state, PANTHOR_IRQ_STATE_SUSPENDING); \
+ gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, 0); \
+ } \
synchronize_irq(pirq->irq); \
atomic_set(&pirq->state, PANTHOR_IRQ_STATE_SUSPENDED); \
} \
\
-static inline void panthor_ ## __name ## _irq_resume(struct panthor_irq *pirq, u32 mask) \
+static inline void panthor_ ## __name ## _irq_resume(struct panthor_irq *pirq) \
{ \
- pirq->mask = mask; \
+ guard(spinlock_irqsave)(&pirq->mask_lock); \
+ \
atomic_set(&pirq->state, PANTHOR_IRQ_STATE_ACTIVE); \
- gpu_write(pirq->ptdev, __reg_prefix ## _INT_CLEAR, mask); \
- gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, mask); \
+ gpu_write(pirq->ptdev, __reg_prefix ## _INT_CLEAR, pirq->mask); \
+ gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, pirq->mask); \
} \
\
static int panthor_request_ ## __name ## _irq(struct panthor_device *ptdev, \
{ \
pirq->ptdev = ptdev; \
pirq->irq = irq; \
- panthor_ ## __name ## _irq_resume(pirq, mask); \
+ pirq->mask = mask; \
+ spin_lock_init(&pirq->mask_lock); \
+ panthor_ ## __name ## _irq_resume(pirq); \
\
return devm_request_threaded_irq(ptdev->base.dev, irq, \
panthor_ ## __name ## _irq_raw_handler, \
panthor_ ## __name ## _irq_threaded_handler, \
IRQF_SHARED, KBUILD_MODNAME "-" # __name, \
pirq); \
+} \
+ \
+static inline void panthor_ ## __name ## _irq_enable_events(struct panthor_irq *pirq, u32 mask) \
+{ \
+ guard(spinlock_irqsave)(&pirq->mask_lock); \
+ pirq->mask |= mask; \
+ \
+ /* The only situation where we need to write the new mask is if the IRQ is active. \
+ * If it's being processed, the mask will be restored for us in _irq_threaded_handler() \
+ * on the PROCESSING -> ACTIVE transition. \
+ * If the IRQ is suspended/suspending, the mask is restored at resume time. \
+ */ \
+ if (atomic_read(&pirq->state) == PANTHOR_IRQ_STATE_ACTIVE) \
+ gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, pirq->mask); \
+} \
+ \
+static inline void panthor_ ## __name ## _irq_disable_events(struct panthor_irq *pirq, u32 mask) \
+{ \
+ guard(spinlock_irqsave)(&pirq->mask_lock); \
+ pirq->mask &= ~mask; \
+ \
+ /* The only situation where we need to write the new mask is if the IRQ is active. \
+ * If it's being processed, the mask will be restored for us in _irq_threaded_handler() \
+ * on the PROCESSING -> ACTIVE transition. \
+ * If the IRQ is suspended/suspending, the mask is restored at resume time. \
+ */ \
+ if (atomic_read(&pirq->state) == PANTHOR_IRQ_STATE_ACTIVE) \
+ gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, pirq->mask); \
}
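As a usage sketch for the new helpers (illustrative only; the "foo" block, ptdev->foo and this wrapper are hypothetical): once a block has been instantiated with PANTHOR_IRQ_HANDLER(), callers can toggle individual events without worrying about the ACTIVE/PROCESSING/SUSPENDED handling described in the comments above, which the generated helpers take care of under @mask_lock.

/* Hypothetical example, assuming PANTHOR_IRQ_HANDLER(foo, FOO, panthor_foo_irq_handler)
 * has been instantiated for an imaginary "foo" block.
 */
static void panthor_foo_set_event(struct panthor_device *ptdev, u32 event, bool enable)
{
	if (enable)
		panthor_foo_irq_enable_events(&ptdev->foo->irq, event);
	else
		panthor_foo_irq_disable_events(&ptdev->foo->irq, event);
}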
extern struct workqueue_struct *panthor_cleanup_wq;
return region_width | *region_start;
}
+static u32 panthor_mmu_as_fault_mask(struct panthor_device *ptdev, u32 as)
+{
+ return BIT(as);
+}
+
+/* Forward declaration of the IRQ handler so PANTHOR_IRQ_HANDLER() can be
+ * instantiated here, which makes the generated helpers available to
+ * panthor_mmu_as_enable()/panthor_mmu_as_disable().
+ */
+static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status);
+PANTHOR_IRQ_HANDLER(mmu, MMU, panthor_mmu_irq_handler);
+
static int panthor_mmu_as_enable(struct panthor_device *ptdev, u32 as_nr,
u64 transtab, u64 transcfg, u64 memattr)
{
+ panthor_mmu_irq_enable_events(&ptdev->mmu->irq,
+ panthor_mmu_as_fault_mask(ptdev, as_nr));
+
gpu_write64(ptdev, AS_TRANSTAB(as_nr), transtab);
gpu_write64(ptdev, AS_MEMATTR(as_nr), memattr);
gpu_write64(ptdev, AS_TRANSCFG(as_nr), transcfg);
lockdep_assert_held(&ptdev->mmu->as.slots_lock);
+ panthor_mmu_irq_disable_events(&ptdev->mmu->irq,
+ panthor_mmu_as_fault_mask(ptdev, as_nr));
+
/* Flush+invalidate RW caches, invalidate RO ones. */
ret = panthor_gpu_flush_caches(ptdev, CACHE_CLEAN | CACHE_INV,
CACHE_CLEAN | CACHE_INV, CACHE_INV);
return value & GENMASK(15, 0);
}
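For orientation, a small sketch (illustrative only, not part of the patch; the helper name is made up) of how a raw MMU interrupt status word maps back to an AS index. It mirrors the ffs(status | (status >> 16)) - 1 computation in panthor_mmu_irq_handler() below and the BIT(as) value returned by panthor_mmu_as_fault_mask(); treating the upper half of the status word as a per-AS mirror of the lower half is an assumption derived from that fold.

/* Illustrative only: mirrors the decoding done in panthor_mmu_irq_handler(). */
static u32 example_mmu_status_to_as(u32 status)
{
	/* Fold the upper half of the status word onto the per-AS lower half... */
	u32 folded = status | (status >> 16);

	/* ...and report the lowest pending AS; its event bit is
	 * panthor_mmu_as_fault_mask(ptdev, as) == BIT(as).
	 */
	return ffs(folded) - 1;
}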
-static u32 panthor_mmu_as_fault_mask(struct panthor_device *ptdev, u32 as)
-{
- return BIT(as);
-}
-
/**
* panthor_vm_has_unhandled_faults() - Check if a VM has unhandled faults
* @vm: VM to check.
struct io_pgtable_cfg *cfg = &io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg;
int ret = 0, as, cookie;
u64 transtab, transcfg;
+ u32 fault_mask;
if (!drm_dev_enter(&ptdev->base, &cookie))
return -ENODEV;
/* If the VM is re-activated, we clear the fault. */
vm->unhandled_fault = false;
- /* Unhandled pagefault on this AS, clear the fault and re-enable interrupts
- * before enabling the AS.
+ /* Unhandled pagefault on this AS, clear the fault and enable the AS,
+ * which re-enables interrupts.
*/
- if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as)) {
- gpu_write(ptdev, MMU_INT_CLEAR, panthor_mmu_as_fault_mask(ptdev, as));
- ptdev->mmu->as.faulty_mask &= ~panthor_mmu_as_fault_mask(ptdev, as);
- ptdev->mmu->irq.mask |= panthor_mmu_as_fault_mask(ptdev, as);
- gpu_write(ptdev, MMU_INT_MASK, ~ptdev->mmu->as.faulty_mask);
+ fault_mask = panthor_mmu_as_fault_mask(ptdev, as);
+ if (ptdev->mmu->as.faulty_mask & fault_mask) {
+ gpu_write(ptdev, MMU_INT_CLEAR, fault_mask);
+ ptdev->mmu->as.faulty_mask &= ~fault_mask;
}
/* The VM update is guarded by ::op_lock, which we take at the beginning
while (status) {
u32 as = ffs(status | (status >> 16)) - 1;
u32 mask = panthor_mmu_as_fault_mask(ptdev, as);
- u32 new_int_mask;
u64 addr;
u32 fault_status;
u32 exception_type;
mutex_lock(&ptdev->mmu->as.slots_lock);
ptdev->mmu->as.faulty_mask |= mask;
- new_int_mask =
- panthor_mmu_fault_mask(ptdev, ~ptdev->mmu->as.faulty_mask);
/* terminal fault, print info about the fault */
drm_err(&ptdev->base,
*/
gpu_write(ptdev, MMU_INT_CLEAR, mask);
- /* Ignore MMU interrupts on this AS until it's been
- * re-enabled.
- */
- ptdev->mmu->irq.mask = new_int_mask;
-
if (ptdev->mmu->as.slots[as].vm)
ptdev->mmu->as.slots[as].vm->unhandled_fault = true;
if (has_unhandled_faults)
panthor_sched_report_mmu_fault(ptdev);
}
-PANTHOR_IRQ_HANDLER(mmu, MMU, panthor_mmu_irq_handler);
/**
* panthor_mmu_suspend() - Suspend the MMU logic
ptdev->mmu->as.faulty_mask = 0;
mutex_unlock(&ptdev->mmu->as.slots_lock);
- panthor_mmu_irq_resume(&ptdev->mmu->irq, panthor_mmu_fault_mask(ptdev, ~0));
+ panthor_mmu_irq_resume(&ptdev->mmu->irq);
}
/**
mutex_unlock(&ptdev->mmu->as.slots_lock);
- panthor_mmu_irq_resume(&ptdev->mmu->irq, panthor_mmu_fault_mask(ptdev, ~0));
+ panthor_mmu_irq_resume(&ptdev->mmu->irq);
/* Restart the VM_BIND queues. */
mutex_lock(&ptdev->mmu->vm.lock);