	vm->destroyed = true;
+	/* Wait for the scheduler to finish stopping any GPU work still tied to this VM */
+	if (refcount_read(&vm->as.active_cnt) > 0)
+		panthor_sched_prepare_for_vm_destruction(vm->ptdev);
+
	mutex_lock(&vm->heaps.lock);
	panthor_heap_pool_destroy(vm->heaps.pool);
	vm->heaps.pool = NULL;
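(The hunk above is the VM teardown path in panthor_mmu.c; the hunks below are panthor_sched.c and panthor_sched.h.) The new call is guarded on vm->as.active_cnt, which appears to be the refcount held while the VM is bound to a live GPU address space, so idle VMs skip the flush. A minimal sketch of that pairing, assuming the existing panthor_vm_active()/panthor_vm_idle() helpers and glossing over the real scheduler call sites:

/* Illustrative only: the assumed as.active_cnt lifecycle. In the real driver
 * this happens around CSG slot programming in panthor_sched.c.
 */
static int example_run_group_once(struct panthor_group *group)
{
	int ret;

	/* Bind the group's VM to a GPU address space; takes as.active_cnt. */
	ret = panthor_vm_active(group->vm);
	if (ret)
		return ret;

	/* ... program the CSG slot and let the FW run the group ... */

	/* Release the address space binding; drops as.active_cnt. */
	panthor_vm_idle(group->vm);
	return 0;
}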
	sched_queue_delayed_work(ptdev->scheduler, tick, 0);
}
+void panthor_sched_prepare_for_vm_destruction(struct panthor_device *ptdev)
+{
+	/* FW can write out internal state, like the heap context, during CSG
+	 * suspend. It is therefore important that the scheduler has fully
+	 * evicted any pending groups related to the VM before VM destruction
+	 * can safely continue. Failure to do so can lead to GPU page faults.
+	 * A controlled termination of a Panthor instance involves destroying
+	 * the group(s) before the VM. This means any relevant group eviction
+	 * has already been initiated by this point, and we just need to
+	 * ensure that any pending tick_work() has been completed.
+	 */
+	flush_work(&ptdev->scheduler->tick_work.work);
+}
+
void panthor_sched_resume(struct panthor_device *ptdev)
{
	/* Force a tick to re-evaluate after a resume. */
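To make the ordering in the comment above concrete (illustrative only, not part of the patch): a controlled teardown destroys the group(s) before the VM, so group eviction is already queued on tick_work by the time VM destruction starts, and flushing that work is enough to let it finish. A rough sketch, assuming the existing panthor_group_destroy() and panthor_vm_pool_destroy_vm() entry points behind the GROUP_DESTROY and VM_DESTROY ioctls:

/* Hypothetical teardown sequence, for illustration only. */
static void example_controlled_teardown(struct panthor_file *pfile,
					u32 group_handle, u32 vm_handle)
{
	/* Destroying the group queues a scheduler tick that evicts its CSG
	 * slot; during that suspend the FW may still write VM-mapped state
	 * such as the heap context.
	 */
	panthor_group_destroy(pfile, group_handle);

	/* VM destruction then reaches the panthor_mmu.c hunk above: if the
	 * VM was active, panthor_sched_prepare_for_vm_destruction() flushes
	 * the pending tick_work so those FW writes have landed before the
	 * heap pool and page tables go away.
	 */
	panthor_vm_pool_destroy_vm(pfile->vms, vm_handle);
}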
void panthor_sched_resume(struct panthor_device *ptdev);
void panthor_sched_report_mmu_fault(struct panthor_device *ptdev);
+void panthor_sched_prepare_for_vm_destruction(struct panthor_device *ptdev);
void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events);
void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile);