From: Boris Brezillon Date: Wed, 3 Dec 2025 12:17:49 +0000 (+0100) Subject: drm/panthor: Make sure caches are flushed/invalidated when an AS is recycled X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=32e593d74c39249ae14c8f0de88eec677c621aa7;p=thirdparty%2Fkernel%2Flinux.git drm/panthor: Make sure caches are flushed/invalidated when an AS is recycled When we re-assign a slot to a different VM, we need to make sure the old VM caches are flushed before doing the switch. Specialize panthor_mmu_as_disable() so we can skip the slot programming while still getting the cache flushing, and call this helper from panthor_vm_active() when an idle slot is recycled. v2: - Collect R-bs Fixes: 6e2d3b3e8589 ("drm/panthor: Add support for atomic page table updates") Signed-off-by: Boris Brezillon Reviewed-by: Liviu Dudau Reviewed-by: Chia-I Wu Signed-off-by: Liviu Dudau Link: https://patch.msgid.link/20251203121750.404340-3-boris.brezillon@collabora.com --- diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c index 8ba5259e3d288..3644af1a8e564 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.c +++ b/drivers/gpu/drm/panthor/panthor_mmu.c @@ -585,7 +585,8 @@ static int panthor_mmu_as_enable(struct panthor_device *ptdev, u32 as_nr, return as_send_cmd_and_wait(ptdev, as_nr, AS_COMMAND_UPDATE); } -static int panthor_mmu_as_disable(struct panthor_device *ptdev, u32 as_nr) +static int panthor_mmu_as_disable(struct panthor_device *ptdev, u32 as_nr, + bool recycle_slot) { int ret; @@ -595,6 +596,12 @@ static int panthor_mmu_as_disable(struct panthor_device *ptdev, u32 as_nr) if (ret) return ret; + /* If the slot is going to be used immediately, don't bother changing + * the config. 
+ */ + if (recycle_slot) + return 0; + gpu_write64(ptdev, AS_TRANSTAB(as_nr), 0); gpu_write64(ptdev, AS_MEMATTR(as_nr), 0); gpu_write64(ptdev, AS_TRANSCFG(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED); @@ -714,6 +721,11 @@ int panthor_vm_active(struct panthor_vm *vm) drm_WARN_ON(&ptdev->base, refcount_read(&lru_vm->as.active_cnt)); as = lru_vm->as.id; + + ret = panthor_mmu_as_disable(ptdev, as, true); + if (ret) + goto out_unlock; + panthor_vm_release_as_locked(lru_vm); } @@ -853,7 +865,7 @@ static void panthor_vm_declare_unusable(struct panthor_vm *vm) vm->unusable = true; mutex_lock(&ptdev->mmu->as.slots_lock); if (vm->as.id >= 0 && drm_dev_enter(&ptdev->base, &cookie)) { - panthor_mmu_as_disable(ptdev, vm->as.id); + panthor_mmu_as_disable(ptdev, vm->as.id, false); drm_dev_exit(cookie); } mutex_unlock(&ptdev->mmu->as.slots_lock); @@ -1780,7 +1792,7 @@ static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status) ptdev->mmu->as.slots[as].vm->unhandled_fault = true; /* Disable the MMU to kill jobs on this AS. 
*/ - panthor_mmu_as_disable(ptdev, as); + panthor_mmu_as_disable(ptdev, as, false); mutex_unlock(&ptdev->mmu->as.slots_lock); status &= ~mask; @@ -1809,7 +1821,8 @@ void panthor_mmu_suspend(struct panthor_device *ptdev) struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm; if (vm) { - drm_WARN_ON(&ptdev->base, panthor_mmu_as_disable(ptdev, i)); + drm_WARN_ON(&ptdev->base, + panthor_mmu_as_disable(ptdev, i, false)); panthor_vm_release_as_locked(vm); } } @@ -1930,7 +1943,7 @@ static void panthor_vm_free(struct drm_gpuvm *gpuvm) int cookie; if (drm_dev_enter(&ptdev->base, &cookie)) { - panthor_mmu_as_disable(ptdev, vm->as.id); + panthor_mmu_as_disable(ptdev, vm->as.id, false); drm_dev_exit(cookie); } @@ -2790,7 +2803,8 @@ void panthor_mmu_unplug(struct panthor_device *ptdev) struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm; if (vm) { - drm_WARN_ON(&ptdev->base, panthor_mmu_as_disable(ptdev, i)); + drm_WARN_ON(&ptdev->base, + panthor_mmu_as_disable(ptdev, i, false)); panthor_vm_release_as_locked(vm); } }