From: Matthew Brost <matthew.brost@intel.com>
Date: Fri, 31 Oct 2025 23:40:48 +0000 (-0700)
Subject: drm/xe: Skip TLB invalidation waits in page fault binds
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=ebb0880d497356befb0ff4d36fb0e1d072701805;p=thirdparty%2Fkernel%2Flinux.git

drm/xe: Skip TLB invalidation waits in page fault binds

Avoid waiting on unrelated TLB invalidations when servicing page fault
binds. Since the migrate queue is shared across processes, TLB
invalidations triggered by other processes may occur concurrently but
are not relevant to the current bind. Teach the bind pipeline to skip
waits on such invalidations to prevent unnecessary serialization.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patch.msgid.link/20251031234050.3043507-5-matthew.brost@intel.com
---

diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 45cbe5f05107b..2b5d25f4dd539 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -755,6 +755,7 @@ struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_ma
 	xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
 
 	xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
+	vops.flags |= XE_VMA_OPS_FLAG_SKIP_TLB_WAIT;
 	for_each_tile(tile, vm->xe, id) {
 		vops.pt_update_ops[id].wait_vm_bookkeep = true;
 		vops.pt_update_ops[tile->id].q =
@@ -845,6 +846,7 @@ struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
 	xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
 
 	xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
+	vops.flags |= XE_VMA_OPS_FLAG_SKIP_TLB_WAIT;
 	for_each_tile(tile, vm->xe, id) {
 		vops.pt_update_ops[id].wait_vm_bookkeep = true;
 		vops.pt_update_ops[tile->id].q =
@@ -3111,8 +3113,13 @@ static struct dma_fence *ops_execute(struct xe_vm *vm,
 	if (number_tiles == 0)
 		return ERR_PTR(-ENODATA);
 
-	for_each_tile(tile, vm->xe, id)
-		n_fence += (1 + XE_MAX_GT_PER_TILE);
+	if (vops->flags & XE_VMA_OPS_FLAG_SKIP_TLB_WAIT) {
+		for_each_tile(tile, vm->xe, id)
+			++n_fence;
+	} else {
+		for_each_tile(tile, vm->xe, id)
+			n_fence += (1 + XE_MAX_GT_PER_TILE);
+	}
 
 	fences = kmalloc_array(n_fence, sizeof(*fences), GFP_KERNEL);
 	if (!fences) {
@@ -3153,6 +3160,9 @@ static struct dma_fence *ops_execute(struct xe_vm *vm,
 collect_fences:
 		fences[current_fence++] = fence ?: dma_fence_get_stub();
 
+		if (vops->flags & XE_VMA_OPS_FLAG_SKIP_TLB_WAIT)
+			continue;
+
 		xe_migrate_job_lock(tile->migrate, q);
 		for_each_tlb_inval(i)
 			fences[current_fence++] =
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 9043bc4a381cb..ccd6cc090309f 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -466,6 +466,7 @@ struct xe_vma_ops {
 #define XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH BIT(0)
 #define XE_VMA_OPS_FLAG_MADVISE BIT(1)
 #define XE_VMA_OPS_ARRAY_OF_BINDS BIT(2)
+#define XE_VMA_OPS_FLAG_SKIP_TLB_WAIT BIT(3)
 	u32 flags;
 #ifdef TEST_VM_OPS_ERROR
 	/** @inject_error: inject error to test error handling */
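
For context, here is a minimal standalone sketch of the fence-counting
change in ops_execute(). It is illustrative only: the structures are
mocked userspace stand-ins, XE_MAX_GT_PER_TILE is given an assumed
value, and none of the driver's locking or dma_fence handling is
reproduced; only the flag name and the counting arithmetic mirror the
patch.

	/*
	 * Sketch of the fence-count sizing in ops_execute(); mocked,
	 * not kernel code.
	 */
	#include <stdio.h>

	#define BIT(n)                        (1u << (n))
	#define XE_VMA_OPS_FLAG_SKIP_TLB_WAIT BIT(3)
	#define XE_MAX_GT_PER_TILE            2 /* assumed for illustration */

	struct mock_vma_ops {
		unsigned int flags;
	};

	/* Fence slots needed for @ntiles tiles, per the patched logic. */
	static unsigned int count_fences(const struct mock_vma_ops *vops,
					 unsigned int ntiles)
	{
		unsigned int n_fence = 0, id;

		if (vops->flags & XE_VMA_OPS_FLAG_SKIP_TLB_WAIT) {
			/* One bind fence per tile, no TLB inval fences. */
			for (id = 0; id < ntiles; ++id)
				++n_fence;
		} else {
			/* Bind fence plus one TLB inval fence per GT. */
			for (id = 0; id < ntiles; ++id)
				n_fence += 1 + XE_MAX_GT_PER_TILE;
		}

		return n_fence;
	}

	int main(void)
	{
		struct mock_vma_ops vops = { .flags = 0 };

		printf("waiting on TLB invals: %u fences\n",
		       count_fences(&vops, 2));

		vops.flags |= XE_VMA_OPS_FLAG_SKIP_TLB_WAIT;
		printf("skipping TLB invals:   %u fences\n",
		       count_fences(&vops, 2));

		return 0;
	}

With the flag set, only the per-tile bind fence is tracked (and the
collect_fences loop continues past the TLB invalidation fences), so a
page-fault bind never serializes behind invalidations issued by other
processes through the shared migrate queue.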