drm/xe: Skip TLB invalidation waits in page fault binds
Author:     Matthew Brost <matthew.brost@intel.com>
AuthorDate: Fri, 31 Oct 2025 23:40:48 +0000 (16:40 -0700)
Commit:     Matthew Brost <matthew.brost@intel.com>
CommitDate: Tue, 4 Nov 2025 16:21:09 +0000 (08:21 -0800)
Avoid waiting on unrelated TLB invalidations when servicing page fault
binds. Since the migrate queue is shared across processes, TLB
invalidations triggered by other processes may occur concurrently but
are not relevant to the current bind. Teach the bind pipeline to skip
waits on such invalidations to prevent unnecessary serialization.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patch.msgid.link/20251031234050.3043507-5-matthew.brost@intel.com
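
For reference, a minimal userspace sketch (not driver code) of the fence-count change below: when the skip flag is set, the bind reserves only its own bind fence per tile instead of also reserving one fence per possible TLB-invalidation source. SKIP_TLB_WAIT, MAX_GT_PER_TILE, and fences_needed are hypothetical stand-ins for the xe identifiers in the diff.

/*
 * Minimal sketch, assuming a per-operation flag gates fence collection.
 * All names are hypothetical stand-ins for the xe identifiers below.
 */
#include <stdint.h>
#include <stdio.h>

#define SKIP_TLB_WAIT   (1u << 3)  /* stands in for XE_VMA_OPS_FLAG_SKIP_TLB_WAIT */
#define MAX_GT_PER_TILE 2          /* stands in for XE_MAX_GT_PER_TILE */

static unsigned int fences_needed(unsigned int num_tiles, uint32_t flags)
{
	/* Page fault binds wait only on their own bind fence per tile. */
	if (flags & SKIP_TLB_WAIT)
		return num_tiles;

	/* Other binds also wait on one TLB-invalidation fence per GT. */
	return num_tiles * (1 + MAX_GT_PER_TILE);
}

int main(void)
{
	printf("fault bind,  2 tiles: %u fences\n", fences_needed(2, SKIP_TLB_WAIT));
	printf("normal bind, 2 tiles: %u fences\n", fences_needed(2, 0));
	return 0;
}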
drivers/gpu/drm/xe/xe_vm.c
drivers/gpu/drm/xe/xe_vm_types.h

diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 45cbe5f05107b165ea8a2845f3bf94dec94a2f06..2b5d25f4dd539362edadaa15767ea936ea614758 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -755,6 +755,7 @@ struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_ma
        xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
 
        xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
+       vops.flags |= XE_VMA_OPS_FLAG_SKIP_TLB_WAIT;
        for_each_tile(tile, vm->xe, id) {
                vops.pt_update_ops[id].wait_vm_bookkeep = true;
                vops.pt_update_ops[tile->id].q =
@@ -845,6 +846,7 @@ struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
        xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
 
        xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
+       vops.flags |= XE_VMA_OPS_FLAG_SKIP_TLB_WAIT;
        for_each_tile(tile, vm->xe, id) {
                vops.pt_update_ops[id].wait_vm_bookkeep = true;
                vops.pt_update_ops[tile->id].q =
@@ -3111,8 +3113,13 @@ static struct dma_fence *ops_execute(struct xe_vm *vm,
        if (number_tiles == 0)
                return ERR_PTR(-ENODATA);
 
-       for_each_tile(tile, vm->xe, id)
-               n_fence += (1 + XE_MAX_GT_PER_TILE);
+       if (vops->flags & XE_VMA_OPS_FLAG_SKIP_TLB_WAIT) {
+               for_each_tile(tile, vm->xe, id)
+                       ++n_fence;
+       } else {
+               for_each_tile(tile, vm->xe, id)
+                       n_fence += (1 + XE_MAX_GT_PER_TILE);
+       }
 
        fences = kmalloc_array(n_fence, sizeof(*fences), GFP_KERNEL);
        if (!fences) {
@@ -3153,6 +3160,9 @@ static struct dma_fence *ops_execute(struct xe_vm *vm,
 
 collect_fences:
                fences[current_fence++] = fence ?: dma_fence_get_stub();
+               if (vops->flags & XE_VMA_OPS_FLAG_SKIP_TLB_WAIT)
+                       continue;
+
                xe_migrate_job_lock(tile->migrate, q);
                for_each_tlb_inval(i)
                        fences[current_fence++] =
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 9043bc4a381cb008d97364bf150847197770d497..ccd6cc090309f1a30919c472a6a2b7cf860cfdd2 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -466,6 +466,7 @@ struct xe_vma_ops {
 #define XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH BIT(0)
 #define XE_VMA_OPS_FLAG_MADVISE          BIT(1)
 #define XE_VMA_OPS_ARRAY_OF_BINDS       BIT(2)
+#define XE_VMA_OPS_FLAG_SKIP_TLB_WAIT   BIT(3)
        u32 flags;
 #ifdef TEST_VM_OPS_ERROR
        /** @inject_error: inject error to test error handling */