git.ipfire.org Git - thirdparty/linux.git/commitdiff
drm/xe: Add xe_vm_has_valid_gpu_mapping helper
authorMatthew Brost <matthew.brost@intel.com>
Mon, 16 Jun 2025 06:30:23 +0000 (23:30 -0700)
committerMatthew Brost <matthew.brost@intel.com>
Tue, 17 Jun 2025 22:38:11 +0000 (15:38 -0700)
Rather than having multiple READ_ONCE of the tile_* fields and comments
in code, use helper with kernel doc for single access point and clear
rules.

v3:
 - s/xe_vm_has_valid_gpu_pages/xe_vm_has_valid_gpu_mapping

Suggested-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Link: https://lore.kernel.org/r/20250616063024.2059829-2-matthew.brost@intel.com
drivers/gpu/drm/xe/xe_gt_pagefault.c
drivers/gpu/drm/xe/xe_pt.c
drivers/gpu/drm/xe/xe_svm.c
drivers/gpu/drm/xe/xe_vm.c
drivers/gpu/drm/xe/xe_vm.h

index e2d975b2fddbd6c4657474e5b10597c2631f7bbc..3522865c67c98246ec257a40f7a0bd260e4b5d6b 100644 (file)
@@ -69,15 +69,10 @@ static bool access_is_atomic(enum access_type access_type)
 
 static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
 {
-       /*
-        * Advisory only check whether the VMA currently has a valid mapping,
-        * READ_ONCE pairs with WRITE_ONCE in xe_pt.c
-        */
-       return BIT(tile->id) & READ_ONCE(vma->tile_present) &&
-               !(BIT(tile->id) & READ_ONCE(vma->tile_invalidated));
+       return xe_vm_has_valid_gpu_mapping(tile, vma->tile_present,
+                                          vma->tile_invalidated);
 }
 
-
 static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
                       bool atomic, unsigned int id)
 {
index 971e55fd00616ab364bef5782adea5fb7d3148d8..ef39b01f35193cff318224531ad1d66af511dbca 100644 (file)
@@ -2196,7 +2196,7 @@ static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
                                           DMA_RESV_USAGE_KERNEL :
                                           DMA_RESV_USAGE_BOOKKEEP);
        }
-       /* All WRITE_ONCE pair with READ_ONCE in xe_gt_pagefault.c */
+       /* All WRITE_ONCE pair with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
        WRITE_ONCE(vma->tile_present, vma->tile_present | BIT(tile->id));
        if (invalidate_on_bind)
                WRITE_ONCE(vma->tile_invalidated,
@@ -2255,7 +2255,7 @@ static void range_present_and_invalidated_tile(struct xe_vm *vm,
                                               struct xe_svm_range *range,
                                               u8 tile_id)
 {
-       /* WRITE_ONCE pairs with READ_ONCE in xe_svm.c */
+       /* All WRITE_ONCE pair with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
 
        lockdep_assert_held(&vm->svm.gpusvm.notifier_lock);
 
@@ -2324,7 +2324,7 @@ static void op_commit(struct xe_vm *vm,
        }
        case DRM_GPUVA_OP_DRIVER:
        {
-               /* WRITE_ONCE pairs with READ_ONCE in xe_svm.c */
+               /* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
                if (op->subop == XE_VMA_SUBOP_MAP_RANGE)
                        range_present_and_invalidated_tile(vm, op->map_range.range, tile->id);
                else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
index 2fbbd6a604ea0b70d9cf3c560987dc0b52d3d2af..26418e9bdff0e8ab467d1b183729e456af46169d 100644 (file)
@@ -141,7 +141,10 @@ xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
        for_each_tile(tile, xe, id)
                if (xe_pt_zap_ptes_range(tile, vm, range)) {
                        tile_mask |= BIT(id);
-                       /* Pairs with READ_ONCE in xe_svm_range_is_valid */
+                       /*
+                        * WRITE_ONCE pairs with READ_ONCE in
+                        * xe_vm_has_valid_gpu_mapping()
+                        */
                        WRITE_ONCE(range->tile_invalidated,
                                   range->tile_invalidated | BIT(id));
                }
@@ -605,14 +608,9 @@ static bool xe_svm_range_is_valid(struct xe_svm_range *range,
                                  struct xe_tile *tile,
                                  bool devmem_only)
 {
-       /*
-        * Advisory only check whether the range currently has a valid mapping,
-        * READ_ONCE pairs with WRITE_ONCE in xe_pt.c,
-        * xe_svm_range_notifier_event_begin
-        */
-       return ((READ_ONCE(range->tile_present) &
-                ~READ_ONCE(range->tile_invalidated)) & BIT(tile->id)) &&
-               (!devmem_only || xe_svm_range_in_vram(range));
+       return (xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
+                                           range->tile_invalidated) &&
+               (!devmem_only || xe_svm_range_in_vram(range)));
 }
 
 /** xe_svm_range_migrate_to_smem() - Move range pages from VRAM to SMEM
index 6ef8c4dab647f9284e9eeb55004013f07a24707b..04d1a43b81e339d10c1a740a6ac91ca8792acc71 100644 (file)
@@ -3961,7 +3961,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
        ret = xe_vm_range_tilemask_tlb_invalidation(xe_vma_vm(vma), xe_vma_start(vma),
                                                    xe_vma_end(vma), tile_mask);
 
-       /* WRITE_ONCE pair with READ_ONCE in xe_gt_pagefault.c */
+       /* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
        WRITE_ONCE(vma->tile_invalidated, vma->tile_mask);
 
        return ret;
index acd3fd6c605bd101c855b3d0eaafadacf0559050..3475a118f66654b4a46e2c12325ba607f93dc2d4 100644 (file)
@@ -375,6 +375,25 @@ static inline bool xe_vm_is_validating(struct xe_vm *vm)
        return false;
 }
 
+/**
+ * xe_vm_has_valid_gpu_mapping() - Advisory helper to check if VMA or SVM range has
+ * a valid GPU mapping
+ * @tile: The tile which the GPU mapping belongs to
+ * @tile_present: Tile present mask
+ * @tile_invalidated: Tile invalidated mask
+ *
+ * The READ_ONCEs pair with WRITE_ONCEs in either the TLB invalidation paths
+ * (xe_vm.c, xe_svm.c) or the binding paths (xe_pt.c). These are not reliable
+ * without the notifier lock in userptr or SVM cases, and not reliable without
+ * the BO dma-resv lock in the BO case. As such, they should only be used in
+ * opportunistic cases (e.g., skipping a page fault fix or not skipping a TLB
+ * invalidation) where it is harmless.
+ *
+ * Return: True if there are valid GPU pages, False otherwise
+ */
+#define xe_vm_has_valid_gpu_mapping(tile, tile_present, tile_invalidated)      \
+       ((READ_ONCE(tile_present) & ~READ_ONCE(tile_invalidated)) & BIT((tile)->id))
+
 #if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
 void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
 #else