From: Matthew Brost
Date: Mon, 16 Jun 2025 06:30:23 +0000 (-0700)
Subject: drm/xe: Add xe_vm_has_valid_gpu_mapping helper
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=fab76ce56538fdeb9baaf1bfc06f34532e04f9d7;p=thirdparty%2Flinux.git

drm/xe: Add xe_vm_has_valid_gpu_mapping helper

Rather than having multiple READ_ONCEs of the tile_* fields and comments
scattered through the code, use a helper with kernel doc as a single
access point with clear rules.

v3:
 - s/xe_vm_has_valid_gpu_pages/xe_vm_has_valid_gpu_mapping

Suggested-by: Thomas Hellström
Signed-off-by: Matthew Brost
Reviewed-by: Himal Prasad Ghimiray
Link: https://lore.kernel.org/r/20250616063024.2059829-2-matthew.brost@intel.com
---

diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index e2d975b2fddbd..3522865c67c98 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -69,15 +69,10 @@ static bool access_is_atomic(enum access_type access_type)
 
 static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
 {
-	/*
-	 * Advisory only check whether the VMA currently has a valid mapping,
-	 * READ_ONCE pairs with WRITE_ONCE in xe_pt.c
-	 */
-	return BIT(tile->id) & READ_ONCE(vma->tile_present) &&
-		!(BIT(tile->id) & READ_ONCE(vma->tile_invalidated));
+	return xe_vm_has_valid_gpu_mapping(tile, vma->tile_present,
+					   vma->tile_invalidated);
 }
 
-
 static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
 		       bool atomic, unsigned int id)
 {
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 971e55fd00616..ef39b01f35193 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -2196,7 +2196,7 @@ static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
 				   DMA_RESV_USAGE_KERNEL :
 				   DMA_RESV_USAGE_BOOKKEEP);
 	}
-	/* All WRITE_ONCE pair with READ_ONCE in xe_gt_pagefault.c */
+	/* All WRITE_ONCE pair with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
 	WRITE_ONCE(vma->tile_present, vma->tile_present | BIT(tile->id));
 	if (invalidate_on_bind)
 		WRITE_ONCE(vma->tile_invalidated,
@@ -2255,7 +2255,7 @@ static void range_present_and_invalidated_tile(struct xe_vm *vm,
 						struct xe_svm_range *range,
 						u8 tile_id)
 {
-	/* WRITE_ONCE pairs with READ_ONCE in xe_svm.c */
+	/* All WRITE_ONCE pair with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
 
 	lockdep_assert_held(&vm->svm.gpusvm.notifier_lock);
 
@@ -2324,7 +2324,7 @@ static void op_commit(struct xe_vm *vm,
 	}
 	case DRM_GPUVA_OP_DRIVER:
 	{
-		/* WRITE_ONCE pairs with READ_ONCE in xe_svm.c */
+		/* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
 		if (op->subop == XE_VMA_SUBOP_MAP_RANGE)
 			range_present_and_invalidated_tile(vm, op->map_range.range, tile->id);
 		else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 2fbbd6a604ea0..26418e9bdff0e 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -141,7 +141,10 @@ xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
 	for_each_tile(tile, xe, id)
 		if (xe_pt_zap_ptes_range(tile, vm, range)) {
 			tile_mask |= BIT(id);
-			/* Pairs with READ_ONCE in xe_svm_range_is_valid */
+			/*
+			 * WRITE_ONCE pairs with READ_ONCE in
+			 * xe_vm_has_valid_gpu_mapping()
+			 */
 			WRITE_ONCE(range->tile_invalidated,
 				   range->tile_invalidated | BIT(id));
 		}
@@ -605,14 +608,9 @@ static bool xe_svm_range_is_valid(struct xe_svm_range *range,
 				  struct xe_tile *tile,
 				  bool devmem_only)
 {
-	/*
-	 * Advisory only check whether the range currently has a valid mapping,
-	 * READ_ONCE pairs with WRITE_ONCE in xe_pt.c,
-	 * xe_svm_range_notifier_event_begin
-	 */
-	return ((READ_ONCE(range->tile_present) &
-		 ~READ_ONCE(range->tile_invalidated)) & BIT(tile->id)) &&
-		(!devmem_only || xe_svm_range_in_vram(range));
+	return (xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
+					    range->tile_invalidated) &&
+		(!devmem_only || xe_svm_range_in_vram(range)));
 }
 
 /** xe_svm_range_migrate_to_smem() - Move range pages from VRAM to SMEM
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 6ef8c4dab647f..04d1a43b81e33 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -3961,7 +3961,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
 	ret = xe_vm_range_tilemask_tlb_invalidation(xe_vma_vm(vma), xe_vma_start(vma),
 						    xe_vma_end(vma), tile_mask);
 
-	/* WRITE_ONCE pair with READ_ONCE in xe_gt_pagefault.c */
+	/* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
 	WRITE_ONCE(vma->tile_invalidated, vma->tile_mask);
 
 	return ret;
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index acd3fd6c605bd..3475a118f6665 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -375,6 +375,25 @@ static inline bool xe_vm_is_validating(struct xe_vm *vm)
 	return false;
 }
 
+/**
+ * xe_vm_has_valid_gpu_mapping() - Advisory helper to check if VMA or SVM range has
+ * a valid GPU mapping
+ * @tile: The tile which the GPU mapping belongs to
+ * @tile_present: Tile present mask
+ * @tile_invalidated: Tile invalidated mask
+ *
+ * The READ_ONCEs pair with WRITE_ONCEs in either the TLB invalidation paths
+ * (xe_vm.c, xe_svm.c) or the binding paths (xe_pt.c). These are not reliable
+ * without the notifier lock in userptr or SVM cases, and not reliable without
+ * the BO dma-resv lock in the BO case. As such, they should only be used in
+ * opportunistic cases (e.g., skipping a page fault fix or not skipping a TLB
+ * invalidation) where it is harmless.
+ *
+ * Return: True if there are valid GPU pages, False otherwise
+ */
+#define xe_vm_has_valid_gpu_mapping(tile, tile_present, tile_invalidated)	\
+	((READ_ONCE(tile_present) & ~READ_ONCE(tile_invalidated)) & BIT((tile)->id))
+
 #if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
 void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
 #else
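
The helper reduces to a single bitmask test: a tile's GPU mapping counts as
valid only when that tile's bit is set in the present mask and clear in the
invalidated mask. The standalone sketch below illustrates just that bit logic
outside the kernel tree; it is not part of the patch, READ_ONCE is reduced to
a plain read, and the names (fake_tile, has_valid_gpu_mapping_demo) and values
are made up for the example.

/* Illustration of the bit test behind xe_vm_has_valid_gpu_mapping();
 * standalone user-space code, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define BIT(n)       (1u << (n))
#define READ_ONCE(x) (x)	/* stand-in; the kernel macro forbids refetching/tearing */

struct fake_tile {		/* hypothetical stand-in for struct xe_tile */
	unsigned int id;
};

/* Same expression as the kernel macro: present bit set AND invalidated bit clear. */
static bool has_valid_gpu_mapping_demo(const struct fake_tile *tile,
				       unsigned char tile_present,
				       unsigned char tile_invalidated)
{
	return (READ_ONCE(tile_present) & ~READ_ONCE(tile_invalidated)) &
	       BIT(tile->id);
}

int main(void)
{
	struct fake_tile tile1 = { .id = 1 };
	unsigned char present = BIT(0) | BIT(1);	/* bound on tiles 0 and 1 */
	unsigned char invalidated = BIT(1);		/* tile 1 zapped by an invalidation */

	/* Prints 0: tile 1 is present but its mapping has been invalidated. */
	printf("%d\n", has_valid_gpu_mapping_demo(&tile1, present, invalidated));

	/* Prints 1: once a rebind clears the invalidated bit, the mapping is valid. */
	invalidated &= ~BIT(1);
	printf("%d\n", has_valid_gpu_mapping_demo(&tile1, present, invalidated));
	return 0;
}

As the kernel doc above notes, the result is advisory: without the notifier
lock (userptr/SVM) or the BO dma-resv lock it may be stale, so callers only
rely on it to skip work that is harmless to redo.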