static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
{
- /*
- * Advisory only check whether the VMA currently has a valid mapping,
- * READ_ONCE pairs with WRITE_ONCE in xe_pt.c
- */
- return BIT(tile->id) & READ_ONCE(vma->tile_present) &&
- !(BIT(tile->id) & READ_ONCE(vma->tile_invalidated));
+ return xe_vm_has_valid_gpu_mapping(tile, vma->tile_present,
+ vma->tile_invalidated);
}
-
static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
bool atomic, unsigned int id)
{
DMA_RESV_USAGE_KERNEL :
DMA_RESV_USAGE_BOOKKEEP);
}
- /* All WRITE_ONCE pair with READ_ONCE in xe_gt_pagefault.c */
+ /* All WRITE_ONCEs pair with READ_ONCEs in xe_vm_has_valid_gpu_mapping() */
WRITE_ONCE(vma->tile_present, vma->tile_present | BIT(tile->id));
if (invalidate_on_bind)
WRITE_ONCE(vma->tile_invalidated,
struct xe_svm_range *range,
u8 tile_id)
{
- /* WRITE_ONCE pairs with READ_ONCE in xe_svm.c */
+ /* All WRITE_ONCEs pair with READ_ONCEs in xe_vm_has_valid_gpu_mapping() */
lockdep_assert_held(&vm->svm.gpusvm.notifier_lock);
}
case DRM_GPUVA_OP_DRIVER:
{
- /* WRITE_ONCE pairs with READ_ONCE in xe_svm.c */
+ /* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
if (op->subop == XE_VMA_SUBOP_MAP_RANGE)
range_present_and_invalidated_tile(vm, op->map_range.range, tile->id);
else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
for_each_tile(tile, xe, id)
if (xe_pt_zap_ptes_range(tile, vm, range)) {
tile_mask |= BIT(id);
- /* Pairs with READ_ONCE in xe_svm_range_is_valid */
+ /*
+ * WRITE_ONCE pairs with READ_ONCE in
+ * xe_vm_has_valid_gpu_mapping()
+ */
WRITE_ONCE(range->tile_invalidated,
range->tile_invalidated | BIT(id));
}
struct xe_tile *tile,
bool devmem_only)
{
- /*
- * Advisory only check whether the range currently has a valid mapping,
- * READ_ONCE pairs with WRITE_ONCE in xe_pt.c,
- * xe_svm_range_notifier_event_begin
- */
- return ((READ_ONCE(range->tile_present) &
- ~READ_ONCE(range->tile_invalidated)) & BIT(tile->id)) &&
- (!devmem_only || xe_svm_range_in_vram(range));
+ return (xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
+ range->tile_invalidated) &&
+ (!devmem_only || xe_svm_range_in_vram(range)));
}
/** xe_svm_range_migrate_to_smem() - Move range pages from VRAM to SMEM
ret = xe_vm_range_tilemask_tlb_invalidation(xe_vma_vm(vma), xe_vma_start(vma),
xe_vma_end(vma), tile_mask);
- /* WRITE_ONCE pair with READ_ONCE in xe_gt_pagefault.c */
+ /* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
WRITE_ONCE(vma->tile_invalidated, vma->tile_mask);
return ret;
return false;
}
+/**
+ * xe_vm_has_valid_gpu_mapping() - Advisory helper to check if a VMA or SVM
+ * range has a valid GPU mapping
+ * @tile: The tile which the GPU mapping belongs to
+ * @tile_present: Tile present mask
+ * @tile_invalidated: Tile invalidated mask
+ *
+ * The READ_ONCEs pair with WRITE_ONCEs in either the TLB invalidation paths
+ * (xe_vm.c, xe_svm.c) or the binding paths (xe_pt.c). These are not reliable
+ * without the notifier lock in userptr or SVM cases, and not reliable without
+ * the BO dma-resv lock in the BO case. As such, they should only be used in
+ * opportunistic cases (e.g., skipping a page fault fix or not skipping a TLB
+ * invalidation) where acting on a stale value is harmless.
+ *
+ * Return: True if there are valid GPU pages, False otherwise
+ */
+#define xe_vm_has_valid_gpu_mapping(tile, tile_present, tile_invalidated) \
+ ((READ_ONCE(tile_present) & ~READ_ONCE(tile_invalidated)) & BIT((tile)->id))
+
#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
#else
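For context, below is a minimal sketch of the reader/writer pairing the kerneldoc above describes. The helper names are hypothetical (xe_dummy_set_invalidated() and xe_dummy_mapping_valid() are illustrative only, not functions in the driver); the WRITE_ONCE/READ_ONCE pattern and the mask layout follow the hunks in this patch:

/*
 * Writer side: a sketch of the invalidation paths (xe_vm.c, xe_svm.c).
 * Hypothetical helper, runs under the notifier lock (userptr/SVM) or the
 * BO dma-resv lock, so the mask itself is stable here; WRITE_ONCE only
 * keeps the lockless readers coherent.
 */
static void xe_dummy_set_invalidated(struct xe_tile *tile, struct xe_vma *vma)
{
	/* Pairs with the READ_ONCEs in xe_vm_has_valid_gpu_mapping() */
	WRITE_ONCE(vma->tile_invalidated,
		   vma->tile_invalidated | BIT(tile->id));
}

/*
 * Reader side: hypothetical helper, lockless and advisory only. A stale
 * answer is harmless by construction: the worst case is servicing a page
 * fault that was already fixed, or issuing a TLB invalidation that was
 * not strictly needed.
 */
static bool xe_dummy_mapping_valid(struct xe_tile *tile, struct xe_vma *vma)
{
	return xe_vm_has_valid_gpu_mapping(tile, vma->tile_present,
					   vma->tile_invalidated);
}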