git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/xe/vm: Prevent binding of purged buffer objects
author: Arvind Yadav <arvind.yadav@intel.com>
Thu, 26 Mar 2026 13:08:31 +0000 (18:38 +0530)
committer: Matthew Brost <matthew.brost@intel.com>
Fri, 27 Mar 2026 02:59:45 +0000 (19:59 -0700)
Add purge checking to vma_lock_and_validate() to block new mapping
operations on purged BOs while allowing cleanup operations to proceed.

Purged BOs have their backing pages freed by the kernel. New
mapping operations (MAP, PREFETCH, REMAP) must be rejected — with
-EINVAL if the BO has already been purged, or -EBUSY if it is marked
purgeable (DONTNEED) but not yet purged — to prevent GPU access to
invalid memory. Cleanup
operations (UNMAP) must be allowed so applications can release
resources after detecting purge via the retained field.

REMAP operations require mixed handling - reject new prev/next
VMAs if the BO is purged, but allow the unmap portion to proceed
for cleanup.

The check_purged flag in struct xe_vma_lock_and_validate_flags
distinguishes between these cases: true for new mappings (must reject),
false for cleanup (allow).

Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Arvind Yadav <arvind.yadav@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patch.msgid.link/20260326130843.3545241-6-arvind.yadav@intel.com
drivers/gpu/drm/xe/xe_vm.c

index 19e5d6b3de319e7c4e8579ae6253f738a0bd9dcc..8420fa4ddaac15b6ac2aea46f963e430b352e4aa 100644 (file)
@@ -3006,8 +3006,22 @@ static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
        }
 }
 
+/**
+ * struct xe_vma_lock_and_validate_flags - Flags for vma_lock_and_validate()
+ * @res_evict: Allow evicting resources during validation
+ * @validate: Perform BO validation
+ * @request_decompress: Request BO decompression
+ * @check_purged: Reject operation if BO is purged
+ */
+struct xe_vma_lock_and_validate_flags {
+       u32 res_evict : 1;
+       u32 validate : 1;
+       u32 request_decompress : 1;
+       u32 check_purged : 1;
+};
+
 static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
-                                bool res_evict, bool validate, bool request_decompress)
+                                struct xe_vma_lock_and_validate_flags flags)
 {
        struct xe_bo *bo = xe_vma_bo(vma);
        struct xe_vm *vm = xe_vma_vm(vma);
@@ -3016,15 +3030,24 @@ static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
        if (bo) {
                if (!bo->vm)
                        err = drm_exec_lock_obj(exec, &bo->ttm.base);
-               if (!err && validate)
+
+               /* Reject new mappings to DONTNEED/purged BOs; allow cleanup operations */
+               if (!err && flags.check_purged) {
+                       if (xe_bo_madv_is_dontneed(bo))
+                               err = -EBUSY;  /* BO marked purgeable */
+                       else if (xe_bo_is_purged(bo))
+                               err = -EINVAL; /* BO already purged */
+               }
+
+               if (!err && flags.validate)
                        err = xe_bo_validate(bo, vm,
                                             xe_vm_allow_vm_eviction(vm) &&
-                                            res_evict, exec);
+                                            flags.res_evict, exec);
 
                if (err)
                        return err;
 
-               if (request_decompress)
+               if (flags.request_decompress)
                        err = xe_bo_decompress(bo);
        }
 
@@ -3118,10 +3141,14 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
        case DRM_GPUVA_OP_MAP:
                if (!op->map.invalidate_on_bind)
                        err = vma_lock_and_validate(exec, op->map.vma,
-                                                   res_evict,
-                                                   !xe_vm_in_fault_mode(vm) ||
-                                                   op->map.immediate,
-                                                   op->map.request_decompress);
+                                                   (struct xe_vma_lock_and_validate_flags) {
+                                                       .res_evict = res_evict,
+                                                       .validate = !xe_vm_in_fault_mode(vm) ||
+                                                                   op->map.immediate,
+                                                       .request_decompress =
+                                                       op->map.request_decompress,
+                                                       .check_purged = true,
+                                                   });
                break;
        case DRM_GPUVA_OP_REMAP:
                err = check_ufence(gpuva_to_vma(op->base.remap.unmap->va));
@@ -3130,13 +3157,28 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
 
                err = vma_lock_and_validate(exec,
                                            gpuva_to_vma(op->base.remap.unmap->va),
-                                           res_evict, false, false);
+                                           (struct xe_vma_lock_and_validate_flags) {
+                                                   .res_evict = res_evict,
+                                                   .validate = false,
+                                                   .request_decompress = false,
+                                                   .check_purged = false,
+                                           });
                if (!err && op->remap.prev)
                        err = vma_lock_and_validate(exec, op->remap.prev,
-                                                   res_evict, true, false);
+                                                   (struct xe_vma_lock_and_validate_flags) {
+                                                           .res_evict = res_evict,
+                                                           .validate = true,
+                                                           .request_decompress = false,
+                                                           .check_purged = true,
+                                                   });
                if (!err && op->remap.next)
                        err = vma_lock_and_validate(exec, op->remap.next,
-                                                   res_evict, true, false);
+                                                   (struct xe_vma_lock_and_validate_flags) {
+                                                           .res_evict = res_evict,
+                                                           .validate = true,
+                                                           .request_decompress = false,
+                                                           .check_purged = true,
+                                                   });
                break;
        case DRM_GPUVA_OP_UNMAP:
                err = check_ufence(gpuva_to_vma(op->base.unmap.va));
@@ -3145,7 +3187,12 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
 
                err = vma_lock_and_validate(exec,
                                            gpuva_to_vma(op->base.unmap.va),
-                                           res_evict, false, false);
+                                           (struct xe_vma_lock_and_validate_flags) {
+                                                   .res_evict = res_evict,
+                                                   .validate = false,
+                                                   .request_decompress = false,
+                                                   .check_purged = false,
+                                           });
                break;
        case DRM_GPUVA_OP_PREFETCH:
        {
@@ -3158,9 +3205,19 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
                                  region <= ARRAY_SIZE(region_to_mem_type));
                }
 
+               /*
+                * Prefetch attempts to migrate BO's backing store without
+                * repopulating it first. Purged BOs have no backing store
+                * to migrate, so reject the operation.
+                */
                err = vma_lock_and_validate(exec,
                                            gpuva_to_vma(op->base.prefetch.va),
-                                           res_evict, false, false);
+                                           (struct xe_vma_lock_and_validate_flags) {
+                                                   .res_evict = res_evict,
+                                                   .validate = false,
+                                                   .request_decompress = false,
+                                                   .check_purged = true,
+                                           });
                if (!err && !xe_vma_has_no_bo(vma))
                        err = xe_bo_migrate(xe_vma_bo(vma),
                                            region_to_mem_type[region],