}
}
+/**
+ * struct xe_vma_lock_and_validate_flags - Flags for vma_lock_and_validate()
+ * @res_evict: Allow evicting resources during validation
+ * @validate: Perform BO validation
+ * @request_decompress: Request BO decompression
+ * @check_purged: Reject the operation if the BO is marked DONTNEED or already purged
+ */
+struct xe_vma_lock_and_validate_flags {
+ u32 res_evict : 1;
+ u32 validate : 1;
+ u32 request_decompress : 1;
+ u32 check_purged : 1;
+};
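+
+/*
+ * Callers pass the flags as a compound literal, so any member left out
+ * defaults to false. Illustrative example (mirrors the call sites below):
+ *
+ *	flags = (struct xe_vma_lock_and_validate_flags) {
+ *		.validate = true,
+ *	};
+ */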
+
static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
- bool res_evict, bool validate, bool request_decompress)
+ struct xe_vma_lock_and_validate_flags flags)
{
struct xe_bo *bo = xe_vma_bo(vma);
struct xe_vm *vm = xe_vma_vm(vma);
int err = 0;

if (bo) {
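+ /*
+  * VM-private BOs (bo->vm set) share the VM's dma-resv, which is locked
+  * once for the whole VM; only external BOs need an explicit lock here.
+  */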
if (!bo->vm)
err = drm_exec_lock_obj(exec, &bo->ttm.base);
- if (!err && validate)
+
+ /* Reject new mappings to DONTNEED/purged BOs; allow cleanup operations */
+ if (!err && flags.check_purged) {
+ if (xe_bo_madv_is_dontneed(bo))
+ err = -EBUSY; /* BO marked purgeable */
+ else if (xe_bo_is_purged(bo))
+ err = -EINVAL; /* BO already purged */
+ }
+
+ if (!err && flags.validate)
err = xe_bo_validate(bo, vm,
xe_vm_allow_vm_eviction(vm) &&
- res_evict, exec);
+ flags.res_evict, exec);
if (err)
return err;
- if (request_decompress)
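+ /* Only request decompression once locking/validation has succeeded */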
+ if (flags.request_decompress)
err = xe_bo_decompress(bo);
}
case DRM_GPUVA_OP_MAP:
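+ /*
+  * In fault mode, residency is normally established at fault time, so
+  * only immediate binds need the BO validated here.
+  */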
if (!op->map.invalidate_on_bind)
err = vma_lock_and_validate(exec, op->map.vma,
- res_evict,
- !xe_vm_in_fault_mode(vm) ||
- op->map.immediate,
- op->map.request_decompress);
+ (struct xe_vma_lock_and_validate_flags) {
+ .res_evict = res_evict,
+ .validate = !xe_vm_in_fault_mode(vm) ||
+ op->map.immediate,
+ .request_decompress =
+ op->map.request_decompress,
+ .check_purged = true,
+ });
break;
case DRM_GPUVA_OP_REMAP:
err = check_ufence(gpuva_to_vma(op->base.remap.unmap->va));
if (err)
	break;

err = vma_lock_and_validate(exec,
gpuva_to_vma(op->base.remap.unmap->va),
- res_evict, false, false);
+ (struct xe_vma_lock_and_validate_flags) {
+ .res_evict = res_evict,
+ .validate = false,
+ .request_decompress = false,
+ .check_purged = false,
+ });
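+ /*
+  * The unmap half of a remap is teardown and is never blocked by a
+  * purged BO; the prev/next halves create new mappings, so they check
+  * for purged BOs like MAP does.
+  */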
if (!err && op->remap.prev)
err = vma_lock_and_validate(exec, op->remap.prev,
- res_evict, true, false);
+ (struct xe_vma_lock_and_validate_flags) {
+ .res_evict = res_evict,
+ .validate = true,
+ .request_decompress = false,
+ .check_purged = true,
+ });
if (!err && op->remap.next)
err = vma_lock_and_validate(exec, op->remap.next,
- res_evict, true, false);
+ (struct xe_vma_lock_and_validate_flags) {
+ .res_evict = res_evict,
+ .validate = true,
+ .request_decompress = false,
+ .check_purged = true,
+ });
break;
case DRM_GPUVA_OP_UNMAP:
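+ /* Unmap is pure teardown, so the purged-BO check is skipped */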
err = check_ufence(gpuva_to_vma(op->base.unmap.va));
if (err)
	break;

err = vma_lock_and_validate(exec,
gpuva_to_vma(op->base.unmap.va),
- res_evict, false, false);
+ (struct xe_vma_lock_and_validate_flags) {
+ .res_evict = res_evict,
+ .validate = false,
+ .request_decompress = false,
+ .check_purged = false,
+ });
break;
case DRM_GPUVA_OP_PREFETCH:
{
	struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
	u32 region = op->prefetch.region;

	xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type));

+ /*
+ * Prefetch attempts to migrate BO's backing store without
+ * repopulating it first. Purged BOs have no backing store
+ * to migrate, so reject the operation.
+ */
err = vma_lock_and_validate(exec,
gpuva_to_vma(op->base.prefetch.va),
- res_evict, false, false);
+ (struct xe_vma_lock_and_validate_flags) {
+ .res_evict = res_evict,
+ .validate = false,
+ .request_decompress = false,
+ .check_purged = true,
+ });
if (!err && !xe_vma_has_no_bo(vma))
err = xe_bo_migrate(xe_vma_bo(vma),
region_to_mem_type[region],