.access = xe_bo_vm_access,
};
+/*
+ * xe_gem_object_mmap() - Gate CPU mmap on the BO's madvise state.
+ * @obj: GEM object being mapped
+ * @vma: VMA describing the requested user mapping
+ *
+ * Returns 0 on success (mapping set up by drm_gem_ttm_mmap()), -EBUSY if
+ * the BO is marked DONTNEED, -EINVAL if it has already been purged, or a
+ * negative error from xe_bo_lock().
+ */
+static int xe_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct xe_bo *bo = gem_to_xe_bo(obj);
+ int err = 0;
+
+ /*
+ * Reject mmap of purgeable BOs. DONTNEED BOs can be purged
+ * at any time, making CPU access undefined behavior. Purged BOs have
+ * no backing store and are permanently invalid.
+ */
+ err = xe_bo_lock(bo, true);
+ if (err)
+ return err;
+
+ if (xe_bo_madv_is_dontneed(bo))
+ err = -EBUSY;
+ else if (xe_bo_is_purged(bo))
+ err = -EINVAL;
+ xe_bo_unlock(bo);
+ if (err)
+ return err;
+
+ /*
+ * NOTE(review): the madvise state is only checked under the lock above;
+ * it may change again before/after drm_gem_ttm_mmap() completes.
+ * Presumably the fault path re-validates purged BOs — confirm against
+ * the vm_ops fault handler.
+ */
+ return drm_gem_ttm_mmap(obj, vma);
+}
+
static const struct drm_gem_object_funcs xe_gem_object_funcs = {
.free = xe_gem_object_free,
.close = xe_gem_object_close,
- .mmap = drm_gem_ttm_mmap,
+ /* Wrapper rejects DONTNEED/purged BOs before drm_gem_ttm_mmap() */
+ .mmap = xe_gem_object_mmap,
.export = xe_gem_prime_export,
.vm_ops = &xe_gem_vm_ops,
};