#include "xe_tile.h"
#include "xe_tlb_inval.h"
#include "xe_trace_bo.h"
+#include "xe_vm_madvise.h"
#include "xe_wa.h"
static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
static void xe_vma_destroy_late(struct xe_vma *vma)
{
struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_bo *bo = xe_vma_bo(vma);
if (vma->ufence) {
xe_sync_ufence_put(vma->ufence);
} else if (xe_vma_is_null(vma) || xe_vma_is_cpu_addr_mirror(vma)) {
xe_vm_put(vm);
} else {
- xe_bo_put(xe_vma_bo(vma));
+ xe_bo_put(bo);
}
xe_vma_free(vma);
static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
{
struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_bo *bo = xe_vma_bo(vma);
lockdep_assert_held_write(&vm->lock);
xe_assert(vm->xe, list_empty(&vma->combined_links.destroy));
xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
xe_userptr_destroy(to_userptr_vma(vma));
} else if (!xe_vma_is_null(vma) && !xe_vma_is_cpu_addr_mirror(vma)) {
- xe_bo_assert_held(xe_vma_bo(vma));
+ xe_bo_assert_held(bo);
drm_gpuva_unlink(&vma->gpuva);
+ xe_bo_recompute_purgeable_state(bo);
}
xe_vm_assert_held(vm);
.atomic_access = DRM_XE_ATOMIC_UNDEFINED,
.default_pat_index = op->map.pat_index,
.pat_index = op->map.pat_index,
+ .purgeable_state = XE_MADV_PURGEABLE_WILLNEED,
};
flags |= op->map.vma_flags & XE_VMA_CREATE_MASK;
#include "xe_pt.h"
#include "xe_svm.h"
#include "xe_tlb_inval.h"
+#include "xe_vm.h"
struct xe_vmas_in_madvise_range {
u64 addr;
}
}
+/**
+ * enum xe_bo_vmas_purge_state - VMA purgeable state aggregation
+ *
+ * Distinguishes whether a BO's VMAs are all DONTNEED, have at least
+ * one WILLNEED, or have no VMAs at all.
+ *
+ * Enum values align with XE_MADV_PURGEABLE_* states for consistency.
+ *
+ * Note: XE_MADV_PURGEABLE_PURGED deliberately has no counterpart here;
+ * purged BOs never leave that state and are filtered out before any
+ * per-VMA aggregation (see xe_bo_recompute_purgeable_state()).
+ */
+enum xe_bo_vmas_purge_state {
+ /** @XE_BO_VMAS_STATE_WILLNEED: At least one VMA is WILLNEED */
+ XE_BO_VMAS_STATE_WILLNEED = 0,
+ /** @XE_BO_VMAS_STATE_DONTNEED: All VMAs are DONTNEED */
+ XE_BO_VMAS_STATE_DONTNEED = 1,
+ /** @XE_BO_VMAS_STATE_NO_VMAS: BO has no VMAs */
+ XE_BO_VMAS_STATE_NO_VMAS = 2,
+};
+
+/*
+ * xe_bo_recompute_purgeable_state() casts between xe_bo_vmas_purge_state and
+ * xe_madv_purgeable_state. Enforce that WILLNEED=0 and DONTNEED=1 match across
+ * both enums so the single-line cast is always valid.
+ */
+static_assert(XE_BO_VMAS_STATE_WILLNEED == (int)XE_MADV_PURGEABLE_WILLNEED,
+ "VMA purge state WILLNEED must equal madv purgeable WILLNEED");
+static_assert(XE_BO_VMAS_STATE_DONTNEED == (int)XE_MADV_PURGEABLE_DONTNEED,
+ "VMA purge state DONTNEED must equal madv purgeable DONTNEED");
+
+/**
+ * xe_bo_all_vmas_dontneed() - Determine BO VMA purgeable state
+ * @bo: Buffer object
+ *
+ * Check all VMAs across all VMs to determine aggregate purgeable state.
+ * Shared BOs require unanimous DONTNEED state from all mappings.
+ *
+ * Caller must hold BO dma-resv lock.
+ *
+ * Return: XE_BO_VMAS_STATE_DONTNEED if all VMAs are DONTNEED,
+ * XE_BO_VMAS_STATE_WILLNEED if at least one VMA is not DONTNEED,
+ * XE_BO_VMAS_STATE_NO_VMAS if BO has no VMAs
+ */
+static enum xe_bo_vmas_purge_state xe_bo_all_vmas_dontneed(struct xe_bo *bo)
+{
+ struct drm_gpuvm_bo *vm_bo;
+ struct drm_gpuva *gpuva;
+ struct drm_gem_object *obj = &bo->ttm.base;
+ bool has_vmas = false;
+
+ xe_bo_assert_held(bo);
+
+ /* Outer loop: every VM this BO is bound in; inner loop: every
+ * mapping within that VM. A shared BO can thus be vetoed by any
+ * single mapping in any VM.
+ */
+ drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
+ drm_gpuvm_bo_for_each_va(gpuva, vm_bo) {
+ struct xe_vma *vma = gpuva_to_vma(gpuva);
+
+ has_vmas = true;
+
+ /* Any non-DONTNEED VMA prevents purging; short-circuit
+ * as soon as one is found.
+ */
+ if (vma->attr.purgeable_state != XE_MADV_PURGEABLE_DONTNEED)
+ return XE_BO_VMAS_STATE_WILLNEED;
+ }
+ }
+
+ /*
+ * No VMAs => preserve existing BO purgeable state.
+ * Avoids incorrectly flipping DONTNEED -> WILLNEED when last VMA unmapped.
+ */
+ if (!has_vmas)
+ return XE_BO_VMAS_STATE_NO_VMAS;
+
+ return XE_BO_VMAS_STATE_DONTNEED;
+}
+
+/**
+ * xe_bo_recompute_purgeable_state() - Recompute BO purgeable state from VMAs
+ * @bo: Buffer object (NULL is tolerated and treated as a no-op)
+ *
+ * Walk all VMAs to determine if BO should be purgeable or not.
+ * Shared BOs require unanimous DONTNEED state from all mappings.
+ * If the BO has no VMAs the existing state is preserved.
+ *
+ * Locking: Caller must hold BO dma-resv lock. When iterating GPUVM lists,
+ * VM lock must also be held (write) to prevent concurrent VMA modifications.
+ * This is satisfied at both call sites:
+ * - xe_vma_destroy(): holds vm->lock write
+ * - madvise_purgeable(): holds vm->lock write (from madvise ioctl path)
+ */
+void xe_bo_recompute_purgeable_state(struct xe_bo *bo)
+{
+ enum xe_bo_vmas_purge_state vma_state;
+
+ if (!bo)
+ return;
+
+ xe_bo_assert_held(bo);
+
+ /*
+ * Once purged, always purged. Cannot transition back to WILLNEED.
+ * This matches i915 semantics where purged BOs are permanently invalid.
+ */
+ if (bo->madv_purgeable == XE_MADV_PURGEABLE_PURGED)
+ return;
+
+ vma_state = xe_bo_all_vmas_dontneed(bo);
+
+ /* Only write an actual state change; NO_VMAS keeps the current
+ * state (the cast is validated by the static_asserts above).
+ */
+ if (vma_state != (enum xe_bo_vmas_purge_state)bo->madv_purgeable &&
+ vma_state != XE_BO_VMAS_STATE_NO_VMAS)
+ xe_bo_set_purgeable_state(bo, (enum xe_madv_purgeable_state)vma_state);
+}
+
/**
* madvise_purgeable - Handle purgeable buffer object advice
* @xe: XE device
for (i = 0; i < num_vmas; i++) {
struct xe_bo *bo = xe_vma_bo(vmas[i]);
- if (!bo)
+ if (!bo) {
+ /* Purgeable state applies to BOs only, skip non-BO VMAs */
+ vmas[i]->skip_invalidation = true;
continue;
+ }
/* BO must be locked before modifying madv state */
xe_bo_assert_held(bo);
*/
if (xe_bo_is_purged(bo)) {
details->has_purged_bo = true;
+ vmas[i]->skip_invalidation = true;
continue;
}
switch (op->purge_state_val.val) {
case DRM_XE_VMA_PURGEABLE_STATE_WILLNEED:
- xe_bo_set_purgeable_state(bo, XE_MADV_PURGEABLE_WILLNEED);
+ vmas[i]->attr.purgeable_state = XE_MADV_PURGEABLE_WILLNEED;
+ vmas[i]->skip_invalidation = true;
+
+ xe_bo_recompute_purgeable_state(bo);
break;
case DRM_XE_VMA_PURGEABLE_STATE_DONTNEED:
- xe_bo_set_purgeable_state(bo, XE_MADV_PURGEABLE_DONTNEED);
+ vmas[i]->attr.purgeable_state = XE_MADV_PURGEABLE_DONTNEED;
+ /*
+ * Don't zap PTEs at DONTNEED time -- pages are still
+ * alive. The zap happens in xe_bo_move_notify() right
+ * before the shrinker frees them.
+ */
+ vmas[i]->skip_invalidation = true;
+
+ xe_bo_recompute_purgeable_state(bo);
break;
default:
- drm_warn(&vm->xe->drm, "Invalid madvise value = %d\n",
- op->purge_state_val.val);
+ /* Should never hit - values validated in madvise_args_are_sane() */
+ xe_assert(vm->xe, 0);
return;
}
}