From cc795e0410342076a4ad65ca2b5450cda779f784 Mon Sep 17 00:00:00 2001
From: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Date: Tue, 13 May 2025 09:32:23 +0530
Subject: [PATCH] drm/xe/svm: Make xe_svm_range_needs_migrate_to_vram() public

xe_svm_range_needs_migrate_to_vram() determines whether a range needs
to be migrated to VRAM. Make it public and have it take a region
preference parameter as well, so the prefetch path can reuse it.

v2
- add assert instead of warn (Matthew Brost)

Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20250513040228.470682-11-himal.prasad.ghimiray@intel.com
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
---
 drivers/gpu/drm/xe/xe_svm.c | 26 ++++++++++++++++++--------
 drivers/gpu/drm/xe/xe_svm.h | 10 ++++++++++
 2 files changed, 28 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 6e593733d473f..c6505350c8ffd 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -733,22 +733,32 @@ static bool supports_4K_migration(struct xe_device *xe)
 	return true;
 }
 
-static bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range,
-					       struct xe_vma *vma)
+/**
+ * xe_svm_range_needs_migrate_to_vram() - SVM range needs migrate to VRAM or not
+ * @range: SVM range for which migration needs to be decided
+ * @vma: vma which has range
+ * @preferred_region_is_vram: preferred region for range is vram
+ *
+ * Return: True for range needing migration and migration is supported else false
+ */
+bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
+					bool preferred_region_is_vram)
 {
 	struct xe_vm *vm = range_to_vm(&range->base);
 	u64 range_size = xe_svm_range_size(range);
 
-	if (!range->base.flags.migrate_devmem)
+	if (!range->base.flags.migrate_devmem || !preferred_region_is_vram)
 		return false;
 
-	if (xe_svm_range_in_vram(range)) {
-		drm_dbg(&vm->xe->drm, "Range is already in VRAM\n");
+	xe_assert(vm->xe, IS_DGFX(vm->xe));
+
+	if (preferred_region_is_vram && xe_svm_range_in_vram(range)) {
+		drm_info(&vm->xe->drm, "Range is already in VRAM\n");
 		return false;
 	}
 
-	if (range_size <= SZ_64K && !supports_4K_migration(vm->xe)) {
-		drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
+	if (preferred_region_is_vram && range_size <= SZ_64K && !supports_4K_migration(vm->xe)) {
+		drm_warn(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
 		return false;
 	}
 
@@ -817,7 +827,7 @@ retry:
 	range_debug(range, "PAGE FAULT");
 
 	if (--migrate_try_count >= 0 &&
-	    xe_svm_range_needs_migrate_to_vram(range, vma)) {
+	    xe_svm_range_needs_migrate_to_vram(range, vma, IS_DGFX(vm->xe))) {
 		err = xe_svm_alloc_vram(vm, tile, range, &ctx);
 		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
 		if (err) {
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index e304d147d309b..0ee845e35b3ee 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -80,6 +80,9 @@ struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
 int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
 			   struct drm_gpusvm_ctx *ctx);
 
+bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
+					bool preferred_region_is_vram);
+
 /**
  * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
  * @range: SVM range
@@ -268,6 +271,13 @@ static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
 	return 0;
 }
 
+static inline
+bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
+					u32 region)
+{
+	return false;
+}
+
 #define xe_svm_assert_in_notifier(...) do {} while (0)
 #define xe_svm_range_has_dma_mapping(...) false
 
-- 
2.47.2
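
Note: the fault handler above passes IS_DGFX(vm->xe) as the preference,
i.e. "migrate to VRAM whenever this is a discrete GPU", while a prefetch
caller would derive the preference from the user-requested placement. A
minimal sketch of such a caller follows; the function name, the region
encoding (0 taken to mean SMEM), and the error handling are assumptions
for illustration, not part of this patch:

/* Hypothetical prefetch-side caller, for illustration only. */
static int xe_svm_prefetch_range_sketch(struct xe_vm *vm, struct xe_tile *tile,
					struct xe_svm_range *range,
					struct xe_vma *vma, u32 region,
					struct drm_gpusvm_ctx *ctx)
{
	/* Assumed encoding: region 0 requests SMEM, anything else VRAM. */
	bool preferred_region_is_vram = region != 0;

	/* Nothing to do if migration is unwanted or unsupported. */
	if (!xe_svm_range_needs_migrate_to_vram(range, vma, preferred_region_is_vram))
		return 0;

	/* Reuse the same allocation helper the fault handler calls. */
	return xe_svm_alloc_vram(vm, tile, range, ctx);
}

Taking the preference as a bool keeps the helper agnostic of how each
caller encodes placement: the fault handler collapses it to IS_DGFX(),
while prefetch can map an arbitrary region id onto it.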