From 4a938d3886d6dd29a54a2e6ebc03b6c2b7509ebf Mon Sep 17 00:00:00 2001
From: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Date: Tue, 25 Nov 2025 13:26:25 +0530
Subject: [PATCH] drm/xe: Merge adjacent default-attribute VMAs during garbage
 collection
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

While restoring default memory attributes for VMAs during garbage
collection, extend the target range by checking neighboring VMAs. If
adjacent VMAs are CPU-address-mirrored and have default attributes,
include them in the mergeable range to reduce fragmentation and improve
VMA reuse.

v2
- Rebase

Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patch.msgid.link/20251125075628.1182481-3-himal.prasad.ghimiray@intel.com
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
---
 drivers/gpu/drm/xe/xe_svm.c | 62 +++++++++++++++++++++----------------
 1 file changed, 36 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 55c5a0eb82e12..e895a95af126e 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -285,19 +285,21 @@ static int __xe_svm_garbage_collector(struct xe_vm *vm,
 	return 0;
 }
 
-static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64 range_end)
+static void xe_vma_set_default_attributes(struct xe_vma *vma)
+{
+	vma->attr.preferred_loc.devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE;
+	vma->attr.preferred_loc.migration_policy = DRM_XE_MIGRATE_ALL_PAGES;
+	vma->attr.pat_index = vma->attr.default_pat_index;
+	vma->attr.atomic_access = DRM_XE_ATOMIC_UNDEFINED;
+}
+
+static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 start, u64 end)
 {
 	struct xe_vma *vma;
-	struct xe_vma_mem_attr default_attr = {
-		.preferred_loc = {
-			.devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
-			.migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
-		},
-		.atomic_access = DRM_XE_ATOMIC_UNDEFINED,
-	};
-	int err = 0;
+	bool has_default_attr;
+	int err;
 
-	vma = xe_vm_find_vma_by_addr(vm, range_start);
+	vma = xe_vm_find_vma_by_addr(vm, start);
 	if (!vma)
 		return -EINVAL;
 
@@ -306,25 +308,33 @@ static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64
 		return 0;
 	}
 
-	if (xe_vma_has_default_mem_attrs(vma))
-		return 0;
-
 	vm_dbg(&vm->xe->drm, "Existing VMA start=0x%016llx, vma_end=0x%016llx",
 	       xe_vma_start(vma), xe_vma_end(vma));
 
-	if (xe_vma_start(vma) == range_start && xe_vma_end(vma) == range_end) {
-		default_attr.pat_index = vma->attr.default_pat_index;
-		default_attr.default_pat_index = vma->attr.default_pat_index;
-		vma->attr = default_attr;
-	} else {
-		vm_dbg(&vm->xe->drm, "Split VMA start=0x%016llx, vma_end=0x%016llx",
-		       range_start, range_end);
-		err = xe_vm_alloc_cpu_addr_mirror_vma(vm, range_start, range_end - range_start);
-		if (err) {
-			drm_warn(&vm->xe->drm, "VMA SPLIT failed: %pe\n", ERR_PTR(err));
-			xe_vm_kill(vm, true);
-			return err;
-		}
+	has_default_attr = xe_vma_has_default_mem_attrs(vma);
+
+	if (has_default_attr) {
+		if (xe_svm_has_mapping(vm, xe_vma_start(vma), xe_vma_end(vma)))
+			return 0;
+
+		start = xe_vma_start(vma);
+		end = xe_vma_end(vma);
+	} else if (xe_vma_start(vma) == start && xe_vma_end(vma) == end) {
+		xe_vma_set_default_attributes(vma);
+	}
+
+	xe_vm_find_cpu_addr_mirror_vma_range(vm, &start, &end);
+
+	if (xe_vma_start(vma) == start && xe_vma_end(vma) == end && has_default_attr)
+		return 0;
+
+	vm_dbg(&vm->xe->drm, "New VMA start=0x%016llx, vma_end=0x%016llx", start, end);
+
+	err = xe_vm_alloc_cpu_addr_mirror_vma(vm, start, end - start);
+	if (err) {
+		drm_warn(&vm->xe->drm, "New VMA MAP failed: %pe\n", ERR_PTR(err));
+		xe_vm_kill(vm, true);
+		return err;
 	}
 
 	/*
-- 
2.47.3
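
[Editor's illustration] For readers following the commit message outside the
driver, below is a minimal, self-contained user-space sketch of the
range-extension idea: grow [start, end) over adjacent VMAs that are
CPU-address-mirrored and still carry default attributes, so one merged VMA can
replace several fragments. Everything here (demo_vma, demo_vma_mergeable,
demo_extend_range) is a hypothetical stand-in for the driver's own structures
and for what xe_vm_find_cpu_addr_mirror_vma_range does; it is not the xe code.

/*
 * Sketch of merging adjacent default-attribute VMAs into one range.
 * All names are illustrative, not the xe driver's API.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_vma {
	uint64_t start, end;          /* [start, end) address range */
	bool cpu_addr_mirror;         /* CPU-address-mirrored VMA */
	bool has_default_attrs;       /* attributes never overridden */
	struct demo_vma *prev, *next; /* neighbors sorted by address */
};

/* A neighbor may be folded into the merged range only if it is both
 * CPU-address-mirrored and still carries default attributes. */
static bool demo_vma_mergeable(const struct demo_vma *vma)
{
	return vma && vma->cpu_addr_mirror && vma->has_default_attrs;
}

/* Grow [*start, *end) across mergeable neighbors of @vma, analogous in
 * spirit to the lookup the patch performs before allocating one larger
 * CPU-address-mirror VMA over the widened range. */
static void demo_extend_range(struct demo_vma *vma, uint64_t *start, uint64_t *end)
{
	struct demo_vma *cur;

	for (cur = vma->prev; demo_vma_mergeable(cur); cur = cur->prev)
		*start = cur->start;
	for (cur = vma->next; demo_vma_mergeable(cur); cur = cur->next)
		*end = cur->end;
}

int main(void)
{
	/* Three contiguous mirror VMAs, all with default attributes. */
	struct demo_vma a = { .start = 0x1000, .end = 0x2000,
			      .cpu_addr_mirror = true, .has_default_attrs = true };
	struct demo_vma b = { .start = 0x2000, .end = 0x3000,
			      .cpu_addr_mirror = true, .has_default_attrs = true };
	struct demo_vma c = { .start = 0x3000, .end = 0x4000,
			      .cpu_addr_mirror = true, .has_default_attrs = true };
	uint64_t start = b.start, end = b.end;

	a.next = &b;
	b.prev = &a;
	b.next = &c;
	c.prev = &b;

	/* The middle VMA's range widens over both neighbors. */
	demo_extend_range(&b, &start, &end);
	printf("merged range: [0x%" PRIx64 ", 0x%" PRIx64 ")\n", start, end);
	assert(start == 0x1000 && end == 0x4000);
	return 0;
}

This mirrors the patch's flow: if the faulting VMA already has default
attributes the target range first widens to cover that whole VMA, then extends
over mergeable neighbors, and only if the result differs from the existing VMA
is one larger VMA allocated to replace the fragments.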