drm/xe: Merge adjacent default-attribute VMAs during garbage collection
author Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Tue, 25 Nov 2025 07:56:25 +0000 (13:26 +0530)
committer Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Wed, 26 Nov 2025 10:12:33 +0000 (15:42 +0530)
While restoring default memory attributes for VMAs during garbage
collection, extend the target range by checking neighboring VMAs. If
adjacent VMAs are CPU-address-mirrored and have default attributes,
include them in the mergeable range to reduce fragmentation and improve
VMA reuse.

v2:
- Rebase

Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patch.msgid.link/20251125075628.1182481-3-himal.prasad.ghimiray@intel.com
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
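
The standalone C sketch below illustrates the neighbor-extension idea described in the message above. It is not the xe implementation: the patch relies on xe_vm_find_cpu_addr_mirror_vma_range() walking the VM's VMA tree, whereas the toy_vma structure and extend_over_default_neighbors() helper here are hypothetical stand-ins that assume a flat list of non-overlapping VMAs.

/*
 * Toy model (not kernel code) of the range-extension step: given a
 * garbage-collected range [start, end), absorb any adjacent VMA that is
 * CPU-address-mirrored and still carries default memory attributes.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_vma {
	uint64_t start, end;    /* half-open range [start, end) */
	bool cpu_addr_mirror;   /* stand-in for a CPU-address-mirror xe_vma */
	bool default_attrs;     /* stand-in for xe_vma_has_default_mem_attrs() */
};

/* Grow [*start, *end) over mergeable neighbors until no more touch it. */
static void extend_over_default_neighbors(const struct toy_vma *vmas, int n,
					  uint64_t *start, uint64_t *end)
{
	bool changed;

	do {
		changed = false;
		for (int i = 0; i < n; i++) {
			const struct toy_vma *v = &vmas[i];

			if (!v->cpu_addr_mirror || !v->default_attrs)
				continue;
			if (v->end == *start) {   /* touches the left edge */
				*start = v->start;
				changed = true;
			}
			if (v->start == *end) {   /* touches the right edge */
				*end = v->end;
				changed = true;
			}
		}
	} while (changed);
}

int main(void)
{
	/* Three adjacent VMAs; the middle one is the collected range. */
	const struct toy_vma vmas[] = {
		{ 0x1000, 0x2000, true, true },
		{ 0x2000, 0x3000, true, true },  /* target */
		{ 0x3000, 0x5000, true, true },
	};
	uint64_t start = 0x2000, end = 0x3000;

	extend_over_default_neighbors(vmas, 3, &start, &end);
	printf("merged range: [0x%llx, 0x%llx)\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}

In the actual patch, once the extended range differs from the original VMA (or the attributes were not already default), xe_vm_alloc_cpu_addr_mirror_vma() remaps the whole merged span as a single CPU-address-mirror VMA, as the hunk below shows.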
drivers/gpu/drm/xe/xe_svm.c

index 55c5a0eb82e122abc50109f5ba434be88028ddbe..e895a95af126ec7aefd08f6902748ee7b6717d8a 100644 (file)
@@ -285,19 +285,21 @@ static int __xe_svm_garbage_collector(struct xe_vm *vm,
        return 0;
 }
 
-static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64 range_end)
+static void xe_vma_set_default_attributes(struct xe_vma *vma)
+{
+       vma->attr.preferred_loc.devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE;
+       vma->attr.preferred_loc.migration_policy = DRM_XE_MIGRATE_ALL_PAGES;
+       vma->attr.pat_index = vma->attr.default_pat_index;
+       vma->attr.atomic_access = DRM_XE_ATOMIC_UNDEFINED;
+}
+
+static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 start, u64 end)
 {
        struct xe_vma *vma;
-       struct xe_vma_mem_attr default_attr = {
-               .preferred_loc = {
-                       .devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
-                       .migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
-               },
-               .atomic_access = DRM_XE_ATOMIC_UNDEFINED,
-       };
-       int err = 0;
+       bool has_default_attr;
+       int err;
 
-       vma = xe_vm_find_vma_by_addr(vm, range_start);
+       vma = xe_vm_find_vma_by_addr(vm, start);
        if (!vma)
                return -EINVAL;
 
@@ -306,25 +308,33 @@ static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64
                return 0;
        }
 
-       if (xe_vma_has_default_mem_attrs(vma))
-               return 0;
-
        vm_dbg(&vm->xe->drm, "Existing VMA start=0x%016llx, vma_end=0x%016llx",
               xe_vma_start(vma), xe_vma_end(vma));
 
-       if (xe_vma_start(vma) == range_start && xe_vma_end(vma) == range_end) {
-               default_attr.pat_index = vma->attr.default_pat_index;
-               default_attr.default_pat_index  = vma->attr.default_pat_index;
-               vma->attr = default_attr;
-       } else {
-               vm_dbg(&vm->xe->drm, "Split VMA start=0x%016llx, vma_end=0x%016llx",
-                      range_start, range_end);
-               err = xe_vm_alloc_cpu_addr_mirror_vma(vm, range_start, range_end - range_start);
-               if (err) {
-                       drm_warn(&vm->xe->drm, "VMA SPLIT failed: %pe\n", ERR_PTR(err));
-                       xe_vm_kill(vm, true);
-                       return err;
-               }
+       has_default_attr = xe_vma_has_default_mem_attrs(vma);
+
+       if (has_default_attr) {
+               if (xe_svm_has_mapping(vm, xe_vma_start(vma), xe_vma_end(vma)))
+                       return 0;
+
+               start = xe_vma_start(vma);
+               end = xe_vma_end(vma);
+       } else if (xe_vma_start(vma) == start && xe_vma_end(vma) == end) {
+               xe_vma_set_default_attributes(vma);
+       }
+
+       xe_vm_find_cpu_addr_mirror_vma_range(vm, &start, &end);
+
+       if (xe_vma_start(vma) == start && xe_vma_end(vma) == end && has_default_attr)
+               return 0;
+
+       vm_dbg(&vm->xe->drm, "New VMA start=0x%016llx, vma_end=0x%016llx", start, end);
+
+       err = xe_vm_alloc_cpu_addr_mirror_vma(vm, start, end - start);
+       if (err) {
+               drm_warn(&vm->xe->drm, "New VMA MAP failed: %pe\n", ERR_PTR(err));
+               xe_vm_kill(vm, true);
+               return err;
        }
 
        /*