git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/xe/svm: Enable UNMAP for VMA merging operations
authorHimal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Tue, 25 Nov 2025 07:56:27 +0000 (13:26 +0530)
committerHimal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Wed, 26 Nov 2025 10:12:33 +0000 (15:42 +0530)
Allow UNMAP of VMAs associated with SVM mappings when the MAP operation
is intended to merge adjacent CPU_ADDR_MIRROR VMAs.

v2:
- Remove the mapping-exists check in the garbage collector

Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patch.msgid.link/20251125075628.1182481-5-himal.prasad.ghimiray@intel.com
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
drivers/gpu/drm/xe/xe_svm.c
drivers/gpu/drm/xe/xe_vm.c
drivers/gpu/drm/xe/xe_vm_types.h

index e895a95af126ec7aefd08f6902748ee7b6717d8a..46977ec1e0de5b7e9cc83ea4e50782ec21f479d3 100644 (file)
@@ -314,9 +314,6 @@ static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 start, u64 end)
        has_default_attr = xe_vma_has_default_mem_attrs(vma);
 
        if (has_default_attr) {
-               if (xe_svm_has_mapping(vm, xe_vma_start(vma), xe_vma_end(vma)))
-                       return 0;
-
                start = xe_vma_start(vma);
                end = xe_vma_end(vma);
        } else if (xe_vma_start(vma) == start && xe_vma_end(vma) == end) {
index 2681039f32214e2ee5cb6f96c991fa8d526e8516..a7a21966e390b8d4b60d325c28cef93541331a31 100644 (file)
@@ -2246,8 +2246,10 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
 
        switch (operation) {
        case DRM_XE_VM_BIND_OP_MAP:
-               if (flags & DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
+               if (flags & DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR) {
                        xe_vm_find_cpu_addr_mirror_vma_range(vm, &range_start, &range_end);
+                       vops->flags |= XE_VMA_OPS_FLAG_ALLOW_SVM_UNMAP;
+               }
 
                fallthrough;
        case DRM_XE_VM_BIND_OP_MAP_USERPTR: {
@@ -2729,7 +2731,8 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
 
                        if (xe_vma_is_cpu_addr_mirror(vma) &&
                            xe_svm_has_mapping(vm, xe_vma_start(vma),
-                                              xe_vma_end(vma)))
+                                              xe_vma_end(vma)) &&
+                           !(vops->flags & XE_VMA_OPS_FLAG_ALLOW_SVM_UNMAP))
                                return -EBUSY;
 
                        if (!xe_vma_is_cpu_addr_mirror(vma))
@@ -4315,6 +4318,8 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
 
        if (is_madvise)
                vops.flags |= XE_VMA_OPS_FLAG_MADVISE;
+       else
+               vops.flags |= XE_VMA_OPS_FLAG_ALLOW_SVM_UNMAP;
 
        err = vm_bind_ioctl_ops_parse(vm, ops, &vops);
        if (err)
@@ -4391,7 +4396,6 @@ int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t start, uint64_t range)
 static bool is_cpu_addr_vma_with_default_attr(struct xe_vma *vma)
 {
        return vma && xe_vma_is_cpu_addr_mirror(vma) &&
-              !xe_svm_has_mapping(xe_vma_vm(vma), xe_vma_start(vma), xe_vma_end(vma)) &&
               xe_vma_has_default_mem_attrs(vma);
 }
 
index ccd6cc090309f1a30919c472a6a2b7cf860cfdd2..3bf912bfbdcc496fbeb1ab0ee2f65cd8024820c6 100644 (file)
@@ -467,6 +467,7 @@ struct xe_vma_ops {
 #define XE_VMA_OPS_FLAG_MADVISE          BIT(1)
 #define XE_VMA_OPS_ARRAY_OF_BINDS       BIT(2)
 #define XE_VMA_OPS_FLAG_SKIP_TLB_WAIT   BIT(3)
+#define XE_VMA_OPS_FLAG_ALLOW_SVM_UNMAP  BIT(4)
        u32 flags;
 #ifdef TEST_VM_OPS_ERROR
        /** @inject_error: inject error to test error handling */