drm/xe: Allow CPU address mirror VMA unbind with gpu bindings for madvise
author Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Thu, 21 Aug 2025 17:30:50 +0000 (23:00 +0530)
committer Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Tue, 26 Aug 2025 05:55:35 +0000 (11:25 +0530)
In the case of the MADVISE ioctl, if the start or end addresses fall
within a VMA and existing SVM ranges are present, remove the existing
SVM mappings. Then continue with ops_parse to create new VMAs via a REMAP
unmap of the old one.
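
A rough caller-side sketch of this flow (not part of the patch): a
madvise-originated bind marks its ops with XE_VMA_OPS_FLAG_MADVISE before
parsing, so the REMAP case unmaps overlapping SVM ranges instead of failing
with -EBUSY. The wrapper name below is hypothetical and the parse helper's
exact signature is assumed from the xe_vm.c hunk further down.

/*
 * Illustrative sketch only. XE_VMA_OPS_FLAG_MADVISE and
 * vm_bind_ioctl_ops_parse() appear in the diff below; the wrapper name
 * and exact signature here are assumptions.
 */
static int example_madvise_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
				     struct xe_vma_ops *vops)
{
	/* Tell ops_parse this remap comes from the MADVISE ioctl. */
	vops->flags |= XE_VMA_OPS_FLAG_MADVISE;

	return vm_bind_ioctl_ops_parse(vm, ops, vops);
}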

v2 (Matthew Brost)
- Use a vops flag to trigger unmapping of ranges in vm_bind_ioctl_ops_parse
- Rename the function

v3
- Fix doc

v4
- Check if the range is already in the garbage collector (Matthew Brost)

Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20250821173104.3030148-7-himal.prasad.ghimiray@intel.com
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
drivers/gpu/drm/xe/xe_svm.c
drivers/gpu/drm/xe/xe_svm.h
drivers/gpu/drm/xe/xe_vm.c

index e35c6d4def20cc3a2c2ff999f1eb0068e2f9317c..ce42100cb753e4ef16135faabfa5b2b9cbf69303 100644 (file)
@@ -932,6 +932,41 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
        return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
 }
 
+/**
+ * xe_svm_unmap_address_range - Unmap SVM mappings and ranges
+ * @vm: The VM
+ * @start: start address
+ * @end: end address
+ *
+ * This function unmaps SVM ranges if the start or end address lies inside them.
+ */
+void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
+{
+       struct drm_gpusvm_notifier *notifier, *next;
+
+       lockdep_assert_held_write(&vm->lock);
+
+       drm_gpusvm_for_each_notifier_safe(notifier, next, &vm->svm.gpusvm, start, end) {
+               struct drm_gpusvm_range *range, *__next;
+
+               drm_gpusvm_for_each_range_safe(range, __next, notifier, start, end) {
+                       if (start > drm_gpusvm_range_start(range) ||
+                           end < drm_gpusvm_range_end(range)) {
+                               if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range)))
+                                       drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
+                               drm_gpusvm_range_get(range);
+                               __xe_svm_garbage_collector(vm, to_xe_range(range));
+                               if (!list_empty(&to_xe_range(range)->garbage_collector_link)) {
+                                       spin_lock(&vm->svm.garbage_collector.lock);
+                                       list_del(&to_xe_range(range)->garbage_collector_link);
+                                       spin_unlock(&vm->svm.garbage_collector.lock);
+                               }
+                               drm_gpusvm_range_put(range);
+                       }
+               }
+       }
+}
+
 /**
  * xe_svm_bo_evict() - SVM evict BO to system memory
  * @bo: BO to evict
index 4bdccb56d25f5c6c059505ec84fe0fc4853ba905..184b3f4f0b2a5da7172e6dabf6218608fd659f35 100644 (file)
@@ -90,6 +90,8 @@ bool xe_svm_range_validate(struct xe_vm *vm,
 
 u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end,  struct xe_vma *vma);
 
+void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end);
+
 /**
  * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
  * @range: SVM range
@@ -303,6 +305,11 @@ u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vm
        return ULONG_MAX;
 }
 
+static inline
+void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
+{
+}
+
 #define xe_svm_assert_in_notifier(...) do {} while (0)
 #define xe_svm_range_has_dma_mapping(...) false
 
index 9c76cb31025d03f85753263fd9a36828609d8626..9fef01400c74ca78432a9b1c6b1d268c1c4f5675 100644 (file)
@@ -2694,8 +2694,12 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
                                end = op->base.remap.next->va.addr;
 
                        if (xe_vma_is_cpu_addr_mirror(old) &&
-                           xe_svm_has_mapping(vm, start, end))
-                               return -EBUSY;
+                           xe_svm_has_mapping(vm, start, end)) {
+                               if (vops->flags & XE_VMA_OPS_FLAG_MADVISE)
+                                       xe_svm_unmap_address_range(vm, start, end);
+                               else
+                                       return -EBUSY;
+                       }
 
                        op->remap.start = xe_vma_start(old);
                        op->remap.range = xe_vma_size(old);