git.ipfire.org Git - thirdparty/linux.git/commitdiff
drm/xe/userptr: Unmap userptrs in the mmu notifier
author: Thomas Hellström <thomas.hellstrom@linux.intel.com>
date: Tue, 4 Mar 2025 17:33:42 +0000 (18:33 +0100)
committer: Rodrigo Vivi <rodrigo.vivi@intel.com>
date: Wed, 5 Mar 2025 19:25:27 +0000 (14:25 -0500)
If userptr pages are freed after a call to the xe mmu notifier,
the device will not be blocked out from theoretically accessing
these pages unless they are also unmapped from the iommu, and
this violates some aspects of the iommu-imposed security.

Ensure that userptrs are unmapped in the mmu notifier to
mitigate this. A naive attempt would try to free the sg table, but
the sg table itself may be accessed by a concurrent bind
operation, so settle for only unmapping.

v3:
- Update lockdep asserts.
- Fix a typo (Matthew Auld)

Fixes: 81e058a3e7fd ("drm/xe: Introduce helper to populate userptr")
Cc: Oak Zeng <oak.zeng@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: <stable@vger.kernel.org> # v6.10+
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Acked-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250304173342.22009-4-thomas.hellstrom@linux.intel.com
(cherry picked from commit ba767b9d01a2c552d76cf6f46b125d50ec4147a6)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
drivers/gpu/drm/xe/xe_hmm.c
drivers/gpu/drm/xe/xe_hmm.h
drivers/gpu/drm/xe/xe_vm.c
drivers/gpu/drm/xe/xe_vm_types.h

index be284b852307ee459f211e94a8dd2b9738a8b661..392102515f3d8f7dae6c2af4d7876a7211da7823 100644 (file)
@@ -150,6 +150,45 @@ static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
                               DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
 }
 
+static void xe_hmm_userptr_set_mapped(struct xe_userptr_vma *uvma)
+{
+       struct xe_userptr *userptr = &uvma->userptr;
+       struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+
+       lockdep_assert_held_write(&vm->lock);
+       lockdep_assert_held(&vm->userptr.notifier_lock);
+
+       mutex_lock(&userptr->unmap_mutex);
+       xe_assert(vm->xe, !userptr->mapped);
+       userptr->mapped = true;
+       mutex_unlock(&userptr->unmap_mutex);
+}
+
+void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma)
+{
+       struct xe_userptr *userptr = &uvma->userptr;
+       struct xe_vma *vma = &uvma->vma;
+       bool write = !xe_vma_read_only(vma);
+       struct xe_vm *vm = xe_vma_vm(vma);
+       struct xe_device *xe = vm->xe;
+
+       if (!lockdep_is_held_type(&vm->userptr.notifier_lock, 0) &&
+           !lockdep_is_held_type(&vm->lock, 0) &&
+           !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
+               /* Don't unmap in exec critical section. */
+               xe_vm_assert_held(vm);
+               /* Don't unmap while mapping the sg. */
+               lockdep_assert_held(&vm->lock);
+       }
+
+       mutex_lock(&userptr->unmap_mutex);
+       if (userptr->sg && userptr->mapped)
+               dma_unmap_sgtable(xe->drm.dev, userptr->sg,
+                                 write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
+       userptr->mapped = false;
+       mutex_unlock(&userptr->unmap_mutex);
+}
+
 /**
  * xe_hmm_userptr_free_sg() - Free the scatter gather table of userptr
  * @uvma: the userptr vma which hold the scatter gather table
@@ -161,16 +200,9 @@ static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
 void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma)
 {
        struct xe_userptr *userptr = &uvma->userptr;
-       struct xe_vma *vma = &uvma->vma;
-       bool write = !xe_vma_read_only(vma);
-       struct xe_vm *vm = xe_vma_vm(vma);
-       struct xe_device *xe = vm->xe;
-       struct device *dev = xe->drm.dev;
-
-       xe_assert(xe, userptr->sg);
-       dma_unmap_sgtable(dev, userptr->sg,
-                         write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
 
+       xe_assert(xe_vma_vm(&uvma->vma)->xe, userptr->sg);
+       xe_hmm_userptr_unmap(uvma);
        sg_free_table(userptr->sg);
        userptr->sg = NULL;
 }
@@ -297,6 +329,7 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
 
        xe_mark_range_accessed(&hmm_range, write);
        userptr->sg = &userptr->sgt;
+       xe_hmm_userptr_set_mapped(uvma);
        userptr->notifier_seq = hmm_range.notifier_seq;
        up_read(&vm->userptr.notifier_lock);
        kvfree(pfns);
index 9602cb7d976ddfc8a7f2a941bcc5523472e4fc8f..0ea98d8e7bbc76ea236b0d6f3f9742e713f0675c 100644 (file)
@@ -13,4 +13,6 @@ struct xe_userptr_vma;
 int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, bool is_mm_mmap_locked);
 
 void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma);
+
+void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma);
 #endif
index d54aaa5eaff38b46d088f5c86c366b9a7f805a76..ec6ec18ab3faa4844f94a55da45578e4ddbafae8 100644 (file)
@@ -620,6 +620,8 @@ static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uv
                err = xe_vm_invalidate_vma(vma);
                XE_WARN_ON(err);
        }
+
+       xe_hmm_userptr_unmap(uvma);
 }
 
 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
@@ -1039,6 +1041,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
                        INIT_LIST_HEAD(&userptr->invalidate_link);
                        INIT_LIST_HEAD(&userptr->repin_link);
                        vma->gpuva.gem.offset = bo_offset_or_userptr;
+                       mutex_init(&userptr->unmap_mutex);
 
                        err = mmu_interval_notifier_insert(&userptr->notifier,
                                                           current->mm,
@@ -1080,6 +1083,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
                 * them anymore
                 */
                mmu_interval_notifier_remove(&userptr->notifier);
+               mutex_destroy(&userptr->unmap_mutex);
                xe_vm_put(vm);
        } else if (xe_vma_is_null(vma)) {
                xe_vm_put(vm);
index d2511819cdf43041af57f9adbbb36bfe8122b555..a4b4091cfd0dab902bc6ad3d26064b50ed9aed67 100644 (file)
@@ -59,12 +59,16 @@ struct xe_userptr {
        struct sg_table *sg;
        /** @notifier_seq: notifier sequence number */
        unsigned long notifier_seq;
+       /** @unmap_mutex: Mutex protecting dma-unmapping */
+       struct mutex unmap_mutex;
        /**
         * @initial_bind: user pointer has been bound at least once.
         * write: vm->userptr.notifier_lock in read mode and vm->resv held.
         * read: vm->userptr.notifier_lock in write mode or vm->resv held.
         */
        bool initial_bind;
+       /** @mapped: Whether the @sgt sg-table is dma-mapped. Protected by @unmap_mutex. */
+       bool mapped;
 #if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
        u32 divisor;
 #endif