]> git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
drm/amdgpu/userq: fix access to stale wptr mapping
author: Sunil Khatri <sunil.khatri@amd.com>
Mon, 4 May 2026 12:51:17 +0000 (18:21 +0530)
committer: Alex Deucher <alexander.deucher@amd.com>
Tue, 5 May 2026 14:22:13 +0000 (10:22 -0400)
Use drm_exec to take both locks, i.e. the vm root bo and the
wptr_obj bo, to access the mapping data properly.

This fixes the security issue of unmapping the wptr_obj while
a queue creation is in progress and passing another
bo at the same address.

Signed-off-by: Sunil Khatri <sunil.khatri@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
(cherry picked from commit 1fc6c8ab45dbee096469c08c13f6099d57a52d6c)
Cc: stable@vger.kernel.org
drivers/gpu/drm/amd/amdgpu/mes_userqueue.c

index 2fc39a6938f6db668c61a5c70036e75363374198..5b4121ddc78c6f3aa0915fc253a48bab373e3c17 100644 (file)
 #define AMDGPU_USERQ_PROC_CTX_SZ PAGE_SIZE
 #define AMDGPU_USERQ_GANG_CTX_SZ PAGE_SIZE
 
-static int
-mes_userq_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
-{
-       int ret;
-
-       ret = amdgpu_bo_reserve(bo, true);
-       if (ret) {
-               DRM_ERROR("Failed to reserve bo. ret %d\n", ret);
-               goto err_reserve_bo_failed;
-       }
-
-       ret = amdgpu_ttm_alloc_gart(&bo->tbo);
-       if (ret) {
-               DRM_ERROR("Failed to bind bo to GART. ret %d\n", ret);
-               goto err_map_bo_gart_failed;
-       }
-
-       amdgpu_bo_unreserve(bo);
-       bo = amdgpu_bo_ref(bo);
-
-       return 0;
-
-err_map_bo_gart_failed:
-       amdgpu_bo_unreserve(bo);
-err_reserve_bo_failed:
-       return ret;
-}
-
 static int
 mes_userq_create_wptr_mapping(struct amdgpu_device *adev,
                              struct amdgpu_userq_mgr *uq_mgr,
@@ -65,55 +37,62 @@ mes_userq_create_wptr_mapping(struct amdgpu_device *adev,
                              uint64_t wptr)
 {
        struct amdgpu_bo_va_mapping *wptr_mapping;
-       struct amdgpu_vm *wptr_vm;
        struct amdgpu_userq_obj *wptr_obj = &queue->wptr_obj;
+       struct amdgpu_bo *obj;
+       struct amdgpu_vm *vm = queue->vm;
+       struct drm_exec exec;
        int ret;
 
-       wptr_vm = queue->vm;
-       ret = amdgpu_bo_reserve(wptr_vm->root.bo, false);
-       if (ret)
-               return ret;
-
        wptr &= AMDGPU_GMC_HOLE_MASK;
-       wptr_mapping = amdgpu_vm_bo_lookup_mapping(wptr_vm, wptr >> PAGE_SHIFT);
-       amdgpu_bo_unreserve(wptr_vm->root.bo);
-       if (!wptr_mapping) {
-               DRM_ERROR("Failed to lookup wptr bo\n");
-               return -EINVAL;
-       }
 
-       wptr_obj->obj = wptr_mapping->bo_va->base.bo;
-       if (wptr_obj->obj->tbo.base.size > PAGE_SIZE) {
-               DRM_ERROR("Requested GART mapping for wptr bo larger than one page\n");
-               return -EINVAL;
-       }
+       drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 2);
+       drm_exec_until_all_locked(&exec) {
+               ret = amdgpu_vm_lock_pd(vm, &exec, 1);
+               drm_exec_retry_on_contention(&exec);
+               if (unlikely(ret))
+                       goto fail_lock;
+
+               wptr_mapping = amdgpu_vm_bo_lookup_mapping(vm, wptr >> PAGE_SHIFT);
+               if (!wptr_mapping) {
+                       ret = -EINVAL;
+                       goto fail_lock;
+               }
 
-       ret = mes_userq_map_gtt_bo_to_gart(wptr_obj->obj);
-       if (ret) {
-               DRM_ERROR("Failed to map wptr bo to GART\n");
-               return ret;
+               obj = wptr_mapping->bo_va->base.bo;
+               ret = drm_exec_lock_obj(&exec, &obj->tbo.base);
+               drm_exec_retry_on_contention(&exec);
+               if (unlikely(ret))
+                       goto fail_lock;
        }
 
-       ret = amdgpu_bo_reserve(wptr_obj->obj, true);
-       if (ret) {
-               DRM_ERROR("Failed to reserve wptr bo\n");
-               return ret;
+       wptr_obj->obj = amdgpu_bo_ref(wptr_mapping->bo_va->base.bo);
+       if (wptr_obj->obj->tbo.base.size > PAGE_SIZE) {
+               ret = -EINVAL;
+               goto fail_map;
        }
 
        /* TODO use eviction fence instead of pinning. */
        ret = amdgpu_bo_pin(wptr_obj->obj, AMDGPU_GEM_DOMAIN_GTT);
        if (ret) {
-               drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin wptr bo\n");
-               goto unresv_bo;
+               DRM_ERROR("Failed to pin wptr bo. ret %d\n", ret);
+               goto fail_map;
+       }
+
+       ret = amdgpu_ttm_alloc_gart(&wptr_obj->obj->tbo);
+       if (ret) {
+               DRM_ERROR("Failed to bind bo to GART. ret %d\n", ret);
+               goto fail_map;
        }
 
        queue->wptr_obj.gpu_addr = amdgpu_bo_gpu_offset(wptr_obj->obj);
-       amdgpu_bo_unreserve(wptr_obj->obj);
 
+       drm_exec_fini(&exec);
        return 0;
 
-unresv_bo:
-       amdgpu_bo_unreserve(wptr_obj->obj);
+fail_map:
+       amdgpu_bo_unref(&wptr_obj->obj);
+fail_lock:
+       drm_exec_fini(&exec);
        return ret;
 
 }