]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
drm/amdgpu: validate the queue va for resuming the queue
authorPrike Liang <Prike.Liang@amd.com>
Thu, 9 Oct 2025 08:45:27 +0000 (16:45 +0800)
committerAlex Deucher <alexander.deucher@amd.com>
Mon, 13 Oct 2025 18:14:34 +0000 (14:14 -0400)
It requires validating whether the userq VA is mapped before
trying to resume the queue.

Signed-off-by: Prike Liang <Prike.Liang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c

index 4cd662ad7be9ea35d19c6963854f80a90e412b5b..c1fee1ac2c0fcf0d652f13b317bf341a5a7f118c 100644 (file)
@@ -97,6 +97,42 @@ out_err:
        return r;
 }
 
+static bool amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr)
+{
+       struct amdgpu_bo_va_mapping *mapping;
+       bool mapped = false;
+
+       /* The VM mapping lookup requires the root BO reservation held. */
+       if (amdgpu_bo_reserve(vm->root.bo, false))
+               return false;
+
+       mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
+       /* A mapping only counts when its bo_va is flagged as userq mapped. */
+       if (!IS_ERR_OR_NULL(mapping))
+               mapped = atomic_read(&mapping->bo_va->userq_va_mapped);
+       amdgpu_bo_unreserve(vm->root.bo);
+
+       return mapped;
+}
+
+static bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_usermode_queue *queue)
+{
+       struct amdgpu_userq_va_cursor *va_cursor, *tmp;
+       bool mapped = false;
+
+       /* Restoring is only safe when every tracked VA is still mapped. */
+       list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
+               mapped = amdgpu_userq_buffer_va_mapped(queue->vm, va_cursor->gpu_addr);
+               dev_dbg(queue->userq_mgr->adev->dev,
+                       "validate the userq mapping:%p va:%llx mapped:%d\n",
+                       queue, va_cursor->gpu_addr, mapped);
+               if (!mapped)
+                       return false;
+       }
+
+       return mapped;
+}
+
 static void amdgpu_userq_buffer_va_list_del(struct amdgpu_bo_va_mapping *mapping,
                                            struct amdgpu_userq_va_cursor *va_cursor)
 {
@@ -761,6 +797,14 @@ amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
 
        /* Resume all the queues for this process */
        idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+
+               if (!amdgpu_userq_buffer_vas_mapped(queue)) {
+                       drm_file_err(uq_mgr->file,
+                                    "trying restore queue without va mapping\n");
+                       queue->state = AMDGPU_USERQ_STATE_INVALID_VA;
+                       continue;
+               }
+
                r = amdgpu_userq_restore_helper(uq_mgr, queue);
                if (r)
                        ret = r;