]> git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
drm/amdgpu: validate userq buffer virtual address and size
authorPrike Liang <Prike.Liang@amd.com>
Mon, 23 Jun 2025 08:29:38 +0000 (16:29 +0800)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 13 Nov 2025 20:37:23 +0000 (15:37 -0500)
[ Upstream commit 9e46b8bb0539d7bc9a9e7b3072fa4f6082490392 ]

It needs to validate the userq object's virtual address to
determine whether it is resident in a valid VM mapping.

Signed-off-by: Prike Liang <Prike.Liang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
drivers/gpu/drm/amd/amdgpu/mes_userqueue.c

index 65c8a38890d4877b7bd25e8c611aa3b6368d5d19..695eb2b052fc058bc23b9b36d5e80ea117c08d21 100644 (file)
@@ -44,6 +44,38 @@ u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
        return userq_ip_mask;
 }
 
+/**
+ * amdgpu_userq_input_va_validate - check a user-supplied queue VA range
+ * @vm: GPU VM the user queue belongs to
+ * @addr: GPU virtual address supplied by userspace
+ * @expected_size: required size of the object, in bytes
+ *
+ * Verifies that [addr, addr + expected_size) lies entirely inside one
+ * existing VM mapping, so userspace cannot hand the kernel an unmapped
+ * or too-short range.
+ *
+ * Returns 0 when the range is fully covered by a mapping, negative
+ * errno otherwise.
+ */
+int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
+                                  u64 expected_size)
+{
+       struct amdgpu_bo_va_mapping *va_map;
+       u64 user_addr;
+       u64 size;
+       int r;
+
+       /* A zero-byte range would truncate to size == 0 below and make
+        * the containment check vacuously true — reject it outright.
+        */
+       if (!expected_size)
+               return -EINVAL;
+
+       user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT;
+       /* Round sub-page sizes up so they still claim one full page. */
+       size = DIV_ROUND_UP(expected_size, AMDGPU_GPU_PAGE_SIZE);
+
+       r = amdgpu_bo_reserve(vm->root.bo, false);
+       if (r)
+               return r;
+
+       /* The queue object must be fully resident in one VM mapping. */
+       va_map = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
+       if (va_map && user_addr >= va_map->start &&
+           va_map->last - user_addr + 1 >= size)
+               r = 0;
+       else
+               r = -EINVAL;
+
+       amdgpu_bo_unreserve(vm->root.bo);
+       return r;
+}
+
 static int
 amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
                          struct amdgpu_usermode_queue *queue)
@@ -439,6 +471,14 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
                r = -ENOMEM;
                goto unlock;
        }
+
+       /* Validate the userq virtual addresses (queue, rptr, wptr).
+        * NOTE(review): on failure this path takes "goto unlock" without
+        * setting r to an error code, so the ioctl may report success after
+        * kfree(queue) — confirm r is negative here or set r = -EINVAL
+        * before the goto.
+        */
+       if (amdgpu_userq_input_va_validate(&fpriv->vm, args->in.queue_va, args->in.queue_size) ||
+           amdgpu_userq_input_va_validate(&fpriv->vm, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
+           amdgpu_userq_input_va_validate(&fpriv->vm, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
+               kfree(queue);
+               goto unlock;
+       }
        queue->doorbell_handle = args->in.doorbell_handle;
        queue->queue_type = args->in.ip_type;
        queue->vm = &fpriv->vm;
index b1ca91b7cda4bd8b51152516e7df77fff9c42137..8603c31320f11a09672d3362f09ee2a5ee7b3a90 100644 (file)
@@ -133,4 +133,6 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
 int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
                                                   u32 idx);
 
+int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
+                                  u64 expected_size);
 #endif
index 1457fb49a794fdddea9a3eeb1c04d75af3d84d68..ef54d211214f4cfc68608663d4f9756ac47c6326 100644 (file)
@@ -206,6 +206,7 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
        struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];
        struct drm_amdgpu_userq_in *mqd_user = args_in;
        struct amdgpu_mqd_prop *userq_props;
+       struct amdgpu_gfx_shadow_info shadow_info;
        int r;
 
        /* Structure to initialize MQD for userqueue using generic MQD init function */
@@ -231,6 +232,8 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
        userq_props->doorbell_index = queue->doorbell_index;
        userq_props->fence_address = queue->fence_drv->gpu_addr;
 
+       /* NOTE(review): shadow_info stays uninitialized when
+        * get_gfx_shadow_info is NULL, yet shadow_size/csa_size are read
+        * below — confirm the callback is always present for these IPs,
+        * or zero-initialize shadow_info.
+        */
+       if (adev->gfx.funcs->get_gfx_shadow_info)
+               adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true);
        if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {
                struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;
 
@@ -247,6 +250,10 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
                        goto free_mqd;
                }
 
+               /* NOTE(review): on validation failure this leaks compute_mqd
+                * (free_mqd does not free it) and may return r == 0 — confirm
+                * and free/set an error before the goto.
+                */
+               if (amdgpu_userq_input_va_validate(queue->vm, compute_mqd->eop_va,
+                   max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE)))
+                       goto free_mqd;
+
                userq_props->eop_gpu_addr = compute_mqd->eop_va;
                userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
                userq_props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
@@ -274,6 +281,11 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
                userq_props->csa_addr = mqd_gfx_v11->csa_va;
                userq_props->tmz_queue =
                        mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
+
+               /* NOTE(review): on validation failure this skips the
+                * kfree(mqd_gfx_v11) below, leaking it, and may return
+                * r == 0 — confirm and free/set an error before the goto.
+                */
+               if (amdgpu_userq_input_va_validate(queue->vm, mqd_gfx_v11->shadow_va,
+                   shadow_info.shadow_size))
+                       goto free_mqd;
+
                kfree(mqd_gfx_v11);
        } else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
                struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11;
@@ -291,6 +303,10 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
                        goto free_mqd;
                }
 
+               /* NOTE(review): on validation failure this skips the
+                * kfree(mqd_sdma_v11) below, leaking it, and may return
+                * r == 0 — confirm and free/set an error before the goto.
+                */
+               if (amdgpu_userq_input_va_validate(queue->vm, mqd_sdma_v11->csa_va,
+                   shadow_info.csa_size))
+                       goto free_mqd;
+
                userq_props->csa_addr = mqd_sdma_v11->csa_va;
                kfree(mqd_sdma_v11);
        }