git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/amdgpu/userq: fix SDMA and compute validation
author: Alex Deucher <alexander.deucher@amd.com>
Fri, 10 Oct 2025 19:21:02 +0000 (15:21 -0400)
committer: Alex Deucher <alexander.deucher@amd.com>
Tue, 28 Oct 2025 13:59:48 +0000 (09:59 -0400)
The CSA and EOP buffers have different alignment requirements.
Hardcode them for now as a bug fix.  A proper query will be added in
a subsequent patch.

v2: verify gfx shadow helper callback (Prike)

Fixes: 9e46b8bb0539 ("drm/amdgpu: validate userq buffer virtual address and size")
Reviewed-by: Prike Liang <Prike.Liang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/mes_userqueue.c

index 9894a3eed21524cf2fb329e4b259254e24eaf955..b1ee9473d6280611222d03ebb692d5f40c61617b 100644 (file)
@@ -252,7 +252,6 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
        struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];
        struct drm_amdgpu_userq_in *mqd_user = args_in;
        struct amdgpu_mqd_prop *userq_props;
-       struct amdgpu_gfx_shadow_info shadow_info;
        int r;
 
        /* Structure to initialize MQD for userqueue using generic MQD init function */
@@ -278,8 +277,6 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
        userq_props->doorbell_index = queue->doorbell_index;
        userq_props->fence_address = queue->fence_drv->gpu_addr;
 
-       if (adev->gfx.funcs->get_gfx_shadow_info)
-               adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true);
        if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {
                struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;
 
@@ -297,7 +294,7 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
                }
 
                r = amdgpu_userq_input_va_validate(queue, compute_mqd->eop_va,
-                                                  max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE));
+                                                  2048);
                if (r)
                        goto free_mqd;
 
@@ -310,6 +307,14 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
                kfree(compute_mqd);
        } else if (queue->queue_type == AMDGPU_HW_IP_GFX) {
                struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11;
+               struct amdgpu_gfx_shadow_info shadow_info;
+
+               if (adev->gfx.funcs->get_gfx_shadow_info) {
+                       adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true);
+               } else {
+                       r = -EINVAL;
+                       goto free_mqd;
+               }
 
                if (mqd_user->mqd_size != sizeof(*mqd_gfx_v11) || !mqd_user->mqd) {
                        DRM_ERROR("Invalid GFX MQD\n");
@@ -333,6 +338,10 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
                                                   shadow_info.shadow_size);
                if (r)
                        goto free_mqd;
+               r = amdgpu_userq_input_va_validate(queue, mqd_gfx_v11->csa_va,
+                                                  shadow_info.csa_size);
+               if (r)
+                       goto free_mqd;
 
                kfree(mqd_gfx_v11);
        } else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
@@ -351,7 +360,7 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
                        goto free_mqd;
                }
                r = amdgpu_userq_input_va_validate(queue, mqd_sdma_v11->csa_va,
-                                                  shadow_info.csa_size);
+                                                  32);
                if (r)
                        goto free_mqd;