From: Christian König Date: Thu, 29 Jan 2026 11:58:10 +0000 (+0100) Subject: drm/amdgpu: make amdgpu_user_wait_ioctl more resilient v2 X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=1cc16538402a69d9d028e0cdb7b3bbe19e64019a;p=thirdparty%2Flinux.git drm/amdgpu: make amdgpu_user_wait_ioctl more resilient v2 When the memory allocated by userspace isn't sufficient for all the fences then just wait on them instead of returning an error. v2: use correct variable as pointed out by Sunil Signed-off-by: Christian König Reviewed-by: Sunil Khatri Signed-off-by: Alex Deucher --- diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c index 781896c9fd267..f77fc210cb939 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c @@ -705,7 +705,7 @@ amdgpu_userq_wait_count_fences(struct drm_file *filp, num_fences++; } - wait_info->num_fences = num_fences; + wait_info->num_fences = min(num_fences, USHRT_MAX); r = 0; error_unlock: @@ -714,6 +714,19 @@ error_unlock: return r; } +static int +amdgpu_userq_wait_add_fence(struct drm_amdgpu_userq_wait *wait_info, + struct dma_fence **fences, unsigned int *num_fences, + struct dma_fence *fence) +{ + /* As fallback shouldn't userspace allocate enough space */ + if (*num_fences >= wait_info->num_fences) + return dma_fence_wait(fence, true); + + fences[(*num_fences)++] = dma_fence_get(fence); + return 0; +} + static int amdgpu_userq_wait_return_fence_info(struct drm_file *filp, struct drm_amdgpu_userq_wait *wait_info, @@ -757,13 +770,12 @@ amdgpu_userq_wait_return_fence_info(struct drm_file *filp, goto free_fences; dma_fence_unwrap_for_each(f, &iter, fence) { - if (num_fences >= wait_info->num_fences) { - r = -EINVAL; + r = amdgpu_userq_wait_add_fence(wait_info, fences, + &num_fences, f); + if (r) { dma_fence_put(fence); goto free_fences; } - - fences[num_fences++] = dma_fence_get(f); } dma_fence_put(fence); @@ -780,14 
+792,12 @@ amdgpu_userq_wait_return_fence_info(struct drm_file *filp, if (r) goto free_fences; - if (num_fences >= wait_info->num_fences) { - dma_fence_put(fence); - r = -EINVAL; + r = amdgpu_userq_wait_add_fence(wait_info, fences, + &num_fences, fence); + dma_fence_put(fence); + if (r) goto free_fences; - } - /* Give the reference to the fence array */ - fences[num_fences++] = fence; } /* Lock all the GEM objects */ @@ -817,12 +827,10 @@ amdgpu_userq_wait_return_fence_info(struct drm_file *filp, dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv, DMA_RESV_USAGE_READ, fence) { - if (num_fences >= wait_info->num_fences) { - r = -EINVAL; + r = amdgpu_userq_wait_add_fence(wait_info, fences, + &num_fences, fence); + if (r) goto error_unlock; - } - - fences[num_fences++] = dma_fence_get(fence); } } @@ -833,12 +841,10 @@ amdgpu_userq_wait_return_fence_info(struct drm_file *filp, dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv, DMA_RESV_USAGE_WRITE, fence) { - if (num_fences >= wait_info->num_fences) { - r = -EINVAL; + r = amdgpu_userq_wait_add_fence(wait_info, fences, + &num_fences, fence); + if (r) goto error_unlock; - } - - fences[num_fences++] = dma_fence_get(fence); } }