]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/amdgpu: Add queue id support to the user queue wait IOCTL
authorArunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
Fri, 11 Apr 2025 09:38:30 +0000 (15:08 +0530)
committerAlex Deucher <alexander.deucher@amd.com>
Tue, 22 Apr 2025 12:51:44 +0000 (08:51 -0400)
Add queue id support to the user queue wait IOCTL via a new
waitq_id field in the drm_amdgpu_userq_wait structure.

This is required to retrieve the waiting user queue and maintain
the fence driver references in it, so that each user queue in
the same context releases its references to the fence drivers
at some point before queue destruction.

Otherwise, we would keep accumulating those references until we
run out of space and crash.

v2: Modify the UAPI comment as per the mesa and libdrm UAPI comment.

Libdrm MR: https://gitlab.freedesktop.org/mesa/drm/-/merge_requests/408
Mesa MR: https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/34493

Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
Suggested-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
include/uapi/drm/amdgpu_drm.h

index 0a3032e01c3429af1ab5b07e1dc801cfd239fa57..ca198360cfdad9142ae6b1c2701449673cbcd6e5 100644 (file)
@@ -91,7 +91,6 @@ int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
        spin_lock_init(&fence_drv->fence_list_lock);
 
        fence_drv->adev = adev;
-       fence_drv->fence_drv_xa_ptr = &userq->fence_drv_xa;
        fence_drv->context = dma_fence_context_alloc(1);
        get_task_comm(fence_drv->timeline_name, current);
 
@@ -611,6 +610,9 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
        u32 num_syncobj, num_read_bo_handles, num_write_bo_handles;
        struct drm_amdgpu_userq_fence_info *fence_info = NULL;
        struct drm_amdgpu_userq_wait *wait_info = data;
+       struct amdgpu_fpriv *fpriv = filp->driver_priv;
+       struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
+       struct amdgpu_usermode_queue *waitq;
        struct drm_gem_object **gobj_write;
        struct drm_gem_object **gobj_read;
        struct dma_fence **fences = NULL;
@@ -860,6 +862,10 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
                        fences[num_fences++] = fence;
                }
 
+               waitq = idr_find(&userq_mgr->userq_idr, wait_info->waitq_id);
+               if (!waitq)
+                       goto free_fences;
+
                for (i = 0, cnt = 0; i < num_fences; i++) {
                        struct amdgpu_userq_fence_driver *fence_drv;
                        struct amdgpu_userq_fence *userq_fence;
@@ -888,14 +894,12 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
                         * Otherwise, we would gather those references until we don't
                         * have any more space left and crash.
                         */
-                       if (fence_drv->fence_drv_xa_ptr) {
-                               r = xa_alloc(fence_drv->fence_drv_xa_ptr, &index, fence_drv,
-                                            xa_limit_32b, GFP_KERNEL);
-                               if (r)
-                                       goto free_fences;
+                       r = xa_alloc(&waitq->fence_drv_xa, &index, fence_drv,
+                                    xa_limit_32b, GFP_KERNEL);
+                       if (r)
+                               goto free_fences;
 
-                               amdgpu_userq_fence_driver_get(fence_drv);
-                       }
+                       amdgpu_userq_fence_driver_get(fence_drv);
 
                        /* Store drm syncobj's gpu va address and value */
                        fence_info[cnt].va = fence_drv->va;
index 7bbae238cca0b08719ea387217a1a877c39ba37d..2af4e0c157732be3504da98a67ec2771d87094f3 100644 (file)
@@ -55,7 +55,6 @@ struct amdgpu_userq_fence_driver {
        spinlock_t fence_list_lock;
        struct list_head fences;
        struct amdgpu_device *adev;
-       struct xarray *fence_drv_xa_ptr;
        char timeline_name[TASK_COMM_LEN];
 };
 
index 4e02d6cc66b537b13d3d315aaab89e3c879ce49d..8f6510b78deeec45afb7a5e8fa1beca1aee9eee7 100644 (file)
@@ -51,7 +51,6 @@ amdgpu_userqueue_cleanup(struct amdgpu_userq_mgr *uq_mgr,
        }
 
        uq_funcs->mqd_destroy(uq_mgr, queue);
-       queue->fence_drv->fence_drv_xa_ptr = NULL;
        amdgpu_userq_fence_driver_free(queue);
        idr_remove(&uq_mgr->userq_idr, queue_id);
        kfree(queue);
index 284ac25ab5c488b54b0431d5af1ee41b3dd7b3e4..1fd96474e64c06c46d9bebf90a00e18115bb376d 100644 (file)
@@ -515,6 +515,12 @@ struct drm_amdgpu_userq_fence_info {
 };
 
 struct drm_amdgpu_userq_wait {
+       /**
+        * @waitq_id: Queue handle used by the userq wait IOCTL to retrieve the
+        * wait queue and maintain the fence driver references in it.
+        */
+       __u32   waitq_id;
+       __u32   pad;
        /**
         * @syncobj_handles: The list of syncobj handles submitted by the user queue
         * job to get the va/value pairs.