drm/amdgpu/userq: move waiting for last fence before umap
author    Alex Deucher <alexander.deucher@amd.com>
          Wed, 16 Apr 2025 18:40:25 +0000 (14:40 -0400)
committer Alex Deucher <alexander.deucher@amd.com>
          Tue, 22 Apr 2025 12:51:45 +0000 (08:51 -0400)
Need to wait for the last fence before unmapping the queue.  This
also fixes a memory leak in amdgpu_userqueue_cleanup() when the
fence isn't signaled.

Fixes: b0db33c8c50f ("drm/amdgpu/userq: rework front end call sequence")
Reviewed-by: Prike Liang <Prike.Liang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
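
For context, dma_fence_wait_timeout() returns the remaining timeout in
jiffies when the fence signals, 0 on a timeout, or a negative errno if an
interruptible wait is interrupted, so the patch's "ret <= 0" test covers
both failure modes.  A minimal sketch of that contract (illustrative only;
wait_and_report() is a hypothetical helper, not driver code):

#include <linux/device.h>
#include <linux/dma-fence.h>
#include <linux/jiffies.h>

static void wait_and_report(struct device *dev, struct dma_fence *f)
{
        long ret;

        if (!f || dma_fence_is_signaled(f))
                return;

        /* Interruptible wait with a 100 ms budget, as in the patch. */
        ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
        if (ret > 0)
                return;                 /* signaled; ret = jiffies remaining */
        if (ret == 0)
                dev_err(dev, "timed out waiting for fence %p\n", f);
        else                            /* e.g. -ERESTARTSYS */
                dev_err(dev, "fence wait interrupted: %ld\n", ret);
}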
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
index d821e5d57417105c209adb4e9952445720cab8d7..b75b93e69c09aa079b845ac48cd01ed25290ec6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
@@ -84,23 +84,27 @@ amdgpu_userqueue_map_helper(struct amdgpu_userq_mgr *uq_mgr,
 }
 
 static void
-amdgpu_userqueue_cleanup(struct amdgpu_userq_mgr *uq_mgr,
-                        struct amdgpu_usermode_queue *queue,
-                        int queue_id)
+amdgpu_userqueue_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr,
+                                    struct amdgpu_usermode_queue *queue)
 {
        struct amdgpu_device *adev = uq_mgr->adev;
-       const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];
        struct dma_fence *f = queue->last_fence;
        int ret;
 
        if (f && !dma_fence_is_signaled(f)) {
                ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
-               if (ret <= 0) {
-                       DRM_ERROR("Timed out waiting for fence=%llu:%llu\n",
-                                 f->context, f->seqno);
-                       return;
-               }
+               if (ret <= 0)
+                       dev_err(adev->dev, "Timed out waiting for fence f=%p\n", f);
        }
+}
+
+static void
+amdgpu_userqueue_cleanup(struct amdgpu_userq_mgr *uq_mgr,
+                        struct amdgpu_usermode_queue *queue,
+                        int queue_id)
+{
+       struct amdgpu_device *adev = uq_mgr->adev;
+       const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];
 
        uq_funcs->mqd_destroy(uq_mgr, queue);
        amdgpu_userq_fence_driver_free(queue);
@@ -305,6 +309,7 @@ amdgpu_userqueue_destroy(struct drm_file *filp, int queue_id)
                mutex_unlock(&uq_mgr->userq_mutex);
                return -EINVAL;
        }
+       amdgpu_userqueue_wait_for_last_fence(uq_mgr, queue);
        r = amdgpu_userqueue_unmap_helper(uq_mgr, queue);
        amdgpu_bo_unpin(queue->db_obj.obj);
        amdgpu_bo_unref(&queue->db_obj.obj);
@@ -780,6 +785,7 @@ void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
 
        mutex_lock(&userq_mgr->userq_mutex);
        idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) {
+               amdgpu_userqueue_wait_for_last_fence(userq_mgr, queue);
                amdgpu_userqueue_unmap_helper(userq_mgr, queue);
                amdgpu_userqueue_cleanup(userq_mgr, queue, queue_id);
        }
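
The memory-leak half of the fix comes from dropping the early return: in
the old amdgpu_userqueue_cleanup(), a timed-out wait bailed out before
mqd_destroy() and amdgpu_userq_fence_driver_free() ran, leaking those
resources whenever the fence never signaled.  Simplified before/after
(surrounding driver code elided):

/* Before: a timed-out wait returned early, skipping all teardown. */
if (f && !dma_fence_is_signaled(f)) {
        ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
        if (ret <= 0)
                return;         /* leak: the frees below never run */
}

/* After: the wait lives in its own helper that only logs on failure,
 * so cleanup always releases the queue's resources.
 */
uq_funcs->mqd_destroy(uq_mgr, queue);
amdgpu_userq_fence_driver_free(queue);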