drm/amd: Fix error handling with multiple userq IDRs
author     Mario Limonciello <mario.limonciello@amd.com>
           Thu, 2 Oct 2025 17:42:44 +0000 (12:42 -0500)
committer  Alex Deucher <alexander.deucher@amd.com>
           Mon, 13 Oct 2025 18:14:34 +0000 (14:14 -0400)
If multiple userq IDRs are in use and an error occurs while handling one of
them during suspend or resume, that error is silently discarded.
Switch the suspend/resume code to use guards and return immediately on the
first error.
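
For reference, a minimal sketch of the scoped-lock pattern the patch switches
to (not part of the commit; example_lock, example_process_all and process_one
are hypothetical names, while guard(mutex)() is the real helper from
<linux/cleanup.h>): the guard releases the mutex automatically when it goes
out of scope, so an early return on error no longer leaks a held lock.

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);	/* hypothetical lock, for illustration only */

static int example_process_all(int (*process_one)(int id), int count)
{
	int id, r;

	/* Unlocked automatically on every return path in this scope. */
	guard(mutex)(&example_lock);

	for (id = 0; id < count; id++) {
		r = process_one(id);
		if (r)
			return r;	/* first error is propagated, not discarded */
	}

	return 0;
}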

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c

index 9ad366f2977676b9a1ceab7b78884569883e68f5..1400114dc9342383d75105ccd707c093c986efc6 100644
@@ -1076,27 +1076,25 @@ int amdgpu_userq_suspend(struct amdgpu_device *adev)
        struct amdgpu_usermode_queue *queue;
        struct amdgpu_userq_mgr *uqm, *tmp;
        int queue_id;
-       int ret = 0, r;
+       int r;
 
        if (!ip_mask)
                return 0;
 
-       mutex_lock(&adev->userq_mutex);
+       guard(mutex)(&adev->userq_mutex);
        list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
                cancel_delayed_work_sync(&uqm->resume_work);
-               mutex_lock(&uqm->userq_mutex);
+               guard(mutex)(&uqm->userq_mutex);
                idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
                        if (adev->in_s0ix)
                                r = amdgpu_userq_preempt_helper(uqm, queue);
                        else
                                r = amdgpu_userq_unmap_helper(uqm, queue);
                        if (r)
-                               ret = r;
+                               return r;
                }
-               mutex_unlock(&uqm->userq_mutex);
        }
-       mutex_unlock(&adev->userq_mutex);
-       return ret;
+       return 0;
 }
 
 int amdgpu_userq_resume(struct amdgpu_device *adev)
@@ -1105,26 +1103,25 @@ int amdgpu_userq_resume(struct amdgpu_device *adev)
        struct amdgpu_usermode_queue *queue;
        struct amdgpu_userq_mgr *uqm, *tmp;
        int queue_id;
-       int ret = 0, r;
+       int r;
 
        if (!ip_mask)
                return 0;
 
-       mutex_lock(&adev->userq_mutex);
+       guard(mutex)(&adev->userq_mutex);
        list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
-               mutex_lock(&uqm->userq_mutex);
+               guard(mutex)(&uqm->userq_mutex);
                idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
                        if (adev->in_s0ix)
                                r = amdgpu_userq_restore_helper(uqm, queue);
                        else
                                r = amdgpu_userq_map_helper(uqm, queue);
                        if (r)
-                               ret = r;
+                               return r;
                }
-               mutex_unlock(&uqm->userq_mutex);
        }
-       mutex_unlock(&adev->userq_mutex);
-       return ret;
+
+       return 0;
 }
 
 int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,