git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/amdgpu/sdma: consolidate engine reset handling
author: Alex Deucher <alexander.deucher@amd.com>
Thu, 26 Jun 2025 12:49:07 +0000 (08:49 -0400)
committer: Alex Deucher <alexander.deucher@amd.com>
Mon, 7 Jul 2025 17:48:20 +0000 (13:48 -0400)
Move the force completion handling into the common
engine reset function.  No need to duplicate it for
every IP version.

Reviewed-by: Jesse Zhang <Jesse.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c

index 7e26a44dcc1fd1306d892e4f9e16efc9c1c1d88c..56939bb1d1a951fadf9bd04c133880bb589443de 100644 (file)
@@ -590,9 +590,12 @@ exit:
         * to be submitted to the queues after the reset is complete.
         */
        if (!ret) {
+               amdgpu_fence_driver_force_completion(gfx_ring);
                drm_sched_wqueue_start(&gfx_ring->sched);
-               if (adev->sdma.has_page_queue)
+               if (adev->sdma.has_page_queue) {
+                       amdgpu_fence_driver_force_completion(page_ring);
                        drm_sched_wqueue_start(&page_ring->sched);
+               }
        }
        mutex_unlock(&sdma_instance->engine_reset_mutex);
 
index c05f3c1f50db4894ed2fc3b45623ac1734d77355..a7e1dbe03b2942006de7b8a9a451c0fde03074d9 100644 (file)
@@ -1714,7 +1714,7 @@ static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring)
 static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
-       u32 inst_mask, tmp_mask;
+       u32 inst_mask;
        int i, r;
 
        inst_mask = 1 << ring->me;
@@ -1733,21 +1733,6 @@ static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring)
        }
 
        r = sdma_v4_4_2_inst_start(adev, inst_mask, true);
-       if (r)
-               return r;
-
-       tmp_mask = inst_mask;
-       for_each_inst(i, tmp_mask) {
-               ring = &adev->sdma.instance[i].ring;
-
-               amdgpu_fence_driver_force_completion(ring);
-
-               if (adev->sdma.has_page_queue) {
-                       struct amdgpu_ring *page = &adev->sdma.instance[i].page;
-
-                       amdgpu_fence_driver_force_completion(page);
-               }
-       }
 
        return r;
 }
index 4d72b085b3dd77c185545ff0aea20ff1f134a8a7..ed1706da7deecd1521b74849055f7656af9efdf3 100644 (file)
@@ -1618,10 +1618,8 @@ static int sdma_v5_0_restore_queue(struct amdgpu_ring *ring)
 
        r = sdma_v5_0_gfx_resume_instance(adev, inst_id, true);
        amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
-       if (r)
-               return r;
-       amdgpu_fence_driver_force_completion(ring);
-       return 0;
+
+       return r;
 }
 
 static int sdma_v5_0_ring_preempt_ib(struct amdgpu_ring *ring)
index 42a25150f83ac2a7a048515a5f374a1e147f2933..b87a4b44fa939c549bbea9a513402c8374121789 100644 (file)
@@ -1534,10 +1534,8 @@ static int sdma_v5_2_restore_queue(struct amdgpu_ring *ring)
        r = sdma_v5_2_gfx_resume_instance(adev, inst_id, true);
 
        amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
-       if (r)
-               return r;
-       amdgpu_fence_driver_force_completion(ring);
-       return 0;
+
+       return r;
 }
 
 static int sdma_v5_2_ring_preempt_ib(struct amdgpu_ring *ring)