drm/amdgpu: always backup and reemit fences
author Alex Deucher <alexander.deucher@amd.com>
Thu, 13 Nov 2025 19:12:10 +0000 (14:12 -0500)
committer Alex Deucher <alexander.deucher@amd.com>
Mon, 5 Jan 2026 22:28:45 +0000 (17:28 -0500)
When we back up the ring contents for reemit before a ring reset,
we skip jobs associated with the bad context; however, we need to
make sure the fences are reemitted, as unprocessed submissions may
depend on them.
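
For illustration only, a standalone sketch (not the driver code) that
models the backup decision with simplified stand-in types. The struct
sub, backup_range() helper, and the numeric ranges below are made up
for the example; the real logic lives in
amdgpu_ring_backup_unprocessed_commands() in the diff below.

    /*
     * Standalone model with made-up types: a submission occupies ring
     * words [start, end) and its fence packet occupies
     * [fence_start, fence_end) at the tail of that range.
     */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    struct sub {
            uint64_t ctx;          /* fence context of the submission */
            uint64_t start, end;   /* whole submission in the ring */
            uint64_t fence_start;  /* fence packet only */
            uint64_t fence_end;
    };

    /* Pick the ring range to copy for reemit after a reset. */
    static void backup_range(const struct sub *s, uint64_t guilty_ctx,
                             uint64_t *from, uint64_t *to)
    {
            if (s->ctx != guilty_ctx) {
                    /* innocent context: replay the whole submission */
                    *from = s->start;
                    *to = s->end;
            } else {
                    /*
                     * guilty context: drop the job but keep its fence so
                     * later submissions that wait on it still signal
                     */
                    *from = s->fence_start;
                    *to = s->fence_end;
            }
    }

    int main(void)
    {
            struct sub good = { .ctx = 1, .start = 0,  .end = 32,
                                .fence_start = 28, .fence_end = 32 };
            struct sub bad  = { .ctx = 2, .start = 32, .end = 64,
                                .fence_start = 60, .fence_end = 64 };
            uint64_t from, to;

            backup_range(&good, 2, &from, &to);
            printf("good ctx: copy [%" PRIu64 ", %" PRIu64 ")\n", from, to);
            backup_range(&bad, 2, &from, &to);
            printf("bad ctx:  copy [%" PRIu64 ", %" PRIu64 ")\n", from, to);
            return 0;
    }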

v2: clean up fence handling, make helpers static

Reviewed-by: Timur Kristóf <timur.kristof@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
(cherry picked from commit 155a748f14bc0b72783994dea7c5a12276730342)

drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h

index 4f74a02a9a05c4f09fffef28fc5bd5ab6183f3db..06c333b2213b0eb32de02d9540da297759ef9864 100644 (file)
@@ -89,6 +89,16 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
        return seq;
 }
 
+static void amdgpu_fence_save_fence_wptr_start(struct amdgpu_fence *af)
+{
+       af->fence_wptr_start = af->ring->wptr;
+}
+
+static void amdgpu_fence_save_fence_wptr_end(struct amdgpu_fence *af)
+{
+       af->fence_wptr_end = af->ring->wptr;
+}
+
 /**
  * amdgpu_fence_emit - emit a fence on the requested ring
  *
@@ -116,8 +126,10 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
                       &ring->fence_drv.lock,
                       adev->fence_context + ring->idx, seq);
 
+       amdgpu_fence_save_fence_wptr_start(af);
        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
                               seq, flags | AMDGPU_FENCE_FLAG_INT);
+       amdgpu_fence_save_fence_wptr_end(af);
        amdgpu_fence_save_wptr(af);
        pm_runtime_get_noresume(adev_to_drm(adev)->dev);
        ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
@@ -742,10 +754,6 @@ void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af)
                /* if we've already reemitted once then just cancel everything */
                amdgpu_fence_driver_force_completion(af->ring);
                af->ring->ring_backup_entries_to_copy = 0;
-       } else {
-               /* signal the guilty fence */
-               amdgpu_fence_write(ring, (u32)af->base.seqno);
-               amdgpu_fence_process(ring);
        }
 }
 
@@ -795,9 +803,15 @@ void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
                         * just save the content from other contexts.
                         */
                        if (!fence->reemitted &&
-                           (!guilty_fence || (fence->context != guilty_fence->context)))
+                           (!guilty_fence || (fence->context != guilty_fence->context))) {
                                amdgpu_ring_backup_unprocessed_command(ring, wptr,
                                                                       fence->wptr);
+                       } else if (!fence->reemitted) {
+                               /* always save the fence */
+                               amdgpu_ring_backup_unprocessed_command(ring,
+                                                                      fence->fence_wptr_start,
+                                                                      fence->fence_wptr_end);
+                       }
                        wptr = fence->wptr;
                        fence->reemitted++;
                }
index 5044cf9e45fb7989577b1c039ad60d224dadcf56..055437d4edf9f03a47772545b0e016a673e6e6a9 100644 (file)
@@ -144,12 +144,15 @@ struct amdgpu_fence {
        struct amdgpu_ring              *ring;
        ktime_t                         start_timestamp;
 
-       /* wptr for the fence for resets */
+       /* wptr for the total submission for resets */
        u64                             wptr;
        /* fence context for resets */
        u64                             context;
        /* has this fence been reemitted */
        unsigned int                    reemitted;
+       /* wptr for the fence for the submission */
+       u64                             fence_wptr_start;
+       u64                             fence_wptr_end;
 };
 
 extern const struct drm_sched_backend_ops amdgpu_sched_ops;