 	return seq;
 }
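+
+/* capture the ring wptr just before the fence packet is written */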
+static void amdgpu_fence_save_fence_wptr_start(struct amdgpu_fence *af)
+{
+	af->fence_wptr_start = af->ring->wptr;
+}
+
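+/* capture the ring wptr right after the fence packet has been written */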
+static void amdgpu_fence_save_fence_wptr_end(struct amdgpu_fence *af)
+{
+	af->fence_wptr_end = af->ring->wptr;
+}
+
 /**
  * amdgpu_fence_emit - emit a fence on the requested ring
  *
 			 &ring->fence_drv.lock,
 			 adev->fence_context + ring->idx, seq);
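+	/* record where the fence packet starts and ends in the ring so
+	 * the fence alone can be re-emitted after a queue reset
+	 */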
+	amdgpu_fence_save_fence_wptr_start(af);
 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
 			       seq, flags | AMDGPU_FENCE_FLAG_INT);
+	amdgpu_fence_save_fence_wptr_end(af);
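+	/* unlike the fence_wptr range above, this saves the wptr of the
+	 * whole submission
+	 */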
 	amdgpu_fence_save_wptr(af);
 	pm_runtime_get_noresume(adev_to_drm(adev)->dev);
 	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
 		/* if we've already reemitted once then just cancel everything */
 		amdgpu_fence_driver_force_completion(af->ring);
 		af->ring->ring_backup_entries_to_copy = 0;
-	} else {
-		/* signal the guilty fence */
-		amdgpu_fence_write(ring, (u32)af->base.seqno);
-		amdgpu_fence_process(ring);
 	}
 }
 			 * just save the content from other contexts.
 			 */
 			if (!fence->reemitted &&
-			    (!guilty_fence || (fence->context != guilty_fence->context)))
+			    (!guilty_fence || (fence->context != guilty_fence->context))) {
 				amdgpu_ring_backup_unprocessed_command(ring, wptr,
 								       fence->wptr);
+			} else if (!fence->reemitted) {
+				/* always save the fence itself so the guilty
+				 * context's seqno still signals when the ring
+				 * is re-emitted
+				 */
+				amdgpu_ring_backup_unprocessed_command(ring,
+								       fence->fence_wptr_start,
+								       fence->fence_wptr_end);
+			}
 			wptr = fence->wptr;
 			fence->reemitted++;
 		}
 	struct amdgpu_ring *ring;
 	ktime_t start_timestamp;
-	/* wptr for the fence for resets */
+	/* ring wptr at the end of the whole submission, for resets */
 	u64 wptr;
 	/* fence context for resets */
 	u64 context;
 	/* has this fence been reemitted */
 	unsigned int reemitted;
+	/* start/end wptr of this submission's fence packet, for resets */
+	u64 fence_wptr_start;
+	u64 fence_wptr_end;
 };
 extern const struct drm_sched_backend_ops amdgpu_sched_ops;