git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/amdgpu: Make amdgpu_fence_emit() non-failing v2
Author:     Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
AuthorDate: Tue, 10 Feb 2026 14:55:05 +0000 (20:25 +0530)
Committer:  Alex Deucher <alexander.deucher@amd.com>
CommitDate: Mon, 23 Feb 2026 19:16:31 +0000 (14:16 -0500)
dma_fence_wait(old, false) is not interruptible and cannot return an
error. Drop the unreachable error handling in amdgpu_fence_emit().

Since the function can no longer fail, convert amdgpu_fence_emit() to
return void and remove return value handling from all callers.

v2:
- Add comment explaining why dma_fence_wait(..., false)
  return value is ignored (Alex)

Suggested-by: Christian König <christian.koenig@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

index 1054d66c54facfea3fe01bd5f1f897f0ab5ba1c2..07568516c506656033c21d595f33825a5d26e7d0 100644 (file)
@@ -107,16 +107,14 @@ static void amdgpu_fence_save_fence_wptr_end(struct amdgpu_fence *af)
  * @flags: flags to pass into the subordinate .emit_fence() call
  *
  * Emits a fence command on the requested ring (all asics).
- * Returns 0 on success, -ENOMEM on failure.
  */
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
-                     unsigned int flags)
+void amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
+                      unsigned int flags)
 {
        struct amdgpu_device *adev = ring->adev;
        struct dma_fence *fence;
        struct dma_fence __rcu **ptr;
        uint32_t seq;
-       int r;
 
        fence = &af->base;
        af->ring = ring;
@@ -141,10 +139,13 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
                rcu_read_unlock();
 
                if (old) {
-                       r = dma_fence_wait(old, false);
+                       /*
+                        * dma_fence_wait(old, false) is not interruptible.
+                        * It will not return an error in this case.
+                        * So we can safely ignore the return value.
+                        */
+                       dma_fence_wait(old, false);
                        dma_fence_put(old);
-                       if (r)
-                               return r;
                }
        }
 
@@ -154,8 +155,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
         * emitting the fence would mess up the hardware ring buffer.
         */
        rcu_assign_pointer(*ptr, dma_fence_get(fence));
-
-       return 0;
 }
 
 /**
index bfccb03193d323baa354841272a7cbeb499ae6ee..647b752ed2b5e637246888171ab89eeeb1e1c775 100644 (file)
@@ -297,14 +297,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
                amdgpu_ring_init_cond_exec(ring, ring->cond_exe_gpu_addr);
        }
 
-       r = amdgpu_fence_emit(ring, af, fence_flags);
-       if (r) {
-               dev_err(adev->dev, "failed to emit fence (%d)\n", r);
-               if (job && job->vmid)
-                       amdgpu_vmid_reset(adev, ring->vm_hub, job->vmid);
-               amdgpu_ring_undo(ring);
-               goto free_fence;
-       }
+       amdgpu_fence_emit(ring, af, fence_flags);
        *f = &af->base;
        /* get a ref for the job */
        if (job)
index 1abd8fdb5cef629fe2b33183ba156479a1f2242d..5a82db0888f0ae1dfeda6b71ef97302e6d77d93c 100644 (file)
@@ -172,8 +172,8 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
 void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
 int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
 void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
-                     unsigned int flags);
+void amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
+                      unsigned int flags);
 int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
                              uint32_t timeout);
 bool amdgpu_fence_process(struct amdgpu_ring *ring);
index 83b8a41f559c3fe03c4a1d77a777c1c39f9b7e12..33efcb24090bd0064c70567dc9b6f56a116f419e 100644 (file)
@@ -783,8 +783,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
        bool cleaner_shader_needed = false;
        bool pasid_mapping_needed = false;
        struct dma_fence *fence = NULL;
-       unsigned int patch;
-       int r;
+       unsigned int patch = 0;
 
        if (amdgpu_vmid_had_gpu_reset(adev, id)) {
                gds_switch_needed = true;
@@ -856,9 +855,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
        }
 
        if (vm_flush_needed || pasid_mapping_needed || cleaner_shader_needed) {
-               r = amdgpu_fence_emit(ring, job->hw_vm_fence, 0);
-               if (r)
-                       return r;
+               amdgpu_fence_emit(ring, job->hw_vm_fence, 0);
                fence = &job->hw_vm_fence->base;
                /* get a ref for the job */
                dma_fence_get(fence);