git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/amdgpu: Make amdgpu_vm_flush() non-failing in submission path
authorSrinivasan Shanmugam <srinivasan.shanmugam@amd.com>
Thu, 12 Feb 2026 16:00:50 +0000 (21:30 +0530)
committerAlex Deucher <alexander.deucher@amd.com>
Mon, 23 Feb 2026 19:16:31 +0000 (14:16 -0500)
amdgpu_vm_flush() is used during job submission and is not expected to
fail. Convert it to return void and simplify the caller.

Initialize the COND_EXEC patch location to 0 so it is safe to call
amdgpu_ring_patch_cond_exec() when init_cond_exec is not supported.

Suggested-by: Christian König <christian.koenig@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h

index 647b752ed2b5e637246888171ab89eeeb1e1c775..276e0236db45621534bb494b4c06f7a3b51ee3e3 100644 (file)
@@ -215,13 +215,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
                dma_fence_put(tmp);
        }
 
-       if (job) {
-               r = amdgpu_vm_flush(ring, job, need_pipe_sync);
-               if (r) {
-                       amdgpu_ring_undo(ring);
-                       goto free_fence;
-               }
-       }
+       if (job)
+               amdgpu_vm_flush(ring, job, need_pipe_sync);
 
        amdgpu_ring_ib_begin(ring);
 
index 33efcb24090bd0064c70567dc9b6f56a116f419e..f31b2116a7dec6feffccdfeee1ac5bb75ae04ec4 100644 (file)
@@ -764,12 +764,9 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
  * @need_pipe_sync: is pipe sync needed
  *
  * Emit a VM flush when it is necessary.
- *
- * Returns:
- * 0 on success, errno otherwise.
  */
-int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
-                   bool need_pipe_sync)
+void amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
+                    bool need_pipe_sync)
 {
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id];
@@ -811,7 +808,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
 
        if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync &&
            !cleaner_shader_needed)
-               return 0;
+               return;
 
        amdgpu_ring_ib_begin(ring);
 
@@ -900,7 +897,6 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
        }
 
        amdgpu_ring_ib_end(ring);
-       return 0;
 }
 
 /**
index 806d62ed61efff7be545ac3419e2657aa27704fe..dc4b0ec672ec0239a4dad373f53be52ea904b823 100644 (file)
@@ -513,7 +513,7 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                       struct ww_acquire_ctx *ticket,
                       int (*callback)(void *p, struct amdgpu_bo *bo),
                       void *param);
-int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
+void amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
                          struct amdgpu_vm *vm, bool immediate);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,