]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/amdgpu: use GPU_HDP_FLUSH for sriov
authorVictor Zhao <Victor.Zhao@amd.com>
Thu, 9 Oct 2025 02:42:48 +0000 (10:42 +0800)
committerAlex Deucher <alexander.deucher@amd.com>
Mon, 20 Oct 2025 22:25:41 +0000 (18:25 -0400)
Currently the SRIOV runtime uses KIQ to write HDP_MEM_FLUSH_CNTL for the
HDP flush. This register needs to be written from the CPU for the NBIF to
take notice; otherwise the flush does not work.

Implement amdgpu_kiq_hdp_flush and use kiq to do gpu hdp flush during
sriov runtime.

v2:
- fall back to amdgpu_asic_flush_hdp when amdgpu_kiq_hdp_flush fails
- add function amdgpu_mes_hdp_flush

v3:
- changed returned error

Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
Signed-off-by: Victor Zhao <Victor.Zhao@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h

index 038804d48341a2611fbd6fe36b4840d8ce559e91..a99185ed0642dac580f519c78801b4e062534497 100644 (file)
@@ -7315,10 +7315,17 @@ void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
        if (adev->gmc.xgmi.connected_to_cpu)
                return;
 
-       if (ring && ring->funcs->emit_hdp_flush)
+       if (ring && ring->funcs->emit_hdp_flush) {
                amdgpu_ring_emit_hdp_flush(ring);
-       else
-               amdgpu_asic_flush_hdp(adev, ring);
+               return;
+       }
+
+       if (!ring && amdgpu_sriov_runtime(adev)) {
+               if (!amdgpu_kiq_hdp_flush(adev))
+                       return;
+       }
+
+       amdgpu_asic_flush_hdp(adev, ring);
 }
 
 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
index ebe2b4c68b0f363348b8149a6c95b4fa477c6423..29c927f4d6df7cc73ce8990837bfbed7aa1ff3f0 100644 (file)
@@ -33,6 +33,7 @@
 #include "amdgpu_reset.h"
 #include "amdgpu_xcp.h"
 #include "amdgpu_xgmi.h"
+#include "amdgpu_mes.h"
 #include "nvd.h"
 
 /* delay 0.1 second to enable gfx off feature */
@@ -1194,6 +1195,75 @@ failed_kiq_write:
        dev_err(adev->dev, "failed to write reg:%x\n", reg);
 }
 
+int amdgpu_kiq_hdp_flush(struct amdgpu_device *adev)
+{
+       signed long r, cnt = 0;
+       unsigned long flags;
+       uint32_t seq;
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+       struct amdgpu_ring *ring = &kiq->ring;
+
+       if (amdgpu_device_skip_hw_access(adev))
+               return 0;
+
+       if (adev->enable_mes_kiq && adev->mes.ring[0].sched.ready)
+               return amdgpu_mes_hdp_flush(adev);
+
+       if (!ring->funcs->emit_hdp_flush) {
+               return -EOPNOTSUPP;
+       }
+
+       spin_lock_irqsave(&kiq->ring_lock, flags);
+       r = amdgpu_ring_alloc(ring, 32);
+       if (r)
+               goto failed_unlock;
+
+       amdgpu_ring_emit_hdp_flush(ring);
+       r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+       if (r)
+               goto failed_undo;
+
+       amdgpu_ring_commit(ring);
+       spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+       r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+       /* don't wait anymore for gpu reset case because this way may
+        * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
+        * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
+        * never return if we keep waiting in virt_kiq_rreg, which cause
+        * gpu_recover() hang there.
+        *
+        * also don't wait anymore for IRQ context
+        * */
+       if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
+               goto failed_kiq_hdp_flush;
+
+       might_sleep();
+       while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+               if (amdgpu_in_reset(adev))
+                       goto failed_kiq_hdp_flush;
+
+               msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+               r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+       }
+
+       if (cnt > MAX_KIQ_REG_TRY) {
+               dev_err(adev->dev, "failed to flush HDP via KIQ timeout\n");
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+
+failed_undo:
+       amdgpu_ring_undo(ring);
+failed_unlock:
+       spin_unlock_irqrestore(&kiq->ring_lock, flags);
+failed_kiq_hdp_flush:
+       dev_err(adev->dev, "failed to flush HDP via KIQ\n");
+       return r < 0 ? r : -EIO;
+}
+
 int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
 {
        if (amdgpu_num_kcq == -1) {
@@ -2485,3 +2555,4 @@ void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev)
                            &amdgpu_debugfs_compute_sched_mask_fops);
 #endif
 }
+
index fb5f7a0ee029fd629a1d5ee1479b0e0f676fd3ad..efd61a1ccc661e6c987f028f1e206721ed362805 100644 (file)
@@ -615,6 +615,7 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
                                  struct amdgpu_iv_entry *entry);
 uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id);
 void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id);
+int amdgpu_kiq_hdp_flush(struct amdgpu_device *adev);
 int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev);
 void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id);
 
index 45bf8309cb3758ef270efa533b524c8150937258..9c182ce501af44bdc7b65e86134e7c92d9cdb062 100644 (file)
@@ -528,6 +528,18 @@ error:
        return r;
 }
 
+int amdgpu_mes_hdp_flush(struct amdgpu_device *adev)
+{
+       uint32_t hdp_flush_req_offset, hdp_flush_done_offset, ref_and_mask;
+
+       hdp_flush_req_offset = adev->nbio.funcs->get_hdp_flush_req_offset(adev);
+       hdp_flush_done_offset = adev->nbio.funcs->get_hdp_flush_done_offset(adev);
+       ref_and_mask = adev->nbio.hdp_flush_reg->ref_and_mask_cp0;
+
+       return amdgpu_mes_reg_write_reg_wait(adev, hdp_flush_req_offset, hdp_flush_done_offset,
+                                            ref_and_mask, ref_and_mask);
+}
+
 int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
                                uint64_t process_context_addr,
                                uint32_t spi_gdbg_per_vmid_cntl,
index 9c27a68cb82f425c66f737042438cfd152a1e6c9..e989225b354bb4260a5dd5bc754ab8eeea014541 100644 (file)
@@ -429,6 +429,7 @@ int amdgpu_mes_wreg(struct amdgpu_device *adev,
 int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
                                  uint32_t reg0, uint32_t reg1,
                                  uint32_t ref, uint32_t mask);
+int amdgpu_mes_hdp_flush(struct amdgpu_device *adev);
 int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
                                uint64_t process_context_addr,
                                uint32_t spi_gdbg_per_vmid_cntl,