git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/amdgpu: remove hdp flush/invalidation completely for gfx12.1.0/sdma7.1.0
authorLe Ma <le.ma@amd.com>
Tue, 25 Feb 2025 11:24:33 +0000 (19:24 +0800)
committerAlex Deucher <alexander.deucher@amd.com>
Mon, 8 Dec 2025 18:56:43 +0000 (13:56 -0500)
Remove the hdp operation and interfaces as the HDP hw does not exist.

v2: add checks to see if hdp funcs exist before doing hdp flush/invalidation

Signed-off-by: Le Ma <le.ma@amd.com>
Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c
drivers/gpu/drm/amd/amdgpu/sdma_v7_1.c

index c27d0bc60523fb785a00bdc834cab88cbdb22ed2..6e4583d8fffb3dcb663c9744a68a25a6c78820a4 100644 (file)
@@ -2427,13 +2427,11 @@ static int gfx_v12_1_gfxhub_enable(struct amdgpu_device *adev)
        if (r)
                return r;
 
-       adev->hdp.funcs->flush_hdp(adev, NULL);
-
        value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
                false : true;
 
        adev->gfxhub.funcs->set_fault_enable_default(adev, value);
-       /* TODO investigate why this and the hdp flush above is needed,
+       /* TODO investigate why TLB flush is needed,
         * are we missing a flush somewhere else? */
        adev->gmc.gmc_funcs->flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);
 
@@ -3145,35 +3143,6 @@ static void gfx_v12_1_ring_set_wptr_compute(struct amdgpu_ring *ring)
        }
 }
 
-static void gfx_v12_1_ring_emit_hdp_flush(struct amdgpu_ring *ring)
-{
-       struct amdgpu_device *adev = ring->adev;
-       u32 ref_and_mask, reg_mem_engine;
-       const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
-
-       if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
-               switch (ring->me) {
-               case 1:
-                       ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
-                       break;
-               case 2:
-                       ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
-                       break;
-               default:
-                       return;
-               }
-               reg_mem_engine = 0;
-       } else {
-               ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
-               reg_mem_engine = 1; /* pfp */
-       }
-
-       gfx_v12_1_wait_reg_mem(ring, reg_mem_engine, 0, 1,
-                              adev->nbio.funcs->get_hdp_flush_req_offset(adev),
-                              adev->nbio.funcs->get_hdp_flush_done_offset(adev),
-                              ref_and_mask, ref_and_mask, 0x20);
-}
-
 static void gfx_v12_1_ring_emit_ib_compute(struct amdgpu_ring *ring,
                                           struct amdgpu_job *job,
                                           struct amdgpu_ib *ib,
@@ -3658,8 +3627,6 @@ static const struct amdgpu_ring_funcs gfx_v12_1_ring_funcs_compute = {
        .get_wptr = gfx_v12_1_ring_get_wptr_compute,
        .set_wptr = gfx_v12_1_ring_set_wptr_compute,
        .emit_frame_size =
-               7 + /* gfx_v12_1_ring_emit_hdp_flush */
-               5 + /* hdp invalidate */
                7 + /* gfx_v12_1_ring_emit_pipeline_sync */
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
@@ -3671,7 +3638,6 @@ static const struct amdgpu_ring_funcs gfx_v12_1_ring_funcs_compute = {
        .emit_fence = gfx_v12_1_ring_emit_fence,
        .emit_pipeline_sync = gfx_v12_1_ring_emit_pipeline_sync,
        .emit_vm_flush = gfx_v12_1_ring_emit_vm_flush,
-       .emit_hdp_flush = gfx_v12_1_ring_emit_hdp_flush,
        .test_ring = gfx_v12_1_ring_test_ring,
        .test_ib = gfx_v12_1_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
@@ -3691,8 +3657,6 @@ static const struct amdgpu_ring_funcs gfx_v12_1_ring_funcs_kiq = {
        .get_wptr = gfx_v12_1_ring_get_wptr_compute,
        .set_wptr = gfx_v12_1_ring_set_wptr_compute,
        .emit_frame_size =
-               7 + /* gfx_v12_1_ring_emit_hdp_flush */
-               5 + /*hdp invalidate */
                7 + /* gfx_v12_1_ring_emit_pipeline_sync */
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
index 89ce07ae18b4df1dc8cd0f1d6cdae2d8fc3d6e63..37f5095c1511eaa97be7a1f483719e96a6ba8a3c 100644 (file)
@@ -32,7 +32,6 @@
 
 #include "gc/gc_12_1_0_offset.h"
 #include "gc/gc_12_1_0_sh_mask.h"
-#include "hdp/hdp_6_0_0_offset.h"
 #include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"
 
 #include "soc15_common.h"
@@ -312,33 +311,6 @@ static void sdma_v7_1_ring_emit_mem_sync(struct amdgpu_ring *ring)
 }
 
 
-/**
- * sdma_v7_1_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
- *
- * @ring: amdgpu ring pointer
- *
- * Emit an hdp flush packet on the requested DMA ring.
- */
-static void sdma_v7_1_ring_emit_hdp_flush(struct amdgpu_ring *ring)
-{
-       struct amdgpu_device *adev = ring->adev;
-       u32 ref_and_mask = 0;
-       const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
-
-       ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0
-                               << (ring->me % adev->sdma.num_inst_per_xcc);
-
-       amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_POLL_REGMEM) |
-                         SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
-                         SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
-       amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
-       amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
-       amdgpu_ring_write(ring, ref_and_mask); /* reference */
-       amdgpu_ring_write(ring, ref_and_mask); /* mask */
-       amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
-                         SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
-}
-
 /**
  * sdma_v7_1_ring_emit_fence - emit a fence on the DMA ring
  *
@@ -1215,7 +1187,6 @@ static void sdma_v7_1_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 
        /* wait for idle */
        amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_POLL_REGMEM) |
-                         SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
                          SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
                          SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
        amdgpu_ring_write(ring, addr & 0xfffffffc);
@@ -1257,7 +1228,6 @@ static void sdma_v7_1_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
                                         uint32_t val, uint32_t mask)
 {
        amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_POLL_REGMEM) |
-                         SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
                          SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
        amdgpu_ring_write(ring, reg << 2);
        amdgpu_ring_write(ring, 0);
@@ -1667,7 +1637,6 @@ static const struct amdgpu_ring_funcs sdma_v7_1_ring_funcs = {
        .set_wptr = sdma_v7_1_ring_set_wptr,
        .emit_frame_size =
                5 + /* sdma_v7_1_ring_init_cond_exec */
-               6 + /* sdma_v7_1_ring_emit_hdp_flush */
                6 + /* sdma_v7_1_ring_emit_pipeline_sync */
                /* sdma_v7_1_ring_emit_vm_flush */
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
@@ -1679,7 +1648,6 @@ static const struct amdgpu_ring_funcs sdma_v7_1_ring_funcs = {
        .emit_fence = sdma_v7_1_ring_emit_fence,
        .emit_pipeline_sync = sdma_v7_1_ring_emit_pipeline_sync,
        .emit_vm_flush = sdma_v7_1_ring_emit_vm_flush,
-       .emit_hdp_flush = sdma_v7_1_ring_emit_hdp_flush,
        .test_ring = sdma_v7_1_ring_test_ring,
        .test_ib = sdma_v7_1_ring_test_ib,
        .insert_nop = sdma_v7_1_ring_insert_nop,