drm/amdgpu: fix MES packet params issue when flushing HDP
author		chong li <chongli2@amd.com>
		Fri, 28 Nov 2025 02:51:51 +0000 (10:51 +0800)
committer	Alex Deucher <alexander.deucher@amd.com>
		Wed, 10 Dec 2025 22:38:21 +0000 (17:38 -0500)
v4:
Use the amdgpu_gfx_get_hdp_flush_mask() helper to get the ref_and_mask
for GFX9 through GFX12.

v3:
Unify the get_ref_and_mask function in amdgpu_gfx_funcs
to support both GFX11 and earlier generations.

v2:
Place get_ref_and_mask in amdgpu_gfx_funcs instead of amdgpu_ring,
since this function only assigns the CP entry.

v1:
Both the GFX ring and the MES ring use cp0 to flush HDP, which causes
a conflict.

Use the get_ref_and_mask function to assign the CP entry, and
reassign MES to use cp8 instead.
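
A minimal user-space sketch (not the kernel code itself) of the dispatch
this patch introduces. The ring struct, function name, and mask bits below
are simplified stand-ins; the real values come from
adev->nbio.hdp_flush_reg. It shows how giving each ring type its own CP
done bit keeps the GFX ring (cp0) and the MES ring (cp8) from waiting on
the same HDP flush completion bit:

#include <stdint.h>
#include <stdio.h>

enum ring_type { RING_GFX, RING_COMPUTE, RING_MES, RING_KIQ };

struct ring {
	enum ring_type type;
	uint32_t pipe;
};

/* Illustrative stand-ins for nbio_hdp_flush_reg->ref_and_mask_cpN. */
#define REF_AND_MASK_CP0 (1u << 0)
#define REF_AND_MASK_CP2 (1u << 2)
#define REF_AND_MASK_CP8 (1u << 8)
#define REF_AND_MASK_CP9 (1u << 9)

static int get_hdp_flush_mask(const struct ring *ring,
			      uint32_t *ref_and_mask, uint32_t *reg_mem_engine)
{
	switch (ring->type) {
	case RING_GFX:
		*ref_and_mask = REF_AND_MASK_CP0 << ring->pipe;
		*reg_mem_engine = 1;	/* pfp */
		return 0;
	case RING_COMPUTE:
		*ref_and_mask = REF_AND_MASK_CP2 << ring->pipe;
		*reg_mem_engine = 0;
		return 0;
	case RING_MES:
		*ref_and_mask = REF_AND_MASK_CP8;	/* was cp0: clashed with gfx */
		*reg_mem_engine = 0;
		return 0;
	case RING_KIQ:
		*ref_and_mask = REF_AND_MASK_CP9;
		*reg_mem_engine = 0;
		return 0;
	}
	return -1;	/* unsupported ring type */
}

int main(void)
{
	struct ring gfx = { RING_GFX, 0 };
	struct ring mes = { RING_MES, 0 };
	uint32_t mask = 0, engine = 0;

	get_hdp_flush_mask(&gfx, &mask, &engine);
	printf("gfx: ref_and_mask=0x%03x engine=%u\n", (unsigned int)mask, (unsigned int)engine);
	get_hdp_flush_mask(&mes, &mask, &engine);
	printf("mes: ref_and_mask=0x%03x engine=%u\n", (unsigned int)mask, (unsigned int)engine);
	return 0;
}

With distinct bits, the MES path (amdgpu_mes_reg_write_reg_wait()) polls
its own done bit instead of racing the GFX ring's wait on cp0.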

Signed-off-by: chong li <chongli2@amd.com>
Acked-by: Lijo Lazar <lijo.lazar@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c

index 0065e3b5588a37808cb7f3d328a4a99192553c8f..1683785f63cdbcf30f3e8f42d769eb65dd3f3e07 100644 (file)
@@ -1197,6 +1197,40 @@ failed_kiq_write:
        dev_err(adev->dev, "failed to write reg:%x\n", reg);
 }
 
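+/**
+ * amdgpu_gfx_get_hdp_flush_mask - get the reference and mask for HDP flush
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ref_and_mask: pointer to store the reference and mask
+ * @reg_mem_engine: pointer to store the register memory engine
+ *
+ * Selects a per-ring-type CP done bit (gfx: cp0, compute: cp2, mes: cp8,
+ * kiq: cp9) so that rings no longer contend on the same HDP flush done bit.
+ */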
+void amdgpu_gfx_get_hdp_flush_mask(struct amdgpu_ring *ring,
+               uint32_t *ref_and_mask, uint32_t *reg_mem_engine)
+{
+       const struct nbio_hdp_flush_reg *nbio_hf_reg;
+
+       if (!ring || !ref_and_mask || !reg_mem_engine) {
+               DRM_INFO("%s: invalid params\n", __func__);
+               return;
+       }
+
+       nbio_hf_reg = ring->adev->nbio.hdp_flush_reg;
+
+       switch (ring->funcs->type) {
+       case AMDGPU_RING_TYPE_GFX:
+               *ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
+               *reg_mem_engine = 1; /* pfp */
+               break;
+       case AMDGPU_RING_TYPE_COMPUTE:
+               *ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
+               *reg_mem_engine = 0;
+               break;
+       case AMDGPU_RING_TYPE_MES:
+               *ref_and_mask = nbio_hf_reg->ref_and_mask_cp8;
+               *reg_mem_engine = 0;
+               break;
+       case AMDGPU_RING_TYPE_KIQ:
+               *ref_and_mask = nbio_hf_reg->ref_and_mask_cp9;
+               *reg_mem_engine = 0;
+               break;
+       default:
+               DRM_ERROR("%s: unsupported ring type %d\n", __func__, ring->funcs->type);
+               return;
+       }
+}
+
 int amdgpu_kiq_hdp_flush(struct amdgpu_device *adev)
 {
        signed long r, cnt = 0;
index eebad3378352d1629436390752570b01182400d1..281e03ad32556bd9492c206f1eaf80f9ff13d615 100644 (file)
@@ -358,6 +358,8 @@ struct amdgpu_gfx_funcs {
                                     int num_xccs_per_xcp);
        int (*ih_node_to_logical_xcc)(struct amdgpu_device *adev, int ih_node);
        int (*get_xccs_per_xcp)(struct amdgpu_device *adev);
+       void (*get_hdp_flush_mask)(struct amdgpu_ring *ring,
+                               uint32_t *ref_and_mask, uint32_t *reg_mem_engine);
 };
 
 struct sq_work {
@@ -617,6 +619,8 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
                                  struct amdgpu_iv_entry *entry);
 uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id);
 void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id);
+void amdgpu_gfx_get_hdp_flush_mask(struct amdgpu_ring *ring,
+               uint32_t *ref_and_mask, uint32_t *reg_mem_engine);
 int amdgpu_kiq_hdp_flush(struct amdgpu_device *adev);
 int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev);
 void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id);
index 59e6b6f5233f4ca5d2f4772315fc17b7bd278311..dffa0f7276b7b0a35e1c9e9cc4191388d164592c 100644 (file)
@@ -557,11 +557,20 @@ error:
 
 int amdgpu_mes_hdp_flush(struct amdgpu_device *adev)
 {
-       uint32_t hdp_flush_req_offset, hdp_flush_done_offset, ref_and_mask;
+       uint32_t hdp_flush_req_offset, hdp_flush_done_offset;
+       struct amdgpu_ring *mes_ring;
+       uint32_t ref_and_mask = 0, reg_mem_engine = 0;
 
+       if (!adev->gfx.funcs->get_hdp_flush_mask) {
+               dev_err(adev->dev, "mes hdp flush is not supported.\n");
+               return -EINVAL;
+       }
+
+       mes_ring = &adev->mes.ring[0];
        hdp_flush_req_offset = adev->nbio.funcs->get_hdp_flush_req_offset(adev);
        hdp_flush_done_offset = adev->nbio.funcs->get_hdp_flush_done_offset(adev);
-       ref_and_mask = adev->nbio.hdp_flush_reg->ref_and_mask_cp0;
+
+       adev->gfx.funcs->get_hdp_flush_mask(mes_ring, &ref_and_mask, &reg_mem_engine);
 
        return amdgpu_mes_reg_write_reg_wait(adev, hdp_flush_req_offset, hdp_flush_done_offset,
                                             ref_and_mask, ref_and_mask, 0);
index aaed24f7e71682d9407bd1252c52479ea6b660df..31c2d33f02d5f5d4357e117cfddbdc504913339a 100644 (file)
@@ -4575,6 +4575,7 @@ static const struct amdgpu_gfx_funcs gfx_v10_0_gfx_funcs = {
        .select_me_pipe_q = &gfx_v10_0_select_me_pipe_q,
        .init_spm_golden = &gfx_v10_0_init_spm_golden_registers,
        .update_perfmon_mgcg = &gfx_v10_0_update_perfmon_mgcg,
+       .get_hdp_flush_mask = &amdgpu_gfx_get_hdp_flush_mask,
 };
 
 static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
@@ -8614,25 +8615,13 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
        u32 ref_and_mask, reg_mem_engine;
-       const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
-       if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
-               switch (ring->me) {
-               case 1:
-                       ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
-                       break;
-               case 2:
-                       ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
-                       break;
-               default:
-                       return;
-               }
-               reg_mem_engine = 0;
-       } else {
-               ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
-               reg_mem_engine = 1; /* pfp */
+       if (!adev->gfx.funcs->get_hdp_flush_mask) {
+               dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n", __func__);
+               return;
        }
 
+       adev->gfx.funcs->get_hdp_flush_mask(ring, &ref_and_mask, &reg_mem_engine);
        gfx_v10_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
                               adev->nbio.funcs->get_hdp_flush_req_offset(adev),
                               adev->nbio.funcs->get_hdp_flush_done_offset(adev),
index 39284b5ddefdddf04358e0b3c76e9573a4739530..79a6977d56b0e07a6ee1d20e44f73e4ba0f4ad20 100644 (file)
@@ -1085,6 +1085,7 @@ static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
        .select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
        .update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
        .get_gfx_shadow_info = &gfx_v11_0_get_gfx_shadow_info,
+       .get_hdp_flush_mask = &amdgpu_gfx_get_hdp_flush_mask,
 };
 
 static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
@@ -5837,25 +5838,13 @@ static void gfx_v11_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
        u32 ref_and_mask, reg_mem_engine;
-       const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
-       if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
-               switch (ring->me) {
-               case 1:
-                       ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
-                       break;
-               case 2:
-                       ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
-                       break;
-               default:
-                       return;
-               }
-               reg_mem_engine = 0;
-       } else {
-               ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
-               reg_mem_engine = 1; /* pfp */
+       if (!adev->gfx.funcs->get_hdp_flush_mask) {
+               dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n", __func__);
+               return;
        }
 
+       adev->gfx.funcs->get_hdp_flush_mask(ring, &ref_and_mask, &reg_mem_engine);
        gfx_v11_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
                               adev->nbio.funcs->get_hdp_flush_req_offset(adev),
                               adev->nbio.funcs->get_hdp_flush_done_offset(adev),
index 3db2eecd723db69fb505d5c91c4bc35facc24c87..b4dd954363ee14c3b9e58f93af2ee4f18669021f 100644 (file)
@@ -942,6 +942,7 @@ static const struct amdgpu_gfx_funcs gfx_v12_0_gfx_funcs = {
        .select_me_pipe_q = &gfx_v12_0_select_me_pipe_q,
        .update_perfmon_mgcg = &gfx_v12_0_update_perf_clk,
        .get_gfx_shadow_info = &gfx_v12_0_get_gfx_shadow_info,
+       .get_hdp_flush_mask = &amdgpu_gfx_get_hdp_flush_mask,
 };
 
 static int gfx_v12_0_gpu_early_init(struct amdgpu_device *adev)
@@ -4393,25 +4394,13 @@ static void gfx_v12_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
        u32 ref_and_mask, reg_mem_engine;
-       const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
-       if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
-               switch (ring->me) {
-               case 1:
-                       ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
-                       break;
-               case 2:
-                       ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
-                       break;
-               default:
-                       return;
-               }
-               reg_mem_engine = 0;
-       } else {
-               ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
-               reg_mem_engine = 1; /* pfp */
+       if (!adev->gfx.funcs->get_hdp_flush_mask) {
+               dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n", __func__);
+               return;
        }
 
+       adev->gfx.funcs->get_hdp_flush_mask(ring, &ref_and_mask, &reg_mem_engine);
        gfx_v12_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
                               adev->nbio.funcs->get_hdp_flush_req_offset(adev),
                               adev->nbio.funcs->get_hdp_flush_done_offset(adev),
index 66a4e4998106f7006e490b26dd178aa751663237..9c0bcf836b2ee95b952242d80035a69e3861a541 100644 (file)
@@ -2068,23 +2068,15 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
 static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
        u32 ref_and_mask;
-       int usepfp = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
+       u32 usepfp;
+       struct amdgpu_device *adev = ring->adev;
 
-       if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
-               switch (ring->me) {
-               case 1:
-                       ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
-                       break;
-               case 2:
-                       ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
-                       break;
-               default:
-                       return;
-               }
-       } else {
-               ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
+       if (!adev->gfx.funcs->get_hdp_flush_mask) {
+               dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n", __func__);
+               return;
        }
 
+       adev->gfx.funcs->get_hdp_flush_mask(ring, &ref_and_mask, &usepfp);
        amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
        amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
                                 WAIT_REG_MEM_FUNCTION(3) |  /* == */
@@ -4075,12 +4067,49 @@ static void gfx_v7_0_select_me_pipe_q(struct amdgpu_device *adev,
        cik_srbm_select(adev, me, pipe, q, vm);
 }
 
+/**
+ * gfx_v7_0_get_hdp_flush_mask - get the reference and mask for HDP flush
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ref_and_mask: pointer to store the reference and mask
+ * @reg_mem_engine: pointer to store the register memory engine
+ *
+ * Calculates the reference and mask for HDP flush based on the ring type and me.
+ */
+static void gfx_v7_0_get_hdp_flush_mask(struct amdgpu_ring *ring,
+                                       uint32_t *ref_and_mask, uint32_t *reg_mem_engine)
+{
+       if (!ring || !ref_and_mask || !reg_mem_engine) {
+               DRM_INFO("%s: invalid params\n", __func__);
+               return;
+       }
+
+       if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
+           ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
+               switch (ring->me) {
+               case 1:
+                       *ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
+                       break;
+               case 2:
+                       *ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
+                       break;
+               default:
+                       return;
+               }
+               *reg_mem_engine = 0;
+       } else {
+               *ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
+               *reg_mem_engine = 1;
+       }
+}
+
 static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
        .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
        .select_se_sh = &gfx_v7_0_select_se_sh,
        .read_wave_data = &gfx_v7_0_read_wave_data,
        .read_wave_sgprs = &gfx_v7_0_read_wave_sgprs,
-       .select_me_pipe_q = &gfx_v7_0_select_me_pipe_q
+       .select_me_pipe_q = &gfx_v7_0_select_me_pipe_q,
+       .get_hdp_flush_mask = &gfx_v7_0_get_hdp_flush_mask,
 };
 
 static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
index 5d6e8e0601cb75630e37ae844a226e6a8a9c6610..5c1f8230be8ce106f76b5df9412d05f0e300577e 100644 (file)
@@ -5211,13 +5211,49 @@ static void gfx_v8_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id
                start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
 }
 
+/**
+ * gfx_v8_0_get_hdp_flush_mask - get the reference and mask for HDP flush
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ref_and_mask: pointer to store the reference and mask
+ * @reg_mem_engine: pointer to store the register memory engine
+ *
+ * Calculates the reference and mask for HDP flush based on the ring type and me.
+ */
+static void gfx_v8_0_get_hdp_flush_mask(struct amdgpu_ring *ring,
+                                       uint32_t *ref_and_mask, uint32_t *reg_mem_engine)
+{
+       if (!ring || !ref_and_mask || !reg_mem_engine) {
+               DRM_INFO("%s: invalid params\n", __func__);
+               return;
+       }
+
+       if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ||
+           (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)) {
+               switch (ring->me) {
+               case 1:
+                       *ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
+                       break;
+               case 2:
+                       *ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
+                       break;
+               default:
+                       return;
+               }
+               *reg_mem_engine = 0;
+       } else {
+               *ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
+               *reg_mem_engine = WAIT_REG_MEM_ENGINE(1); /* pfp */
+       }
+}
 
 static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
        .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
        .select_se_sh = &gfx_v8_0_select_se_sh,
        .read_wave_data = &gfx_v8_0_read_wave_data,
        .read_wave_sgprs = &gfx_v8_0_read_wave_sgprs,
-       .select_me_pipe_q = &gfx_v8_0_select_me_pipe_q
+       .select_me_pipe_q = &gfx_v8_0_select_me_pipe_q,
+       .get_hdp_flush_mask = &gfx_v8_0_get_hdp_flush_mask,
 };
 
 static int gfx_v8_0_early_init(struct amdgpu_ip_block *ip_block)
@@ -6000,25 +6036,14 @@ static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
 static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
        u32 ref_and_mask, reg_mem_engine;
+       struct amdgpu_device *adev = ring->adev;
 
-       if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ||
-           (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)) {
-               switch (ring->me) {
-               case 1:
-                       ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
-                       break;
-               case 2:
-                       ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
-                       break;
-               default:
-                       return;
-               }
-               reg_mem_engine = 0;
-       } else {
-               ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
-               reg_mem_engine = WAIT_REG_MEM_ENGINE(1); /* pfp */
+       if (!adev->gfx.funcs->get_hdp_flush_mask) {
+               dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n", __func__);
+               return;
        }
 
+       adev->gfx.funcs->get_hdp_flush_mask(ring, &ref_and_mask, &reg_mem_engine);
        amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
        amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
                                 WAIT_REG_MEM_FUNCTION(3) |  /* == */
index e6187be27385ab3e736654d423b6b4fd799b51d7..bb1465a98c7cabe9b3278c98992f90b4e5421a32 100644 (file)
@@ -2004,6 +2004,7 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
         .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
         .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
         .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
+       .get_hdp_flush_mask = &amdgpu_gfx_get_hdp_flush_mask,
 };
 
 const struct amdgpu_ras_block_hw_ops  gfx_v9_0_ras_ops = {
@@ -5380,25 +5381,13 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
        u32 ref_and_mask, reg_mem_engine;
-       const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
-       if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
-               switch (ring->me) {
-               case 1:
-                       ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
-                       break;
-               case 2:
-                       ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
-                       break;
-               default:
-                       return;
-               }
-               reg_mem_engine = 0;
-       } else {
-               ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
-               reg_mem_engine = 1; /* pfp */
+       if (!adev->gfx.funcs->get_hdp_flush_mask) {
+               dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n", __func__);
+               return;
        }
 
+       adev->gfx.funcs->get_hdp_flush_mask(ring, &ref_and_mask, &reg_mem_engine);
        gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
                              adev->nbio.funcs->get_hdp_flush_req_offset(adev),
                              adev->nbio.funcs->get_hdp_flush_done_offset(adev),
index 89253df5ffc85add31f48f7301af71404a56d1dd..4e4d88d6357f9644ba82b663ce13da94f755e82a 100644 (file)
@@ -848,6 +848,7 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
        .switch_partition_mode = &gfx_v9_4_3_switch_compute_partition,
        .ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
        .get_xccs_per_xcp = &gfx_v9_4_3_get_xccs_per_xcp,
+       .get_hdp_flush_mask = &amdgpu_gfx_get_hdp_flush_mask,
 };
 
 static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle,
@@ -2818,25 +2819,13 @@ static void gfx_v9_4_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
        u32 ref_and_mask, reg_mem_engine;
-       const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
-       if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
-               switch (ring->me) {
-               case 1:
-                       ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
-                       break;
-               case 2:
-                       ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
-                       break;
-               default:
-                       return;
-               }
-               reg_mem_engine = 0;
-       } else {
-               ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
-               reg_mem_engine = 1; /* pfp */
+       if (!adev->gfx.funcs->get_hdp_flush_mask) {
+               dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n", __func__);
+               return;
        }
 
+       adev->gfx.funcs->get_hdp_flush_mask(ring, &ref_and_mask, &reg_mem_engine);
        gfx_v9_4_3_wait_reg_mem(ring, reg_mem_engine, 0, 1,
                              adev->nbio.funcs->get_hdp_flush_req_offset(adev),
                              adev->nbio.funcs->get_hdp_flush_done_offset(adev),