drm/amdgpu: Register the new sdma function pointers for sdma_v5_0
author	Jesse.zhang@amd.com <Jesse.zhang@amd.com>
Mon, 14 Apr 2025 08:05:33 +0000 (16:05 +0800)
committer	Alex Deucher <alexander.deucher@amd.com>
Mon, 21 Apr 2025 14:56:58 +0000 (10:56 -0400)
Register the stop/start/soft_reset queue functions for SDMA IP version v5.0, so that the generic SDMA reset helper can drive per-queue resets through these callbacks (see the illustrative sketch below).
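
The callbacks registered here are consumed by the generic helper amdgpu_sdma_reset_engine(), which sdma_v5_0_reset_queue() now calls instead of open-coding the reset sequence. The following is a minimal illustrative sketch of that callback contract; only the function-pointer names and signatures come from this patch, while the helper's internal flow (scheduler quiescing, locking, ordering) is an assumption and lives in amdgpu_sdma.c, not in this diff.

/*
 * Illustrative sketch only: how a generic reset helper could invoke the
 * per-IP callbacks registered by this patch.  The real
 * amdgpu_sdma_reset_engine() also quiesces the GPU scheduler and takes
 * the appropriate locks; its exact body is not part of this diff.
 */
static int example_sdma_reset_engine(struct amdgpu_device *adev, u32 instance_id)
{
	struct amdgpu_sdma_instance *sdma = &adev->sdma.instance[instance_id];
	struct amdgpu_ring *gfx_ring = &sdma->ring;
	int r = 0;

	/* 1) stop the kernel (gfx) queue via the IP-specific hook */
	if (sdma->funcs && sdma->funcs->stop_kernel_queue) {
		r = sdma->funcs->stop_kernel_queue(gfx_ring);
		if (r)
			return r;
	}

	/* 2) soft-reset only this SDMA engine */
	if (sdma->funcs && sdma->funcs->soft_reset_kernel_queue) {
		r = sdma->funcs->soft_reset_kernel_queue(adev, instance_id);
		if (r)
			return r;
	}

	/* 3) restore and restart the kernel queue */
	if (sdma->funcs && sdma->funcs->start_kernel_queue)
		r = sdma->funcs->start_kernel_queue(gfx_ring);

	return r;
}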

Suggested-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Jesse Zhang <jesse.zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c

index e1348b6d9c6a6bdde7e31697a1cf79b1930decc6..48ee6dc1e3dcc405d84375f8c0bd74fda3970bd8 100644
@@ -112,6 +112,8 @@ static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev);
 static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev);
 static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev);
 static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev);
+static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring);
+static int sdma_v5_0_restore_queue(struct amdgpu_ring *ring);
 
 static const struct soc15_reg_golden golden_settings_sdma_5[] = {
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
@@ -1323,6 +1325,36 @@ static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
        amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
 }
 
+static int sdma_v5_0_soft_reset_engine(struct amdgpu_device *adev, u32 instance_id)
+{
+       u32 grbm_soft_reset;
+       u32 tmp;
+
+       grbm_soft_reset = REG_SET_FIELD(0,
+                                       GRBM_SOFT_RESET, SOFT_RESET_SDMA0,
+                                       1);
+       grbm_soft_reset <<= instance_id;
+
+       tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+       tmp |= grbm_soft_reset;
+       DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp);
+       WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+       tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+
+       udelay(50);
+
+       tmp &= ~grbm_soft_reset;
+       WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+       tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+       return 0;
+}
+
+static const struct amdgpu_sdma_funcs sdma_v5_0_sdma_funcs = {
+       .stop_kernel_queue = &sdma_v5_0_stop_queue,
+       .start_kernel_queue = &sdma_v5_0_restore_queue,
+       .soft_reset_kernel_queue = &sdma_v5_0_soft_reset_engine,
+};
+
 static int sdma_v5_0_early_init(struct amdgpu_ip_block *ip_block)
 {
        struct amdgpu_device *adev = ip_block->adev;
@@ -1365,6 +1397,7 @@ static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block)
                return r;
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
+               adev->sdma.instance[i].funcs = &sdma_v5_0_sdma_funcs;
                ring = &adev->sdma.instance[i].ring;
                ring->ring_obj = NULL;
                ring->use_doorbell = true;
@@ -1506,8 +1539,16 @@ static int sdma_v5_0_soft_reset(struct amdgpu_ip_block *ip_block)
 static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
 {
        struct amdgpu_device *adev = ring->adev;
-       int i, j, r;
-       u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg;
+       u32 inst_id = ring->me;
+
+       return amdgpu_sdma_reset_engine(adev, inst_id);
+}
+
+static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring)
+{
+       u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, stat1_reg;
+       struct amdgpu_device *adev = ring->adev;
+       int i, j, r = 0;
 
        if (amdgpu_sriov_vf(adev))
                return -EINVAL;
@@ -1562,30 +1603,25 @@ static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
        cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
        cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0);
        WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl);
+err0:
+       amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+       return r;
+}
 
-       /* soft reset SDMA_GFX_PREEMPT.IB_PREEMPT = 0 mmGRBM_SOFT_RESET.SOFT_RESET_SDMA0/1 = 1 */
-       preempt = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT));
-       preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0);
-       WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt);
-
-       soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
-       soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i;
-
-       WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
-
-       udelay(50);
-
-       soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i);
-       WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
+static int sdma_v5_0_restore_queue(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       u32 inst_id = ring->me;
+       u32 freeze;
+       int r;
 
+       amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
        /* unfreeze*/
-       freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE));
+       freeze = RREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE));
        freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0);
-       WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze);
+       WREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE), freeze);
 
-       r = sdma_v5_0_gfx_resume_instance(adev, i, true);
-
-err0:
+       r = sdma_v5_0_gfx_resume_instance(adev, inst_id, true);
        amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
        return r;
 }