drm/amd/pm: implement dpm vcn reset function
author    Ruili Ji <ruiliji2@amd.com>
          Mon, 24 Mar 2025 05:08:50 +0000 (01:08 -0400)
committer Alex Deucher <alexander.deucher@amd.com>
          Mon, 7 Apr 2025 22:01:08 +0000 (18:01 -0400)
Implement VCN engine reset by sending MSG_ResetVCN
on smu 13.0.6.

v2: fix format for code and message

Reviewed-by: Sonny Jiang <sonny.jiang@amd.com>
Reviewed-by: Leo Liu <leo.liu@amd.com>
Signed-off-by: Ruili Ji <ruiliji2@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/pm/amdgpu_dpm.c
drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
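
For orientation, here is a minimal caller sketch, not part of this commit, showing how the new helper could be invoked from a driver-side VCN recovery path. The wrapper function name and the BIT(inst) mask derivation are illustrative assumptions (the sketch also assumes the usual amdgpu kernel headers); only amdgpu_dpm_reset_vcn() itself is added by the patch below.

/* Hypothetical usage sketch: request a reset of VCN instance 'inst'
 * through the new DPM helper. The helper takes a bitmask of instances,
 * so a single instance is selected with BIT(inst).
 */
static int example_reset_vcn_instance(struct amdgpu_device *adev, u32 inst)
{
	int r;

	/* Returns -EOPNOTSUPP when the device is not driven by the SW SMU. */
	r = amdgpu_dpm_reset_vcn(adev, BIT(inst));
	if (r)
		dev_warn(adev->dev, "VCN%u reset request failed (%d)\n", inst, r);

	return r;
}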

diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 9f5768fa97dd0e9e12a235a7dfa80f0a61cf8b53..740ee2435f54035fd38d67c734b34c833b75301c 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -789,6 +789,21 @@ int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask)
        return ret;
 }
 
+int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask)
+{
+       struct smu_context *smu = adev->powerplay.pp_handle;
+       int ret;
+
+       if (!is_support_sw_smu(adev))
+               return -EOPNOTSUPP;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = smu_reset_vcn(smu, inst_mask);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
 int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
                                  enum pp_clock_type type,
                                  uint32_t *min,
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 747019ed2bf5c58f803cbbbf6ca9994190796b6e..4445d54f3322f1fff075c61c94ecec5954b23bbe 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -607,5 +607,6 @@ ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
                                      enum pp_pm_policy p_type, char *buf);
 int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask);
 bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev);
+int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask);
 
 #endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index a01b6244d99cd5a1180595e2a4d876db16f10dc7..1da9e1287d2e4a5541c7a48b58b47af4667d945c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -3967,3 +3967,11 @@ int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
 
        return ret;
 }
+
+int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
+{
+       if (smu->ppt_funcs && smu->ppt_funcs->dpm_reset_vcn)
+               smu->ppt_funcs->dpm_reset_vcn(smu, inst_mask);
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 27cecf9688ccbb3605abd3321450c73ae95b4a91..ae89801adeb8b5d385babd9ac6b23fdd97596872 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -1396,6 +1396,11 @@ struct pptable_funcs {
         */
        bool (*reset_sdma_is_supported)(struct smu_context *smu);
 
+       /**
+        * @dpm_reset_vcn: message SMU to soft reset vcn instance.
+        */
+       int (*dpm_reset_vcn)(struct smu_context *smu, uint32_t inst_mask);
+
        /**
         * @get_ecc_table:  message SMU to get ECC INFO table.
         */
@@ -1659,6 +1664,7 @@ int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size);
 int smu_send_rma_reason(struct smu_context *smu);
 int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask);
 bool smu_reset_sdma_is_supported(struct smu_context *smu);
+int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask);
 int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
                      int level);
 ssize_t smu_get_pm_policy_info(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
index 288b2576432bd0c0c33980c33cf7d6a1b2d583c0..348d06a3200c23627d4c115e59d250c42a18d857 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
@@ -94,7 +94,8 @@
 #define PPSMC_MSG_RmaDueToBadPageThreshold          0x43
 #define PPSMC_MSG_SetThrottlingPolicy               0x44
 #define PPSMC_MSG_ResetSDMA                         0x4D
-#define PPSMC_Message_Count                         0x4E
+#define PPSMC_MSG_ResetVCN                          0x4E
+#define PPSMC_Message_Count                         0x4F
 
 //PPSMC Reset Types for driver msg argument
 #define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET        0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index c9dee09395e3b841af7b08bb7c4cf9a2e40ec4a8..eefdaa0b5df65ef3a916f1c7b6dfc31e72438063 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
        __SMU_DUMMY_MAP(MALLPowerController), \
        __SMU_DUMMY_MAP(MALLPowerState), \
        __SMU_DUMMY_MAP(ResetSDMA), \
+       __SMU_DUMMY_MAP(ResetVCN), \
        __SMU_DUMMY_MAP(GetStaticMetricsTable),
 
 #undef __SMU_DUMMY_MAP
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 8900c94bda0ec5395c20646bc385d63205484cff..34ffaa0cfeeb04110441ac99d47cb7cd50abf0a1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -176,6 +176,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
        MSG_MAP(RmaDueToBadPageThreshold,            PPSMC_MSG_RmaDueToBadPageThreshold,        0),
        MSG_MAP(SetThrottlingPolicy,                 PPSMC_MSG_SetThrottlingPolicy,             0),
        MSG_MAP(ResetSDMA,                           PPSMC_MSG_ResetSDMA,                       0),
+       MSG_MAP(ResetVCN,                            PPSMC_MSG_ResetVCN,                       0),
 };
 
 // clang-format on
@@ -2941,6 +2942,19 @@ static int smu_v13_0_6_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
        return ret;
 }
 
+static int smu_v13_0_6_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
+{
+       int ret = 0;
+
+       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ResetVCN, inst_mask, NULL);
+       if (ret)
+               dev_err(smu->adev->dev,
+                       "failed to send ResetVCN event with mask 0x%x\n",
+                       inst_mask);
+       return ret;
+}
+
+
 static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
 {
        struct smu_context *smu = adev->powerplay.pp_handle;
@@ -3615,6 +3629,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
        .send_rma_reason = smu_v13_0_6_send_rma_reason,
        .reset_sdma = smu_v13_0_6_reset_sdma,
        .reset_sdma_is_supported = smu_v13_0_6_reset_sdma_is_supported,
+       .dpm_reset_vcn = smu_v13_0_6_reset_vcn,
 };
 
 void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
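
Taken together, the hunks above establish the following path for a VCN reset request; this is a summary of the code in this patch rather than additional code in the tree. As with the existing ResetSDMA handling, the per-ASIC callback is optional: smu_reset_vcn() simply returns 0 when a ppt backend does not provide dpm_reset_vcn.

/*
 * amdgpu_dpm_reset_vcn(adev, inst_mask)                  [amdgpu_dpm.c]
 *   -> smu_reset_vcn(smu, inst_mask)                     [amdgpu_smu.c, under adev->pm.mutex]
 *     -> smu->ppt_funcs->dpm_reset_vcn(smu, inst_mask)   [smu_v13_0_6_reset_vcn() on SMU 13.0.6]
 *       -> smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ResetVCN, inst_mask, NULL)
 *          SMU_MSG_ResetVCN is mapped via smu_v13_0_6_message_map to
 *          PPSMC_MSG_ResetVCN (0x4E); inst_mask is passed as the message
 *          argument, and a non-zero return is reported with dev_err().
 */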