Implement SDMA soft reset by sending MSG_ResetSDMA on SMU 13.0.6.
v2: Add a firmware version check for the reset message.
v3: Add an IP version check. Print inst_mask on failure.
Signed-off-by: Jiadong Zhu <Jiadong.Zhu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
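
A minimal usage sketch for context (not part of this patch): a hypothetical
SDMA hang handler invoking the new DPM wrapper. The function name
sdma_try_soft_reset and the surrounding flow are illustrative assumptions;
only amdgpu_dpm_reset_sdma() itself is introduced by this change.

/* Hypothetical caller sketch: ask the PMFW to soft reset one SDMA instance
 * through the DPM wrapper added below. inst_mask carries one bit per
 * SDMA engine.
 */
static int sdma_try_soft_reset(struct amdgpu_device *adev, u32 instance)
{
	int r;

	/* amdgpu_dpm_reset_sdma() returns -EOPNOTSUPP when the SW SMU path
	 * is unavailable, and 0 when the hook or firmware support is
	 * missing, since smu_v13_0_6_reset_sdma() silently skips the
	 * message in that case.
	 */
	r = amdgpu_dpm_reset_sdma(adev, 1U << instance);
	if (r)
		dev_warn(adev->dev,
			 "SDMA instance %u soft reset request failed (%d)\n",
			 instance, r);

	return r;
}
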
return ret;
}
+int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask)
+{
+	struct smu_context *smu = adev->powerplay.pp_handle;
+	int ret;
+
+	if (!is_support_sw_smu(adev))
+		return -EOPNOTSUPP;
+
+	mutex_lock(&adev->pm.mutex);
+	ret = smu_reset_sdma(smu, inst_mask);
+	mutex_unlock(&adev->pm.mutex);
+
+	return ret;
+}
+
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
enum pp_clock_type type,
uint32_t *min,
int policy_level);
ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
enum pp_pm_policy p_type, char *buf);
+int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask);
#endif
return ret;
}
+
+int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
+{
+	int ret = 0;
+
+	if (smu->ppt_funcs && smu->ppt_funcs->reset_sdma)
+		ret = smu->ppt_funcs->reset_sdma(smu, inst_mask);
+
+	return ret;
+}
*/
int (*send_rma_reason)(struct smu_context *smu);
+	/**
+	 * @reset_sdma: message SMU to soft reset sdma instance.
+	 */
+	int (*reset_sdma)(struct smu_context *smu, uint32_t inst_mask);
+
/**
* @get_ecc_table: message SMU to get ECC INFO table.
*/
int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size);
int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size);
int smu_send_rma_reason(struct smu_context *smu);
+int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask);
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
int level);
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
__SMU_DUMMY_MAP(RmaDueToBadPageThreshold), \
__SMU_DUMMY_MAP(SelectPstatePolicy), \
__SMU_DUMMY_MAP(MALLPowerController), \
- __SMU_DUMMY_MAP(MALLPowerState),
+ __SMU_DUMMY_MAP(MALLPowerState), \
+ __SMU_DUMMY_MAP(ResetSDMA),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0),
MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0),
MSG_MAP(SelectPstatePolicy, PPSMC_MSG_SelectPstatePolicy, 0),
+ MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0),
};
// clang-format on
return ret;
}
+static int smu_v13_0_6_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
+{
+	struct amdgpu_device *adev = smu->adev;
+	int ret = 0;
+
+	/* the message is only valid on SMU 13.0.6 with pmfw 85.121.00 and above */
+	if ((adev->flags & AMD_IS_APU) ||
+	    amdgpu_ip_version(adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 6) ||
+	    smu->smc_fw_version < 0x00557900)
+		return 0;
+
+	ret = smu_cmn_send_smc_msg_with_param(smu,
+					      SMU_MSG_ResetSDMA, inst_mask, NULL);
+	if (ret)
+		dev_err(smu->adev->dev,
+			"failed to send ResetSDMA event with mask 0x%x\n",
+			inst_mask);
+
+	return ret;
+}
+
static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
{
struct smu_context *smu = adev->powerplay.pp_handle;
.i2c_fini = smu_v13_0_6_i2c_control_fini,
.send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num,
.send_rma_reason = smu_v13_0_6_send_rma_reason,
+ .reset_sdma = smu_v13_0_6_reset_sdma,
};
void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
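
A side note on the pmfw gate in smu_v13_0_6_reset_sdma(): the 85.121.00 floor
named in the comment corresponds to the packed constant 0x00557900
(0x55 = 85, 0x79 = 121, 0x00 = 0). A small decoding sketch follows, assuming
the version word packs major/minor/debug bytes high to low (an assumption
based on how the smu13 code reports its firmware version); the helper name
smu_fw_ver_unpack is illustrative, not an existing API.

/* Illustrative decode of a packed SMU firmware version word.
 * Assumed layout: bits 23:16 major, bits 15:8 minor, bits 7:0 debug,
 * so 0x00557900 -> 85.121.0, matching the floor checked in
 * smu_v13_0_6_reset_sdma() above.
 */
static inline void smu_fw_ver_unpack(u32 ver, u32 *major, u32 *minor, u32 *debug)
{
	*major = (ver >> 16) & 0xff;
	*minor = (ver >> 8) & 0xff;
	*debug = ver & 0xff;
}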