Remove usage of legacy message-related fields from SMUv13 SOCs. Drop the per-SOC smu->message_map assignments and the hard-coded mailbox register setup (smu->msg_reg/resp_reg/param_reg and smu_v13_0_set_smu_mailbox_registers()); message control is now initialized through smu_v13_0_init_msg_ctl(), which takes the per-SOC message map directly.
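
For context, the sketch below is a minimal, self-contained model of the wiring change each set_ppt_funcs routine undergoes: the per-SOC message table is passed directly to the common init helper instead of being stored in smu->message_map with the mailbox registers programmed by a separate call. All types, field names, and register values in the sketch are simplified placeholders, not the driver's real definitions.

/*
 * Illustrative sketch only -- simplified stand-in types, not the real
 * amdgpu/SMU definitions or register offsets.  It models the shape of
 * the change: the message table is handed straight to the common init
 * helper instead of being stored in dedicated smu_context fields
 * alongside a separate mailbox-register setup call.
 */
#include <stdio.h>

struct cmn2asic_msg_mapping {
	int valid;
	int map_to;
};

struct msg_ctl {
	const struct cmn2asic_msg_mapping *map;	/* per-SOC message table */
	unsigned int msg_reg;			/* mailbox message register */
	unsigned int resp_reg;			/* mailbox response register */
	unsigned int param_reg;			/* mailbox parameter register */
};

struct smu_context {
	struct msg_ctl msg_ctl;
};

/* Hypothetical per-SOC table, analogous to aldebaran_message_map. */
static const struct cmn2asic_msg_mapping example_message_map[] = {
	{ 1, 0x1 },
	{ 1, 0x2 },
};

/*
 * Common helper: one call records both the message table and the mailbox
 * register offsets (placeholder values here), replacing the old split
 * between smu->message_map and smu_v13_0_set_smu_mailbox_registers().
 */
static void example_init_msg_ctl(struct smu_context *smu,
				 const struct cmn2asic_msg_mapping *message_map)
{
	smu->msg_ctl.map = message_map;
	smu->msg_ctl.msg_reg = 0x66;
	smu->msg_ctl.resp_reg = 0x90;
	smu->msg_ctl.param_reg = 0x82;
}

/* Per-SOC setup after this patch: just pass the table to the helper. */
static void example_set_ppt_funcs(struct smu_context *smu)
{
	example_init_msg_ctl(smu, example_message_map);
}

int main(void)
{
	struct smu_context smu = { { 0 } };

	example_set_ppt_funcs(&smu);
	printf("msg_reg=0x%x, first map_to=%d\n",
	       smu.msg_ctl.msg_reg, smu.msg_ctl.map[0].map_to);
	return 0;
}
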
Signed-off-by: Lijo Lazar <lijo.lazar@amd.com>
Reviewed-by: Asad Kamal <asad.kamal@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
int smu_v13_0_set_default_dpm_tables(struct smu_context *smu);
-void smu_v13_0_set_smu_mailbox_registers(struct smu_context *smu);
void smu_v13_0_init_msg_ctl(struct smu_context *smu,
const struct cmn2asic_msg_mapping *message_map);
void aldebaran_set_ppt_funcs(struct smu_context *smu)
{
smu->ppt_funcs = &aldebaran_ppt_funcs;
- smu->message_map = aldebaran_message_map;
smu->clock_map = aldebaran_clk_map;
smu->feature_map = aldebaran_feature_mask_map;
smu->table_map = aldebaran_table_map;
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE;
- smu_v13_0_set_smu_mailbox_registers(smu);
smu_v13_0_init_msg_ctl(smu, aldebaran_message_map);
}
smu_table->clocks_table, false);
}
-void smu_v13_0_set_smu_mailbox_registers(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
-
- smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
- smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
- smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
-}
-
void smu_v13_0_init_msg_ctl(struct smu_context *smu,
const struct cmn2asic_msg_mapping *message_map)
{
#define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000
-#define mmMP1_SMN_C2PMSG_66 0x0282
-#define mmMP1_SMN_C2PMSG_66_BASE_IDX 0
-
-#define mmMP1_SMN_C2PMSG_82 0x0292
-#define mmMP1_SMN_C2PMSG_82_BASE_IDX 0
-
-#define mmMP1_SMN_C2PMSG_90 0x029a
-#define mmMP1_SMN_C2PMSG_90_BASE_IDX 0
-
#define mmMP1_SMN_C2PMSG_75 0x028b
#define mmMP1_SMN_C2PMSG_75_BASE_IDX 0
{
struct amdgpu_device *adev = smu->adev;
- smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
- smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
- smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
-
smu->debug_param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_53);
smu->debug_msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_75);
smu->debug_resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_54);
void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
{
smu->ppt_funcs = &smu_v13_0_0_ppt_funcs;
- smu->message_map = smu_v13_0_0_message_map;
smu->clock_map = smu_v13_0_0_clk_map;
smu->feature_map = smu_v13_0_0_feature_mask_map;
smu->table_map = smu_v13_0_0_table_map;
.set_gfx_power_up_by_imu = smu_v13_0_set_gfx_power_up_by_imu,
};
-static void smu_v13_0_4_set_smu_mailbox_registers(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
-
- smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
- smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
- smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
-}
-
static void smu_v13_0_4_init_msg_ctl(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
struct amdgpu_device *adev = smu->adev;
smu->ppt_funcs = &smu_v13_0_4_ppt_funcs;
- smu->message_map = smu_v13_0_4_message_map;
smu->feature_map = smu_v13_0_4_feature_mask_map;
smu->table_map = smu_v13_0_4_table_map;
smu->smc_driver_if_version = SMU13_0_4_DRIVER_IF_VERSION;
smu->is_apu = true;
- if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 4)) {
- smu_v13_0_4_set_smu_mailbox_registers(smu);
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 4))
smu_v13_0_4_init_msg_ctl(smu);
- } else {
- smu_v13_0_set_smu_mailbox_registers(smu);
+ else
smu_v13_0_init_msg_ctl(smu, smu_v13_0_4_message_map);
- }
}
void smu_v13_0_5_set_ppt_funcs(struct smu_context *smu)
{
- struct amdgpu_device *adev = smu->adev;
-
smu->ppt_funcs = &smu_v13_0_5_ppt_funcs;
- smu->message_map = smu_v13_0_5_message_map;
smu->feature_map = smu_v13_0_5_feature_mask_map;
smu->table_map = smu_v13_0_5_table_map;
smu->is_apu = true;
smu->smc_driver_if_version = SMU13_0_5_DRIVER_IF_VERSION;
- smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_C2PMSG_34);
- smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_C2PMSG_2);
- smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_C2PMSG_33);
smu_v13_0_5_init_msg_ctl(smu);
}
void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
{
+ const struct cmn2asic_msg_mapping *message_map;
+
smu->ppt_funcs = &smu_v13_0_6_ppt_funcs;
- smu->message_map = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) ?
+ message_map = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) ?
smu_v13_0_12_message_map : smu_v13_0_6_message_map;
smu->clock_map = smu_v13_0_6_clk_map;
smu->feature_map = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) ?
smu_v13_0_12_feature_mask_map : smu_v13_0_6_feature_mask_map;
smu->table_map = smu_v13_0_6_table_map;
smu->smc_driver_if_version = SMU_IGNORE_IF_VERSION;
smu->smc_fw_caps |= SMU_FW_CAP_RAS_PRI;
- smu_v13_0_set_smu_mailbox_registers(smu);
- smu_v13_0_init_msg_ctl(smu, smu->message_map);
+ smu_v13_0_init_msg_ctl(smu, message_map);
smu_v13_0_6_set_temp_funcs(smu);
amdgpu_mca_smu_init_funcs(smu->adev, &smu_v13_0_6_mca_smu_funcs);
amdgpu_aca_set_smu_funcs(smu->adev, &smu_v13_0_6_aca_smu_funcs);
void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
{
smu->ppt_funcs = &smu_v13_0_7_ppt_funcs;
- smu->message_map = smu_v13_0_7_message_map;
smu->clock_map = smu_v13_0_7_clk_map;
smu->feature_map = smu_v13_0_7_feature_mask_map;
smu->table_map = smu_v13_0_7_table_map;
smu->pwr_src_map = smu_v13_0_7_pwr_src_map;
smu->workload_map = smu_v13_0_7_workload_map;
smu->smc_driver_if_version = SMU13_0_7_DRIVER_IF_VERSION;
- smu_v13_0_set_smu_mailbox_registers(smu);
smu_v13_0_init_msg_ctl(smu, smu_v13_0_7_message_map);
}
void yellow_carp_set_ppt_funcs(struct smu_context *smu)
{
smu->ppt_funcs = &yellow_carp_ppt_funcs;
- smu->message_map = yellow_carp_message_map;
smu->feature_map = yellow_carp_feature_mask_map;
smu->table_map = yellow_carp_table_map;
smu->is_apu = true;
smu->smc_driver_if_version = SMU13_YELLOW_CARP_DRIVER_IF_VERSION;
- smu_v13_0_set_smu_mailbox_registers(smu);
smu_v13_0_init_msg_ctl(smu, yellow_carp_message_map);
}