smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
}
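
+/*
+ * Populate the common message controller with the SMU v14.0.0 (APU)
+ * mailbox registers, the shared v1 message ops and the v14.0.0 message map.
+ */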
+static void smu_v14_0_0_init_msg_ctl(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ struct smu_msg_ctl *ctl = &smu->msg_ctl;
+
+ ctl->smu = smu;
+ mutex_init(&ctl->lock);
+ ctl->config.msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
+ ctl->config.resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
+ ctl->config.arg_regs[0] = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ ctl->config.num_arg_regs = 1;
+ ctl->ops = &smu_msg_v1_ops;
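+ /* Same 20x usec_timeout response budget as the legacy smu_cmn path. */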
+ ctl->default_timeout = adev->usec_timeout * 20;
+ ctl->message_map = smu_v14_0_0_message_map;
+}
+
void smu_v14_0_0_set_ppt_funcs(struct smu_context *smu)
{
smu->is_apu = true;
smu_v14_0_0_set_smu_mailbox_registers(smu);
+ smu_v14_0_0_init_msg_ctl(smu);
}
smu->debug_resp_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_54);
}
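
+/*
+ * Populate the common message controller with the SMU v14.0.2 mailbox
+ * registers, the shared v1 message ops and the v14.0.2 message map.
+ */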
+static void smu_v14_0_2_init_msg_ctl(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ struct smu_msg_ctl *ctl = &smu->msg_ctl;
+
+ ctl->smu = smu;
+ mutex_init(&ctl->lock);
+ ctl->config.msg_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_66);
+ ctl->config.resp_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_90);
+ ctl->config.arg_regs[0] = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_82);
+ ctl->config.num_arg_regs = 1;
+ ctl->ops = &smu_msg_v1_ops;
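+ /* Same 20x usec_timeout response budget as the legacy smu_cmn path. */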
+ ctl->default_timeout = adev->usec_timeout * 20;
+ ctl->message_map = smu_v14_0_2_message_map;
+}
+
static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu,
void **table)
{
smu->pwr_src_map = smu_v14_0_2_pwr_src_map;
smu->workload_map = smu_v14_0_2_workload_map;
smu_v14_0_2_set_smu_mailbox_registers(smu);
+ smu_v14_0_2_init_msg_ctl(smu);
}