static int sienna_cichlid_mode2_reset(struct smu_context *smu)
{
- int ret = 0, index;
+ /* Migrated to the smu_msg_ctl API: the ASIC-specific message index
+  * lookup and smu->message_lock are replaced by ctl helpers. */
+ struct smu_msg_ctl *ctl = &smu->msg_ctl;
struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
int timeout = 100;
- index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
- SMU_MSG_DriverMode2Reset);
-
- mutex_lock(&smu->message_lock);
+ /* ctl->lock supersedes smu->message_lock for message serialization. */
+ mutex_lock(&ctl->lock);
- ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index,
- SMU_RESET_MODE_2);
+ /* Fire the mode-2 reset asynchronously; the helper resolves the
+  * message index internally, so a failed send is now caught here
+  * (the old code ignored the lookup result entirely). */
+ ret = smu_msg_send_async_locked(ctl, SMU_MSG_DriverMode2Reset,
+ SMU_RESET_MODE_2);
+ if (ret)
+ goto out;
- ret = smu_cmn_wait_for_response(smu);
+ /* NOTE(review): the meaning of the second argument (0) is not
+  * visible in this hunk — confirm smu_msg_wait_response()'s contract. */
+ ret = smu_msg_wait_response(ctl, 0);
while (ret != 0 && timeout) {
- ret = smu_cmn_wait_for_response(smu);
+ ret = smu_msg_wait_response(ctl, 0);
/* Wait a bit more time for getting ACK */
if (ret != 0) {
--timeout;
goto out;
}
- dev_info(smu->adev->dev, "restore config space...\n");
+ dev_info(adev->dev, "restore config space...\n");
/* Restore the config space saved during init */
amdgpu_device_load_pci_state(adev->pdev);
out:
- mutex_unlock(&smu->message_lock);
+ mutex_unlock(&ctl->lock);
return ret;
}
static int vangogh_mode_reset(struct smu_context *smu, int type)
{
- int ret = 0, index = 0;
-
- index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
- SMU_MSG_GfxDeviceDriverReset);
- if (index < 0)
- return index == -EACCES ? 0 : index;
-
- mutex_lock(&smu->message_lock);
-
- ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, type);
+ /* Migrated to the smu_msg_ctl API: index lookup moves into the helper.
+  * NOTE(review): the old code treated -EACCES from the index lookup as
+  * success (return 0); confirm smu_msg_send_async_locked() preserves
+  * that "message not allowed on this ASIC" special case. */
+ struct smu_msg_ctl *ctl = &smu->msg_ctl;
+ int ret;
- mutex_unlock(&smu->message_lock);
+ mutex_lock(&ctl->lock);
+ /* Send the driver reset of the requested type without waiting. */
+ ret = smu_msg_send_async_locked(ctl, SMU_MSG_GfxDeviceDriverReset, type);
+ mutex_unlock(&ctl->lock);
mdelay(10);
static int aldebaran_mode2_reset(struct smu_context *smu)
{
- int ret = 0, index;
+ /* Migrated to the smu_msg_ctl API.
+  * NOTE(review): the old code returned -EINVAL on a failed index
+  * lookup; the new flow relies on smu_msg_send_async_locked() to
+  * report it — confirm the error mapping is acceptable to callers. */
+ struct smu_msg_ctl *ctl = &smu->msg_ctl;
struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
int timeout = 10;
- index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
- SMU_MSG_GfxDeviceDriverReset);
- if (index < 0 )
- return -EINVAL;
- mutex_lock(&smu->message_lock);
+ /* ctl->lock supersedes smu->message_lock for message serialization. */
+ mutex_lock(&ctl->lock);
+
if (smu->smc_fw_version >= 0x00441400) {
- ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, SMU_RESET_MODE_2);
+ /* Async send; a failed send now bails out instead of being ignored. */
+ ret = smu_msg_send_async_locked(ctl, SMU_MSG_GfxDeviceDriverReset,
+ SMU_RESET_MODE_2);
+ if (ret)
+ goto out;
+
/* This is similar to FLR, wait till max FLR timeout */
msleep(100);
- dev_dbg(smu->adev->dev, "restore config space...\n");
+ dev_dbg(adev->dev, "restore config space...\n");
/* Restore the config space saved during init */
amdgpu_device_load_pci_state(adev->pdev);
- dev_dbg(smu->adev->dev, "wait for reset ack\n");
+ dev_dbg(adev->dev, "wait for reset ack\n");
while (ret == -ETIME && timeout) {
- ret = smu_cmn_wait_for_response(smu);
+ /* NOTE(review): second argument 0 — confirm against
+  * smu_msg_wait_response()'s contract. */
+ ret = smu_msg_wait_response(ctl, 0);
/* Wait a bit more time for getting ACK */
if (ret == -ETIME) {
--timeout;
if (ret == 1)
ret = 0;
out:
- mutex_unlock(&smu->message_lock);
+ mutex_unlock(&ctl->lock);
return ret;
}
int smu_v13_0_set_gfx_power_up_by_imu(struct smu_context *smu)
{
- uint16_t index;
+ /* Migrated to the smu_msg_ctl API. The old code stuffed the index
+  * lookup result into a uint16_t unchecked (a negative errno would
+  * have been truncated); the helper now validates and the status is
+  * propagated to the caller. */
+ struct smu_msg_ctl *ctl = &smu->msg_ctl;
struct amdgpu_device *adev = smu->adev;
+ int ret;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableGfxImu,
ENABLE_IMU_ARG_GFXOFF_ENABLE, NULL);
}
- index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
- SMU_MSG_EnableGfxImu);
- return smu_cmn_send_msg_without_waiting(smu, index,
- ENABLE_IMU_ARG_GFXOFF_ENABLE);
+ /* Non-PSP load: send EnableGfxImu asynchronously under the
+  * message-serialization lock. */
+ mutex_lock(&ctl->lock);
+ ret = smu_msg_send_async_locked(ctl, SMU_MSG_EnableGfxImu,
+ ENABLE_IMU_ARG_GFXOFF_ENABLE);
+ mutex_unlock(&ctl->lock);
+
+ return ret;
}
int smu_v13_0_od_edit_dpm_table(struct smu_context *smu,
static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
{
- int ret = 0, index;
+ /* Migrated to the smu_msg_ctl API.
+  * NOTE(review): the old code returned the negative index on a failed
+  * lookup; this now comes from smu_msg_send_async_locked() — confirm
+  * callers see an equivalent error. */
+ struct smu_msg_ctl *ctl = &smu->msg_ctl;
struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
int timeout = 10;
- index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
- SMU_MSG_GfxDeviceDriverReset);
- if (index < 0)
- return index;
-
- mutex_lock(&smu->message_lock);
+ /* ctl->lock supersedes smu->message_lock for message serialization. */
+ mutex_lock(&ctl->lock);
- ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index,
- SMU_RESET_MODE_2);
+ /* Async mode-2 reset; a failed send now bails out early. */
+ ret = smu_msg_send_async_locked(ctl, SMU_MSG_GfxDeviceDriverReset,
+ SMU_RESET_MODE_2);
+ if (ret)
+ goto out;
/* Reset takes a bit longer, wait for 200ms. */
msleep(200);
- dev_dbg(smu->adev->dev, "restore config space...\n");
+ dev_dbg(adev->dev, "restore config space...\n");
/* Restore the config space saved during init */
amdgpu_device_load_pci_state(adev->pdev);
if (!(adev->flags & AMD_IS_APU))
smu_v13_0_6_restore_pci_config(smu);
- dev_dbg(smu->adev->dev, "wait for reset ack\n");
+ dev_dbg(adev->dev, "wait for reset ack\n");
do {
- ret = smu_cmn_wait_for_response(smu);
+ /* NOTE(review): second argument 0 — confirm against
+  * smu_msg_wait_response()'s contract. */
+ ret = smu_msg_wait_response(ctl, 0);
/* Wait a bit more time for getting ACK */
if (ret == -ETIME) {
--timeout;
} while (ret == -ETIME && timeout);
out:
- mutex_unlock(&smu->message_lock);
+ mutex_unlock(&ctl->lock);
if (ret)
dev_err(adev->dev, "failed to send mode2 reset, error code %d",
int smu_v14_0_set_gfx_power_up_by_imu(struct smu_context *smu)
{
- uint16_t index;
+ /* Migrated to the smu_msg_ctl API. The old code passed an unchecked
+  * index (negative errno truncated to uint16_t); the helper now
+  * validates and the send status is propagated. */
+ struct smu_msg_ctl *ctl = &smu->msg_ctl;
struct amdgpu_device *adev = smu->adev;
+ int ret;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableGfxImu,
ENABLE_IMU_ARG_GFXOFF_ENABLE, NULL);
}
- index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
- SMU_MSG_EnableGfxImu);
- return smu_cmn_send_msg_without_waiting(smu, index, ENABLE_IMU_ARG_GFXOFF_ENABLE);
+ /* Non-PSP load: send EnableGfxImu asynchronously under the
+  * message-serialization lock. */
+ mutex_lock(&ctl->lock);
+ ret = smu_msg_send_async_locked(ctl, SMU_MSG_EnableGfxImu,
+ ENABLE_IMU_ARG_GFXOFF_ENABLE);
+ mutex_unlock(&ctl->lock);
+
+ return ret;
}
int smu_v14_0_set_default_dpm_tables(struct smu_context *smu)
int smu_v15_0_set_gfx_power_up_by_imu(struct smu_context *smu)
{
- uint16_t index;
+ /* Migrated to the smu_msg_ctl API (same conversion as the v13/v14
+  * variants in this series). The old code passed an unchecked index
+  * (negative errno truncated to uint16_t); the helper now validates
+  * and the send status is propagated. */
+ struct smu_msg_ctl *ctl = &smu->msg_ctl;
struct amdgpu_device *adev = smu->adev;
+ int ret;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableGfxImu,
ENABLE_IMU_ARG_GFXOFF_ENABLE, NULL);
}
- index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
- SMU_MSG_EnableGfxImu);
- return smu_cmn_send_msg_without_waiting(smu, index, ENABLE_IMU_ARG_GFXOFF_ENABLE);
+ /* Non-PSP load: send EnableGfxImu asynchronously under the
+  * message-serialization lock. */
+ mutex_lock(&ctl->lock);
+ ret = smu_msg_send_async_locked(ctl, SMU_MSG_EnableGfxImu,
+ ENABLE_IMU_ARG_GFXOFF_ENABLE);
+ mutex_unlock(&ctl->lock);
+
+ return ret;
}
int smu_v15_0_set_default_dpm_tables(struct smu_context *smu)