hwmgr->dyn_state.vddc_dependency_on_mclk;
struct phm_cac_leakage_table *std_voltage_table =
hwmgr->dyn_state.cac_leakage_table;
- uint32_t i;
+ uint32_t i, clk;
PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
"SCLK dependency table is missing. This table is mandatory", return -EINVAL);
data->dpm_table.sclk_table.count = 0;
for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
+ clk = min(allowed_vdd_sclk_table->entries[i].clk, data->sclk_cap);
+
if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
- allowed_vdd_sclk_table->entries[i].clk) {
+ clk) {
data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
- allowed_vdd_sclk_table->entries[i].clk;
+ clk;
data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0;
data->dpm_table.sclk_table.count++;
}
return 0;
}
+/*
+ * smu7_set_sclk_cap() - set a per-board upper limit on the engine clock.
+ * @hwmgr: hardware manager whose backend (struct smu7_hwmgr) receives the cap.
+ *
+ * sclk_cap is in 10 kHz units (the dev_info below multiplies by 10 to
+ * report kHz; 104000 corresponds to the 1040 MHz noted inline).
+ * 0xffffffff serves as the "no cap" sentinel.
+ */
+static void smu7_set_sclk_cap(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ /* default: no cap */
+ data->sclk_cap = 0xffffffff;
+
+ /* honor user overclocking: never apply the quirk when overdrive is on */
+ if (hwmgr->od_enabled)
+ return;
+
+ /* R9 390X board: last sclk dpm level is unstable, use lower sclk */
+ if (adev->pdev->device == 0x67B0 &&
+ adev->pdev->subsystem_vendor == 0x1043)
+ data->sclk_cap = 104000; /* 1040 MHz */
+
+ /* log only when a quirk actually lowered the cap */
+ if (data->sclk_cap != 0xffffffff)
+ dev_info(adev->dev, "sclk cap: %u kHz on quirky ASIC\n", data->sclk_cap * 10);
+}
+
static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
return -ENOMEM;
hwmgr->backend = data;
+ smu7_set_sclk_cap(hwmgr);
smu7_patch_voltage_workaround(hwmgr);
smu7_init_dpm_defaults(hwmgr);
/* Performance levels are arranged from low to high. */
performance_level->memory_clock = memory_clock;
- performance_level->engine_clock = engine_clock;
+ performance_level->engine_clock = min(engine_clock, data->sclk_cap);
pcie_gen_from_bios = visland_clk_info->ucPCIEGen;