if (tmp)
return -EINVAL;
+ tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_display_clock);
+ if (tmp)
+ return -EINVAL;
+
tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
if (tmp)
return -EINVAL;
{
kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
+ kfree(hwmgr->dyn_state.vddc_dependency_on_display_clock);
+ hwmgr->dyn_state.vddc_dependency_on_display_clock = NULL;
kfree(hwmgr->backend);
hwmgr->backend = NULL;
return ret;
}
+/*
+ * smu7_init_voltage_dependency_on_display_clock_table - allocate and populate
+ * the table mapping display-clock levels to the minimum VDDC each requires.
+ *
+ * Four fixed entries are used; entry 0 appears to correspond to the
+ * ultra-low display power level (see the lookup helper, which skips it).
+ * The preset values depend on the ASIC: Polaris10 and newer get their own
+ * clock and voltage set, Carrizo-family parts get a dedicated clock set,
+ * and everything else uses the legacy clocks; all pre-Polaris parts share
+ * one voltage set.
+ *
+ * Returns 0 on success — including the case where no DCE IP block exists,
+ * in which case no table is created — or -ENOMEM on allocation failure.
+ */
+static int smu7_init_voltage_dependency_on_display_clock_table(struct pp_hwmgr *hwmgr)
+{
+	/* Hard-coded per-ASIC presets, identical to the values used before. */
+	static const uint32_t polaris_clk[4] = { 38918, 45900, 66700, 113200 };
+	static const uint32_t polaris_v[4]   = { 700, 740, 800, 900 };
+	static const uint32_t cz_clk[4]      = { 35200, 35200, 46700, 64300 };
+	static const uint32_t legacy_clk[4]  = { 0, 35200, 54000, 62500 };
+	static const uint32_t legacy_v[4]    = { 0, 720, 810, 900 };
+	struct phm_clock_voltage_dependency_table *table;
+	const uint32_t *clks, *volts;
+	unsigned int i;
+
+	/* No display engine present: nothing needs a dispclk voltage floor. */
+	if (!amdgpu_device_ip_get_ip_block(hwmgr->adev, AMD_IP_BLOCK_TYPE_DCE))
+		return 0;
+
+	table = kzalloc(struct_size(table, entries, 4), GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
+
+	if (hwmgr->chip_id >= CHIP_POLARIS10) {
+		clks = polaris_clk;
+		volts = polaris_v;
+	} else {
+		clks = (hwmgr->chip_family == AMDGPU_FAMILY_CZ) ? cz_clk
+								: legacy_clk;
+		volts = legacy_v;
+	}
+
+	for (i = 0; i < 4; i++) {
+		table->entries[i].clk = clks[i];
+		table->entries[i].v = volts[i];
+	}
+
+	table->count = 4;
+	/* Publish the table only after it is fully initialized. */
+	hwmgr->dyn_state.vddc_dependency_on_display_clock = table;
+	return 0;
+}
+
static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
smu7_get_elb_voltages(hwmgr);
}
+ result = smu7_init_voltage_dependency_on_display_clock_table(hwmgr);
+ if (result)
+ goto fail;
+
if (hwmgr->pp_table_version == PP_TABLE_V1) {
smu7_complete_dependency_tables(hwmgr);
smu7_set_private_data_based_on_pptable_v1(hwmgr);
return 0;
}
+/*
+ * smu7_lookup_vddc_from_dispclk - find the minimum VDDC level needed to
+ * sustain the display clock requested by the current display configuration.
+ *
+ * Returns the voltage of the first dependency-table entry whose clock is
+ * at least the requested display clock, clamped to the highest entry when
+ * the request exceeds every level, or 0 when there is no table or no
+ * active display requesting a clock.
+ */
+static uint32_t smu7_lookup_vddc_from_dispclk(struct pp_hwmgr *hwmgr)
+{
+	const struct phm_clock_voltage_dependency_table *dep_table =
+		hwmgr->dyn_state.vddc_dependency_on_display_clock;
+	const struct amd_pp_display_configuration *display_cfg =
+		hwmgr->display_config;
+	uint32_t idx;
+
+	/* No dependency table was built for this ASIC. */
+	if (!dep_table || !dep_table->count)
+		return 0;
+
+	/* No display configuration, no active display, or no clock request. */
+	if (!display_cfg || !display_cfg->num_display || !display_cfg->display_clk)
+		return 0;
+
+	/*
+	 * Scan for the first level that satisfies the requested clock.
+	 * Entry 0 is skipped because ClocksStateUltraLow should not be
+	 * used according to DC.
+	 */
+	for (idx = 1; idx < dep_table->count; idx++) {
+		if (dep_table->entries[idx].clk >= display_cfg->display_clk)
+			return dep_table->entries[idx].v;
+	}
+
+	/* Request exceeds every level: clamp to the highest entry. */
+	return dep_table->entries[dep_table->count - 1].v;
+}
+
+/*
+ * smu7_apply_minimum_dce_voltage_request - report to the SMC the minimum
+ * VDDC required by the current display configuration.
+ *
+ * NOTE(review): when no table or display configuration exists the lookup
+ * returns 0 and a PPSMC_MSG_VddC_Request of 0 is still sent; confirm the
+ * SMC interprets a zero request as "no voltage floor" rather than as an
+ * invalid parameter.
+ */
+static void smu7_apply_minimum_dce_voltage_request(struct pp_hwmgr *hwmgr)
+{
+ uint32_t req_vddc = smu7_lookup_vddc_from_dispclk(hwmgr);
+
+ /* VOLTAGE_SCALE converts the table value to the unit the SMC expects. */
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_VddC_Request,
+ req_vddc * VOLTAGE_SCALE,
+ NULL);
+}
+
static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- if (hwmgr->pp_table_version == PP_TABLE_V1)
- phm_apply_dal_min_voltage_request(hwmgr);
-/* TO DO for v0 iceland and Ci*/
+ smu7_apply_minimum_dce_voltage_request(hwmgr);
if (!data->sclk_dpm_key_disabled) {
if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)