drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
1 /*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 #include <linux/delay.h>
25 #include <linux/fb.h>
26 #include <linux/module.h>
27 #include <linux/slab.h>
28
29 #include "hwmgr.h"
30 #include "amd_powerplay.h"
31 #include "vega20_smumgr.h"
32 #include "hardwaremanager.h"
33 #include "ppatomfwctrl.h"
34 #include "atomfirmware.h"
35 #include "cgs_common.h"
36 #include "vega20_powertune.h"
37 #include "vega20_inc.h"
38 #include "pppcielanes.h"
39 #include "vega20_hwmgr.h"
40 #include "vega20_processpptables.h"
41 #include "vega20_pptable.h"
42 #include "vega20_thermal.h"
43 #include "vega20_ppsmc.h"
44 #include "pp_debug.h"
45 #include "amd_pcie_helpers.h"
46 #include "ppinterrupt.h"
47 #include "pp_overdriver.h"
48 #include "pp_thermal.h"
49
50 static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
51 {
52 struct vega20_hwmgr *data =
53 (struct vega20_hwmgr *)(hwmgr->backend);
54
55 data->gfxclk_average_alpha = PPVEGA20_VEGA20GFXCLKAVERAGEALPHA_DFLT;
56 data->socclk_average_alpha = PPVEGA20_VEGA20SOCCLKAVERAGEALPHA_DFLT;
57 data->uclk_average_alpha = PPVEGA20_VEGA20UCLKCLKAVERAGEALPHA_DFLT;
58 data->gfx_activity_average_alpha = PPVEGA20_VEGA20GFXACTIVITYAVERAGEALPHA_DFLT;
59 data->lowest_uclk_reserved_for_ulv = PPVEGA20_VEGA20LOWESTUCLKRESERVEDFORULV_DFLT;
60
61 data->display_voltage_mode = PPVEGA20_VEGA20DISPLAYVOLTAGEMODE_DFLT;
62 data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
63 data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
64 data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
65 data->disp_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
66 data->disp_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
67 data->disp_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
68 data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
69 data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
70 data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
71 data->phy_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
72 data->phy_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
73 data->phy_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
74
75 data->registry_data.disallowed_features = 0x0;
76 data->registry_data.od_state_in_dc_support = 0;
77 data->registry_data.thermal_support = 1;
78 data->registry_data.skip_baco_hardware = 0;
79
80 data->registry_data.log_avfs_param = 0;
81 data->registry_data.sclk_throttle_low_notification = 1;
82 data->registry_data.force_dpm_high = 0;
83 data->registry_data.stable_pstate_sclk_dpm_percentage = 75;
84
85 data->registry_data.didt_support = 0;
86 if (data->registry_data.didt_support) {
87 data->registry_data.didt_mode = 6;
88 data->registry_data.sq_ramping_support = 1;
89 data->registry_data.db_ramping_support = 0;
90 data->registry_data.td_ramping_support = 0;
91 data->registry_data.tcp_ramping_support = 0;
92 data->registry_data.dbr_ramping_support = 0;
93 data->registry_data.edc_didt_support = 1;
94 data->registry_data.gc_didt_support = 0;
95 data->registry_data.psm_didt_support = 0;
96 }
97
98 data->registry_data.pcie_lane_override = 0xff;
99 data->registry_data.pcie_speed_override = 0xff;
100 data->registry_data.pcie_clock_override = 0xffffffff;
101 data->registry_data.regulator_hot_gpio_support = 1;
102 data->registry_data.ac_dc_switch_gpio_support = 0;
103 data->registry_data.quick_transition_support = 0;
104 data->registry_data.zrpm_start_temp = 0xffff;
105 data->registry_data.zrpm_stop_temp = 0xffff;
106 data->registry_data.od8_feature_enable = 1;
107 data->registry_data.disable_water_mark = 0;
108 data->registry_data.disable_pp_tuning = 0;
109 data->registry_data.disable_xlpp_tuning = 0;
110 data->registry_data.disable_workload_policy = 0;
111 data->registry_data.perf_ui_tuning_profile_turbo = 0x19190F0F;
112 data->registry_data.perf_ui_tuning_profile_powerSave = 0x19191919;
113 data->registry_data.perf_ui_tuning_profile_xl = 0x00000F0A;
114 data->registry_data.force_workload_policy_mask = 0;
115 data->registry_data.disable_3d_fs_detection = 0;
116 data->registry_data.fps_support = 1;
117 data->registry_data.disable_auto_wattman = 1;
118 data->registry_data.auto_wattman_debug = 0;
119 data->registry_data.auto_wattman_sample_period = 100;
120 data->registry_data.auto_wattman_threshold = 50;
121 data->registry_data.gfxoff_controlled_by_driver = 1;
122 data->gfxoff_allowed = false;
123 data->counter_gfxoff = 0;
124 }
125
126 static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
127 {
128 struct vega20_hwmgr *data =
129 (struct vega20_hwmgr *)(hwmgr->backend);
130 struct amdgpu_device *adev = hwmgr->adev;
131
132 if (data->vddci_control == VEGA20_VOLTAGE_CONTROL_NONE)
133 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
134 PHM_PlatformCaps_ControlVDDCI);
135
136 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
137 PHM_PlatformCaps_TablelessHardwareInterface);
138
139 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
140 PHM_PlatformCaps_EnableSMU7ThermalManagement);
141
142 if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
143 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
144 PHM_PlatformCaps_UVDPowerGating);
145
146 if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
147 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
148 PHM_PlatformCaps_VCEPowerGating);
149
150 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
151 PHM_PlatformCaps_UnTabledHardwareInterface);
152
153 if (data->registry_data.od8_feature_enable)
154 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
155 PHM_PlatformCaps_OD8inACSupport);
156
157 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
158 PHM_PlatformCaps_ActivityReporting);
159 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
160 PHM_PlatformCaps_FanSpeedInTableIsRPM);
161
162 if (data->registry_data.od_state_in_dc_support) {
163 if (data->registry_data.od8_feature_enable)
164 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
165 PHM_PlatformCaps_OD8inDCSupport);
166 }
167
168 if (data->registry_data.thermal_support &&
169 data->registry_data.fuzzy_fan_control_support &&
170 hwmgr->thermal_controller.advanceFanControlParameters.usTMax)
171 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
172 PHM_PlatformCaps_ODFuzzyFanControlSupport);
173
174 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
175 PHM_PlatformCaps_DynamicPowerManagement);
176 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
177 PHM_PlatformCaps_SMC);
178 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
179 PHM_PlatformCaps_ThermalPolicyDelay);
180
181 if (data->registry_data.force_dpm_high)
182 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
183 PHM_PlatformCaps_ExclusiveModeAlwaysHigh);
184
185 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
186 PHM_PlatformCaps_DynamicUVDState);
187
188 if (data->registry_data.sclk_throttle_low_notification)
189 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
190 PHM_PlatformCaps_SclkThrottleLowNotification);
191
192 /* power tune caps */
193 /* assume disabled */
194 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
195 PHM_PlatformCaps_PowerContainment);
196 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
197 PHM_PlatformCaps_DiDtSupport);
198 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
199 PHM_PlatformCaps_SQRamping);
200 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
201 PHM_PlatformCaps_DBRamping);
202 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
203 PHM_PlatformCaps_TDRamping);
204 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
205 PHM_PlatformCaps_TCPRamping);
206 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
207 PHM_PlatformCaps_DBRRamping);
208 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
209 PHM_PlatformCaps_DiDtEDCEnable);
210 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
211 PHM_PlatformCaps_GCEDC);
212 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
213 PHM_PlatformCaps_PSM);
214
215 if (data->registry_data.didt_support) {
216 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
217 PHM_PlatformCaps_DiDtSupport);
218 if (data->registry_data.sq_ramping_support)
219 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
220 PHM_PlatformCaps_SQRamping);
221 if (data->registry_data.db_ramping_support)
222 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
223 PHM_PlatformCaps_DBRamping);
224 if (data->registry_data.td_ramping_support)
225 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
226 PHM_PlatformCaps_TDRamping);
227 if (data->registry_data.tcp_ramping_support)
228 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
229 PHM_PlatformCaps_TCPRamping);
230 if (data->registry_data.dbr_ramping_support)
231 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
232 PHM_PlatformCaps_DBRRamping);
233 if (data->registry_data.edc_didt_support)
234 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
235 PHM_PlatformCaps_DiDtEDCEnable);
236 if (data->registry_data.gc_didt_support)
237 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
238 PHM_PlatformCaps_GCEDC);
239 if (data->registry_data.psm_didt_support)
240 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
241 PHM_PlatformCaps_PSM);
242 }
243
244 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
245 PHM_PlatformCaps_RegulatorHot);
246
247 if (data->registry_data.ac_dc_switch_gpio_support) {
248 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
249 PHM_PlatformCaps_AutomaticDCTransition);
250 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
251 PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
252 }
253
254 if (data->registry_data.quick_transition_support) {
255 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
256 PHM_PlatformCaps_AutomaticDCTransition);
257 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
258 PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
259 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
260 PHM_PlatformCaps_Falcon_QuickTransition);
261 }
262
263 if (data->lowest_uclk_reserved_for_ulv != PPVEGA20_VEGA20LOWESTUCLKRESERVEDFORULV_DFLT) {
264 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
265 PHM_PlatformCaps_LowestUclkReservedForUlv);
266 if (data->lowest_uclk_reserved_for_ulv == 1)
267 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
268 PHM_PlatformCaps_LowestUclkReservedForUlv);
269 }
270
271 if (data->registry_data.custom_fan_support)
272 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
273 PHM_PlatformCaps_CustomFanControlSupport);
274
275 return 0;
276 }
277
278 static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
279 {
280 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
281 int i;
282
283 data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
284 FEATURE_DPM_PREFETCHER_BIT;
285 data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
286 FEATURE_DPM_GFXCLK_BIT;
287 data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
288 FEATURE_DPM_UCLK_BIT;
289 data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
290 FEATURE_DPM_SOCCLK_BIT;
291 data->smu_features[GNLD_DPM_UVD].smu_feature_id =
292 FEATURE_DPM_UVD_BIT;
293 data->smu_features[GNLD_DPM_VCE].smu_feature_id =
294 FEATURE_DPM_VCE_BIT;
295 data->smu_features[GNLD_ULV].smu_feature_id =
296 FEATURE_ULV_BIT;
297 data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
298 FEATURE_DPM_MP0CLK_BIT;
299 data->smu_features[GNLD_DPM_LINK].smu_feature_id =
300 FEATURE_DPM_LINK_BIT;
301 data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
302 FEATURE_DPM_DCEFCLK_BIT;
303 data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
304 FEATURE_DS_GFXCLK_BIT;
305 data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
306 FEATURE_DS_SOCCLK_BIT;
307 data->smu_features[GNLD_DS_LCLK].smu_feature_id =
308 FEATURE_DS_LCLK_BIT;
309 data->smu_features[GNLD_PPT].smu_feature_id =
310 FEATURE_PPT_BIT;
311 data->smu_features[GNLD_TDC].smu_feature_id =
312 FEATURE_TDC_BIT;
313 data->smu_features[GNLD_THERMAL].smu_feature_id =
314 FEATURE_THERMAL_BIT;
315 data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
316 FEATURE_GFX_PER_CU_CG_BIT;
317 data->smu_features[GNLD_RM].smu_feature_id =
318 FEATURE_RM_BIT;
319 data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
320 FEATURE_DS_DCEFCLK_BIT;
321 data->smu_features[GNLD_ACDC].smu_feature_id =
322 FEATURE_ACDC_BIT;
323 data->smu_features[GNLD_VR0HOT].smu_feature_id =
324 FEATURE_VR0HOT_BIT;
325 data->smu_features[GNLD_VR1HOT].smu_feature_id =
326 FEATURE_VR1HOT_BIT;
327 data->smu_features[GNLD_FW_CTF].smu_feature_id =
328 FEATURE_FW_CTF_BIT;
329 data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
330 FEATURE_LED_DISPLAY_BIT;
331 data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
332 FEATURE_FAN_CONTROL_BIT;
333 data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
334 data->smu_features[GNLD_GFXOFF].smu_feature_id = FEATURE_GFXOFF_BIT;
335 data->smu_features[GNLD_CG].smu_feature_id = FEATURE_CG_BIT;
336 data->smu_features[GNLD_DPM_FCLK].smu_feature_id = FEATURE_DPM_FCLK_BIT;
337 data->smu_features[GNLD_DS_FCLK].smu_feature_id = FEATURE_DS_FCLK_BIT;
338 data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT;
339 data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT;
340 data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT;
341
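	/*
	 * Build each feature's 64-bit bitmap from its SMU feature id and
	 * mark it allowed unless the corresponding bit is set in the
	 * registry disallowed_features mask.
	 */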
342 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
343 data->smu_features[i].smu_feature_bitmap =
344 (uint64_t)(1ULL << data->smu_features[i].smu_feature_id);
345 data->smu_features[i].allowed =
346 ((data->registry_data.disallowed_features >> i) & 1) ?
347 false : true;
348 }
349 }
350
351 static int vega20_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
352 {
353 return 0;
354 }
355
356 static int vega20_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
357 {
358 kfree(hwmgr->backend);
359 hwmgr->backend = NULL;
360
361 return 0;
362 }
363
364 static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
365 {
366 struct vega20_hwmgr *data;
367 struct amdgpu_device *adev = hwmgr->adev;
368
369 data = kzalloc(sizeof(struct vega20_hwmgr), GFP_KERNEL);
370 if (data == NULL)
371 return -ENOMEM;
372
373 hwmgr->backend = data;
374
375 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO];
376 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
377 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
378
379 vega20_set_default_registry_data(hwmgr);
380
381 data->disable_dpm_mask = 0xff;
382
383 /* need to set voltage control types before EVV patching */
384 data->vddc_control = VEGA20_VOLTAGE_CONTROL_NONE;
385 data->mvdd_control = VEGA20_VOLTAGE_CONTROL_NONE;
386 data->vddci_control = VEGA20_VOLTAGE_CONTROL_NONE;
387
388 data->water_marks_bitmap = 0;
389 data->avfs_exist = false;
390
391 vega20_set_features_platform_caps(hwmgr);
392
393 vega20_init_dpm_defaults(hwmgr);
394
395 /* Parse pptable data read from VBIOS */
396 vega20_set_private_data_based_on_pptable(hwmgr);
397
398 data->is_tlu_enabled = false;
399
400 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
401 VEGA20_MAX_HARDWARE_POWERLEVELS;
402 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
403 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
404
405 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
406 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5 MHz (500 in 10 kHz units). */
407 hwmgr->platform_descriptor.clockStep.engineClock = 500;
408 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
409
410 data->total_active_cus = adev->gfx.cu_info.number;
411
412 return 0;
413 }
414
415 static int vega20_init_sclk_threshold(struct pp_hwmgr *hwmgr)
416 {
417 struct vega20_hwmgr *data =
418 (struct vega20_hwmgr *)(hwmgr->backend);
419
420 data->low_sclk_interrupt_threshold = 0;
421
422 return 0;
423 }
424
425 static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
426 {
427 int ret = 0;
428
429 ret = vega20_init_sclk_threshold(hwmgr);
430 PP_ASSERT_WITH_CODE(!ret,
431 "Failed to init sclk threshold!",
432 return ret);
433
434 return 0;
435 }
436
437 /*
438 * @fn vega20_init_dpm_state
439 * @brief Function to initialize all Soft/Hard Min levels to 0x0 and all Soft/Hard Max levels to 0xffff.
440 *
441 * @param dpm_state - the address of the DPM Table to initialize.
442 * @return None.
443 */
444 static void vega20_init_dpm_state(struct vega20_dpm_state *dpm_state)
445 {
446 dpm_state->soft_min_level = 0x0;
447 dpm_state->soft_max_level = 0xffff;
448 dpm_state->hard_min_level = 0x0;
449 dpm_state->hard_max_level = 0xffff;
450 }
451
452 static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
453 PPCLK_e clk_id, uint32_t *num_of_levels)
454 {
455 int ret = 0;
456
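	/*
	 * PPSMC_MSG_GetDpmFreqByIndex packs the clock id into the upper
	 * 16 bits of the argument; an index of 0xFF asks the SMC for the
	 * number of DPM levels rather than a specific level's frequency.
	 */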
457 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
458 PPSMC_MSG_GetDpmFreqByIndex,
459 (clk_id << 16 | 0xFF));
460 PP_ASSERT_WITH_CODE(!ret,
461 "[GetNumOfDpmLevel] failed to get dpm levels!",
462 return ret);
463
464 vega20_read_arg_from_smc(hwmgr, num_of_levels);
465 PP_ASSERT_WITH_CODE(*num_of_levels > 0,
466 "[GetNumOfDpmLevel] number of clk levels is invalid!",
467 return -EINVAL);
468
469 return ret;
470 }
471
472 static int vega20_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
473 PPCLK_e clk_id, uint32_t index, uint32_t *clk)
474 {
475 int ret = 0;
476
477 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
478 PPSMC_MSG_GetDpmFreqByIndex,
479 (clk_id << 16 | index));
480 PP_ASSERT_WITH_CODE(!ret,
481 "[GetDpmFreqByIndex] failed to get dpm freq by index!",
482 return ret);
483
484 vega20_read_arg_from_smc(hwmgr, clk);
485 PP_ASSERT_WITH_CODE(*clk,
486 "[GetDpmFreqByIndex] clk value is invalid!",
487 return -EINVAL);
488
489 return ret;
490 }
491
492 static int vega20_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
493 struct vega20_single_dpm_table *dpm_table, PPCLK_e clk_id)
494 {
495 int ret = 0;
496 uint32_t i, num_of_levels, clk;
497
498 ret = vega20_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels);
499 PP_ASSERT_WITH_CODE(!ret,
500 "[SetupSingleDpmTable] failed to get clk levels!",
501 return ret);
502
503 dpm_table->count = num_of_levels;
504
505 for (i = 0; i < num_of_levels; i++) {
506 ret = vega20_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk);
507 PP_ASSERT_WITH_CODE(!ret,
508 "[SetupSingleDpmTable] failed to get clk of specific level!",
509 return ret);
510 dpm_table->dpm_levels[i].value = clk;
511 dpm_table->dpm_levels[i].enabled = true;
512 }
513
514 return ret;
515 }
516
517
518 /*
519 * Initialize all DPM state tables for the SMU based on
520 * the dependency table.
521 * The dynamic state patching function will then trim these
522 * state tables to the allowed range based on the power
523 * policy or external client requests, such as UVD
524 * requests.
525 */
526 static int vega20_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
527 {
528 struct vega20_hwmgr *data =
529 (struct vega20_hwmgr *)(hwmgr->backend);
530 struct vega20_single_dpm_table *dpm_table;
531 int ret = 0;
532
533 memset(&data->dpm_table, 0, sizeof(data->dpm_table));
534
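	/*
	 * For each clock domain below: if its DPM feature is enabled, read
	 * the level count and per-level frequencies from the SMC; otherwise
	 * fall back to a single level at the VBIOS boot clock (boot clocks
	 * are in 10 kHz units, hence the /100 to MHz).
	 */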
535 /* socclk */
536 dpm_table = &(data->dpm_table.soc_table);
537 if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
538 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK);
539 PP_ASSERT_WITH_CODE(!ret,
540 "[SetupDefaultDpmTable] failed to get socclk dpm levels!",
541 return ret);
542 } else {
543 dpm_table->count = 1;
544 dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100;
545 }
546 vega20_init_dpm_state(&(dpm_table->dpm_state));
547
548 /* gfxclk */
549 dpm_table = &(data->dpm_table.gfx_table);
550 if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
551 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK);
552 PP_ASSERT_WITH_CODE(!ret,
553 "[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
554 return ret);
555 } else {
556 dpm_table->count = 1;
557 dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100;
558 }
559 vega20_init_dpm_state(&(dpm_table->dpm_state));
560
561 /* memclk */
562 dpm_table = &(data->dpm_table.mem_table);
563 if (data->smu_features[GNLD_DPM_UCLK].enabled) {
564 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK);
565 PP_ASSERT_WITH_CODE(!ret,
566 "[SetupDefaultDpmTable] failed to get memclk dpm levels!",
567 return ret);
568 } else {
569 dpm_table->count = 1;
570 dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100;
571 }
572 vega20_init_dpm_state(&(dpm_table->dpm_state));
573
574 /* eclk */
575 dpm_table = &(data->dpm_table.eclk_table);
576 if (data->smu_features[GNLD_DPM_VCE].enabled) {
577 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK);
578 PP_ASSERT_WITH_CODE(!ret,
579 "[SetupDefaultDpmTable] failed to get eclk dpm levels!",
580 return ret);
581 } else {
582 dpm_table->count = 1;
583 dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100;
584 }
585 vega20_init_dpm_state(&(dpm_table->dpm_state));
586
587 /* vclk */
588 dpm_table = &(data->dpm_table.vclk_table);
589 if (data->smu_features[GNLD_DPM_UVD].enabled) {
590 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK);
591 PP_ASSERT_WITH_CODE(!ret,
592 "[SetupDefaultDpmTable] failed to get vclk dpm levels!",
593 return ret);
594 } else {
595 dpm_table->count = 1;
596 dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100;
597 }
598 vega20_init_dpm_state(&(dpm_table->dpm_state));
599
600 /* dclk */
601 dpm_table = &(data->dpm_table.dclk_table);
602 if (data->smu_features[GNLD_DPM_UVD].enabled) {
603 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK);
604 PP_ASSERT_WITH_CODE(!ret,
605 "[SetupDefaultDpmTable] failed to get dclk dpm levels!",
606 return ret);
607 } else {
608 dpm_table->count = 1;
609 dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100;
610 }
611 vega20_init_dpm_state(&(dpm_table->dpm_state));
612
613 /* dcefclk */
614 dpm_table = &(data->dpm_table.dcef_table);
615 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
616 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK);
617 PP_ASSERT_WITH_CODE(!ret,
618 "[SetupDefaultDpmTable] failed to get dcefclk dpm levels!",
619 return ret);
620 } else {
621 dpm_table->count = 1;
622 dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100;
623 }
624 vega20_init_dpm_state(&(dpm_table->dpm_state));
625
626 /* pixclk */
627 dpm_table = &(data->dpm_table.pixel_table);
628 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
629 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK);
630 PP_ASSERT_WITH_CODE(!ret,
631 "[SetupDefaultDpmTable] failed to get pixclk dpm levels!",
632 return ret);
633 } else
634 dpm_table->count = 0;
635 vega20_init_dpm_state(&(dpm_table->dpm_state));
636
637 /* dispclk */
638 dpm_table = &(data->dpm_table.display_table);
639 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
640 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK);
641 PP_ASSERT_WITH_CODE(!ret,
642 "[SetupDefaultDpmTable] failed to get dispclk dpm levels!",
643 return ret);
644 } else
645 dpm_table->count = 0;
646 vega20_init_dpm_state(&(dpm_table->dpm_state));
647
648 /* phyclk */
649 dpm_table = &(data->dpm_table.phy_table);
650 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
651 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK);
652 PP_ASSERT_WITH_CODE(!ret,
653 "[SetupDefaultDpmTable] failed to get phyclk dpm levels!",
654 return ret);
655 } else
656 dpm_table->count = 0;
657 vega20_init_dpm_state(&(dpm_table->dpm_state));
658
659 /* fclk */
660 dpm_table = &(data->dpm_table.fclk_table);
661 if (data->smu_features[GNLD_DPM_FCLK].enabled) {
662 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_FCLK);
663 PP_ASSERT_WITH_CODE(!ret,
664 "[SetupDefaultDpmTable] failed to get fclk dpm levels!",
665 return ret);
666 } else
667 dpm_table->count = 0;
668 vega20_init_dpm_state(&(dpm_table->dpm_state));
669
670 /* save a copy of the default DPM table */
671 memcpy(&(data->golden_dpm_table), &(data->dpm_table),
672 sizeof(struct vega20_dpm_table));
673
674 return 0;
675 }
676
677 /**
678 * Initializes the SMC table and uploads it
679 *
680 * @param hwmgr the address of the powerplay hardware manager.
681 *
682 * @return 0 on success; a negative error code on failure.
683 */
684 static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
685 {
686 int result;
687 struct vega20_hwmgr *data =
688 (struct vega20_hwmgr *)(hwmgr->backend);
689 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
690 struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
691 struct phm_ppt_v3_information *pptable_information =
692 (struct phm_ppt_v3_information *)hwmgr->pptable;
693
694 result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
695 PP_ASSERT_WITH_CODE(!result,
696 "[InitSMCTable] Failed to get vbios bootup values!",
697 return result);
698
699 data->vbios_boot_state.vddc = boot_up_values.usVddc;
700 data->vbios_boot_state.vddci = boot_up_values.usVddci;
701 data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
702 data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
703 data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
704 data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
705 data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
706 data->vbios_boot_state.eclock = boot_up_values.ulEClk;
707 data->vbios_boot_state.vclock = boot_up_values.ulVClk;
708 data->vbios_boot_state.dclock = boot_up_values.ulDClk;
709 data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
710
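	/*
	 * VBIOS boot clocks are reported in 10 kHz units while the SMC
	 * expects MHz, hence the /100 when programming the minimum
	 * deep-sleep DCEF clock.
	 */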
711 smum_send_msg_to_smc_with_parameter(hwmgr,
712 PPSMC_MSG_SetMinDeepSleepDcefclk,
713 (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
714
715 memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));
716
717 result = vega20_copy_table_to_smc(hwmgr,
718 (uint8_t *)pp_table, TABLE_PPTABLE);
719 PP_ASSERT_WITH_CODE(!result,
720 "[InitSMCTable] Failed to upload PPtable!",
721 return result);
722
723 return 0;
724 }
725
726 static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
727 {
728 struct vega20_hwmgr *data =
729 (struct vega20_hwmgr *)(hwmgr->backend);
730 uint32_t allowed_features_low = 0, allowed_features_high = 0;
731 int i;
732 int ret = 0;
733
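	/*
	 * Split the 64-bit per-feature bitmaps into the 32-bit low/high
	 * masks expected by the SMC: features with id > 31 contribute to
	 * the high mask, the rest to the low mask.
	 */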
734 for (i = 0; i < GNLD_FEATURES_MAX; i++)
735 if (data->smu_features[i].allowed)
736 data->smu_features[i].smu_feature_id > 31 ?
737 (allowed_features_high |=
738 ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_HIGH_SHIFT)
739 & 0xFFFFFFFF)) :
740 (allowed_features_low |=
741 ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_LOW_SHIFT)
742 & 0xFFFFFFFF));
743
744 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
745 PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high);
746 PP_ASSERT_WITH_CODE(!ret,
747 "[SetAllowedFeaturesMask] Attempt to set allowed features mask(high) failed!",
748 return ret);
749
750 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
751 PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low);
752 PP_ASSERT_WITH_CODE(!ret,
753 "[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
754 return ret);
755
756 return 0;
757 }
758
759 static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
760 {
761 struct vega20_hwmgr *data =
762 (struct vega20_hwmgr *)(hwmgr->backend);
763 uint64_t features_enabled;
764 int i;
765 bool enabled;
766 int ret = 0;
767
768 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
769 PPSMC_MSG_EnableAllSmuFeatures)) == 0,
770 "[EnableAllSMUFeatures] Failed to enable all smu features!",
771 return ret);
772
773 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
774 PP_ASSERT_WITH_CODE(!ret,
775 "[EnableAllSmuFeatures] Failed to get enabled smc features!",
776 return ret);
777
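	/* Mirror the SMC-reported feature mask into the per-feature enabled/supported flags. */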
778 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
779 enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
780 true : false;
781 data->smu_features[i].enabled = enabled;
782 data->smu_features[i].supported = enabled;
783
784 #if 0
785 if (data->smu_features[i].allowed && !enabled)
786 pr_info("[EnableAllSMUFeatures] feature %d is expected enabled!", i);
787 else if (!data->smu_features[i].allowed && enabled)
788 pr_info("[EnableAllSMUFeatures] feature %d is expected disabled!", i);
789 #endif
790 }
791
792 return 0;
793 }
794
795 static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
796 {
797 struct vega20_hwmgr *data =
798 (struct vega20_hwmgr *)(hwmgr->backend);
799 uint64_t features_enabled;
800 int i;
801 bool enabled;
802 int ret = 0;
803
804 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
805 PPSMC_MSG_DisableAllSmuFeatures)) == 0,
806 "[DisableAllSMUFeatures] Failed to disable all smu features!",
807 return ret);
808
809 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
810 PP_ASSERT_WITH_CODE(!ret,
811 "[DisableAllSMUFeatures] Failed to get enabled smc features!",
812 return ret);
813
814 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
815 enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
816 true : false;
817 data->smu_features[i].enabled = enabled;
818 data->smu_features[i].supported = enabled;
819 }
820
821 return 0;
822 }
823
824 static int vega20_od8_set_feature_capabilities(
825 struct pp_hwmgr *hwmgr)
826 {
827 struct phm_ppt_v3_information *pptable_information =
828 (struct phm_ppt_v3_information *)hwmgr->pptable;
829 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
830 struct vega20_od8_settings *od_settings = &(data->od8_settings);
831
832 od_settings->overdrive8_capabilities = 0;
833
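	/*
	 * Expose an OD8 capability only when the owning DPM/fan/thermal
	 * feature is enabled and the corresponding pptable min/max limits
	 * are populated (non-zero).
	 */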
834 if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
835 if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_GFXCLKFMAX] > 0 &&
836 pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_GFXCLKFMAX] > 0 &&
837 pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_GFXCLKFMIN] > 0 &&
838 pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_GFXCLKFMIN] > 0)
839 od_settings->overdrive8_capabilities |= OD8_GFXCLK_LIMITS;
840
841 if (pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P1] > 0 &&
842 pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P2] > 0 &&
843 pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P3] > 0 &&
844 pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P1] > 0 &&
845 pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P2] > 0 &&
846 pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P3] > 0 &&
847 pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P1] > 0 &&
848 pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P2] > 0 &&
849 pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P3] > 0 &&
850 pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P1] > 0 &&
851 pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P2] > 0 &&
852 pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P3] > 0)
853 od_settings->overdrive8_capabilities |= OD8_GFXCLK_CURVE;
854 }
855
856 if (data->smu_features[GNLD_DPM_UCLK].enabled) {
857 if (pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_UCLKFMAX] > 0 &&
858 pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_UCLKFMAX] > 0)
859 od_settings->overdrive8_capabilities |= OD8_UCLK_MAX;
860 }
861
862 if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE] > 0 &&
863 pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE] <= 100)
864 od_settings->overdrive8_capabilities |= OD8_POWER_LIMIT;
865
866 if (data->smu_features[GNLD_FAN_CONTROL].enabled) {
867 if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_FANRPMMIN] > 0)
868 od_settings->overdrive8_capabilities |= OD8_FAN_SPEED_MIN;
869
870 if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_FANRPMACOUSTICLIMIT] > 0)
871 od_settings->overdrive8_capabilities |= OD8_ACOUSTIC_LIMIT_SCLK;
872 }
873
874 if (data->smu_features[GNLD_THERMAL].enabled) {
875 if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_FANTARGETTEMPERATURE] > 0)
876 od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_FAN;
877
878 if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_OPERATINGTEMPMAX] > 0)
879 od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_SYSTEM;
880 }
881
882 return 0;
883 }
884
885 static int vega20_od8_set_feature_id(
886 struct pp_hwmgr *hwmgr)
887 {
888 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
889 struct vega20_od8_settings *od_settings = &(data->od8_settings);
890
891 if (od_settings->overdrive8_capabilities & OD8_GFXCLK_LIMITS) {
892 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id =
893 OD8_GFXCLK_LIMITS;
894 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id =
895 OD8_GFXCLK_LIMITS;
896 } else {
897 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id =
898 0;
899 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id =
900 0;
901 }
902
903 if (od_settings->overdrive8_capabilities & OD8_GFXCLK_CURVE) {
904 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id =
905 OD8_GFXCLK_CURVE;
906 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id =
907 OD8_GFXCLK_CURVE;
908 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id =
909 OD8_GFXCLK_CURVE;
910 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id =
911 OD8_GFXCLK_CURVE;
912 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id =
913 OD8_GFXCLK_CURVE;
914 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id =
915 OD8_GFXCLK_CURVE;
916 } else {
917 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id =
918 0;
919 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id =
920 0;
921 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id =
922 0;
923 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id =
924 0;
925 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id =
926 0;
927 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id =
928 0;
929 }
930
931 if (od_settings->overdrive8_capabilities & OD8_UCLK_MAX)
932 od_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id = OD8_UCLK_MAX;
933 else
934 od_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id = 0;
935
936 if (od_settings->overdrive8_capabilities & OD8_POWER_LIMIT)
937 od_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id = OD8_POWER_LIMIT;
938 else
939 od_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id = 0;
940
941 if (od_settings->overdrive8_capabilities & OD8_ACOUSTIC_LIMIT_SCLK)
942 od_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id =
943 OD8_ACOUSTIC_LIMIT_SCLK;
944 else
945 od_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id =
946 0;
947
948 if (od_settings->overdrive8_capabilities & OD8_FAN_SPEED_MIN)
949 od_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id =
950 OD8_FAN_SPEED_MIN;
951 else
952 od_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id =
953 0;
954
955 if (od_settings->overdrive8_capabilities & OD8_TEMPERATURE_FAN)
956 od_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id =
957 OD8_TEMPERATURE_FAN;
958 else
959 od_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id =
960 0;
961
962 if (od_settings->overdrive8_capabilities & OD8_TEMPERATURE_SYSTEM)
963 od_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id =
964 OD8_TEMPERATURE_SYSTEM;
965 else
966 od_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id =
967 0;
968
969 return 0;
970 }
971
972 static int vega20_od8_initialize_default_settings(
973 struct pp_hwmgr *hwmgr)
974 {
975 struct phm_ppt_v3_information *pptable_information =
976 (struct phm_ppt_v3_information *)hwmgr->pptable;
977 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
978 struct vega20_od8_settings *od8_settings = &(data->od8_settings);
979 OverDriveTable_t *od_table = &(data->smc_state_table.overdrive_table);
980 int i, ret = 0;
981
982 /* Set Feature Capabilities */
983 vega20_od8_set_feature_capabilities(hwmgr);
984
985 /* Map FeatureID to individual settings */
986 vega20_od8_set_feature_id(hwmgr);
987
988 /* Set default values */
989 ret = vega20_copy_table_from_smc(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE);
990 PP_ASSERT_WITH_CODE(!ret,
991 "Failed to export over drive table!",
992 return ret);
993
994 if (od8_settings->overdrive8_capabilities & OD8_GFXCLK_LIMITS) {
995 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value =
996 od_table->GfxclkFmin;
997 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value =
998 od_table->GfxclkFmax;
999 } else {
1000 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value =
1001 0;
1002 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value =
1003 0;
1004 }
1005
1006 if (od8_settings->overdrive8_capabilities & OD8_GFXCLK_CURVE) {
1007 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value =
1008 od_table->GfxclkFreq1;
1009 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value =
1010 od_table->GfxclkOffsetVolt1;
1011 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value =
1012 od_table->GfxclkFreq2;
1013 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value =
1014 od_table->GfxclkOffsetVolt2;
1015 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value =
1016 od_table->GfxclkFreq3;
1017 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value =
1018 od_table->GfxclkOffsetVolt3;
1019 } else {
1020 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value =
1021 0;
1022 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value =
1023 0;
1024 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value =
1025 0;
1026 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value =
1027 0;
1028 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value =
1029 0;
1030 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value =
1031 0;
1032 }
1033
1034 if (od8_settings->overdrive8_capabilities & OD8_UCLK_MAX)
1035 od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value =
1036 od_table->UclkFmax;
1037 else
1038 od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value =
1039 0;
1040
1041 if (od8_settings->overdrive8_capabilities & OD8_POWER_LIMIT)
1042 od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value =
1043 od_table->OverDrivePct;
1044 else
1045 od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value =
1046 0;
1047
1048 if (od8_settings->overdrive8_capabilities & OD8_ACOUSTIC_LIMIT_SCLK)
1049 od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value =
1050 od_table->FanMaximumRpm;
1051 else
1052 od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value =
1053 0;
1054
1055 if (od8_settings->overdrive8_capabilities & OD8_FAN_SPEED_MIN)
1056 od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value =
1057 od_table->FanMinimumPwm;
1058 else
1059 od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value =
1060 0;
1061
1062 if (od8_settings->overdrive8_capabilities & OD8_TEMPERATURE_FAN)
1063 od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value =
1064 od_table->FanTargetTemperature;
1065 else
1066 od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value =
1067 0;
1068
1069 if (od8_settings->overdrive8_capabilities & OD8_TEMPERATURE_SYSTEM)
1070 od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value =
1071 od_table->MaxOpTemp;
1072 else
1073 od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value =
1074 0;
1075
1076 for (i = 0; i < OD8_SETTING_COUNT; i++) {
1077 if (od8_settings->od8_settings_array[i].feature_id) {
1078 od8_settings->od8_settings_array[i].min_value =
1079 pptable_information->od_settings_min[i];
1080 od8_settings->od8_settings_array[i].max_value =
1081 pptable_information->od_settings_max[i];
1082 od8_settings->od8_settings_array[i].current_value =
1083 od8_settings->od8_settings_array[i].default_value;
1084 } else {
1085 od8_settings->od8_settings_array[i].min_value =
1086 0;
1087 od8_settings->od8_settings_array[i].max_value =
1088 0;
1089 od8_settings->od8_settings_array[i].current_value =
1090 0;
1091 }
1092 }
1093
1094 return 0;
1095 }
1096
1097 static int vega20_od8_set_settings(
1098 struct pp_hwmgr *hwmgr,
1099 uint32_t index,
1100 uint32_t value)
1101 {
1102 OverDriveTable_t od_table;
1103 int ret = 0;
1104
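	/*
	 * Read-modify-write: fetch the current overdrive table from the
	 * SMC, patch the single field selected by index, then push the
	 * whole table back.
	 */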
1105 ret = vega20_copy_table_from_smc(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE);
1106 PP_ASSERT_WITH_CODE(!ret,
1107 "Failed to export over drive table!",
1108 return ret);
1109
1110 switch(index) {
1111 case OD8_SETTING_GFXCLK_FMIN:
1112 od_table.GfxclkFmin = (uint16_t)value;
1113 break;
1114 case OD8_SETTING_GFXCLK_FMAX:
1115 od_table.GfxclkFmax = (uint16_t)value;
1116 break;
1117 case OD8_SETTING_GFXCLK_FREQ1:
1118 od_table.GfxclkFreq1 = (uint16_t)value;
1119 break;
1120 case OD8_SETTING_GFXCLK_VOLTAGE1:
1121 od_table.GfxclkOffsetVolt1 = (uint16_t)value;
1122 break;
1123 case OD8_SETTING_GFXCLK_FREQ2:
1124 od_table.GfxclkFreq2 = (uint16_t)value;
1125 break;
1126 case OD8_SETTING_GFXCLK_VOLTAGE2:
1127 od_table.GfxclkOffsetVolt2 = (uint16_t)value;
1128 break;
1129 case OD8_SETTING_GFXCLK_FREQ3:
1130 od_table.GfxclkFreq3 = (uint16_t)value;
1131 break;
1132 case OD8_SETTING_GFXCLK_VOLTAGE3:
1133 od_table.GfxclkOffsetVolt3 = (uint16_t)value;
1134 break;
1135 case OD8_SETTING_UCLK_FMAX:
1136 od_table.UclkFmax = (uint16_t)value;
1137 break;
1138 case OD8_SETTING_POWER_PERCENTAGE:
1139 od_table.OverDrivePct = (int16_t)value;
1140 break;
1141 case OD8_SETTING_FAN_ACOUSTIC_LIMIT:
1142 od_table.FanMaximumRpm = (uint16_t)value;
1143 break;
1144 case OD8_SETTING_FAN_MIN_SPEED:
1145 od_table.FanMinimumPwm = (uint16_t)value;
1146 break;
1147 case OD8_SETTING_FAN_TARGET_TEMP:
1148 od_table.FanTargetTemperature = (uint16_t)value;
1149 break;
1150 case OD8_SETTING_OPERATING_TEMP_MAX:
1151 od_table.MaxOpTemp = (uint16_t)value;
1152 break;
1153 }
1154
1155 ret = vega20_copy_table_to_smc(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE);
1156 PP_ASSERT_WITH_CODE(!ret,
1157 "Failed to import over drive table!",
1158 return ret);
1159
1160 return 0;
1161 }
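/*
 * Illustration only (hypothetical values): raising the gfxclk soft ceiling
 * to 1900 MHz would go through this helper as
 *
 *	vega20_od8_set_settings(hwmgr, OD8_SETTING_GFXCLK_FMAX, 1900);
 *
 * vega20_set_sclk_od() below does exactly this, deriving the target from a
 * percentage of the golden top gfxclk level.
 */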
1162
1163 static int vega20_get_sclk_od(
1164 struct pp_hwmgr *hwmgr)
1165 {
1166 struct vega20_hwmgr *data = hwmgr->backend;
1167 struct vega20_single_dpm_table *sclk_table =
1168 &(data->dpm_table.gfx_table);
1169 struct vega20_single_dpm_table *golden_sclk_table =
1170 &(data->golden_dpm_table.gfx_table);
1171 int value;
1172
1173 /* od percentage */
1174 value = DIV_ROUND_UP((sclk_table->dpm_levels[sclk_table->count - 1].value -
1175 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * 100,
1176 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value);
1177
1178 return value;
1179 }
1180
1181 static int vega20_set_sclk_od(
1182 struct pp_hwmgr *hwmgr, uint32_t value)
1183 {
1184 struct vega20_hwmgr *data = hwmgr->backend;
1185 struct vega20_single_dpm_table *sclk_table =
1186 &(data->dpm_table.gfx_table);
1187 struct vega20_single_dpm_table *golden_sclk_table =
1188 &(data->golden_dpm_table.gfx_table);
1189 uint32_t od_sclk;
1190 int ret = 0;
1191
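	/*
	 * Convert the requested OD percentage into an absolute gfxclk Fmax:
	 * the golden top-level frequency plus 'value' percent of it.
	 */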
1192 od_sclk = golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * value;
1193 do_div(od_sclk, 100);
1194 od_sclk += golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
1195
1196 ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_GFXCLK_FMAX, od_sclk);
1197 PP_ASSERT_WITH_CODE(!ret,
1198 "[SetSclkOD] failed to set od gfxclk!",
1199 return ret);
1200
1201 /* refresh gfxclk table */
1202 ret = vega20_setup_single_dpm_table(hwmgr, sclk_table, PPCLK_GFXCLK);
1203 PP_ASSERT_WITH_CODE(!ret,
1204 "[SetSclkOD] failed to refresh gfxclk table!",
1205 return ret);
1206
1207 return 0;
1208 }
1209
1210 static int vega20_get_mclk_od(
1211 struct pp_hwmgr *hwmgr)
1212 {
1213 struct vega20_hwmgr *data = hwmgr->backend;
1214 struct vega20_single_dpm_table *mclk_table =
1215 &(data->dpm_table.mem_table);
1216 struct vega20_single_dpm_table *golden_mclk_table =
1217 &(data->golden_dpm_table.mem_table);
1218 int value;
1219
1220 /* od percentage */
1221 value = DIV_ROUND_UP((mclk_table->dpm_levels[mclk_table->count - 1].value -
1222 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * 100,
1223 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value);
1224
1225 return value;
1226 }
1227
1228 static int vega20_set_mclk_od(
1229 struct pp_hwmgr *hwmgr, uint32_t value)
1230 {
1231 struct vega20_hwmgr *data = hwmgr->backend;
1232 struct vega20_single_dpm_table *mclk_table =
1233 &(data->dpm_table.mem_table);
1234 struct vega20_single_dpm_table *golden_mclk_table =
1235 &(data->golden_dpm_table.mem_table);
1236 uint32_t od_mclk;
1237 int ret = 0;
1238
1239 od_mclk = golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * value;
1240 do_div(od_mclk, 100);
1241 od_mclk += golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
1242
1243 ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_UCLK_FMAX, od_mclk);
1244 PP_ASSERT_WITH_CODE(!ret,
1245 "[SetMclkOD] failed to set od memclk!",
1246 return ret);
1247
1248 /* refresh memclk table */
1249 ret = vega20_setup_single_dpm_table(hwmgr, mclk_table, PPCLK_UCLK);
1250 PP_ASSERT_WITH_CODE(!ret,
1251 "[SetMclkOD] failed to refresh memclk table!",
1252 return ret);
1253
1254 return 0;
1255 }
1256
1257 static int vega20_populate_umdpstate_clocks(
1258 struct pp_hwmgr *hwmgr)
1259 {
1260 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
1261 struct vega20_single_dpm_table *gfx_table = &(data->dpm_table.gfx_table);
1262 struct vega20_single_dpm_table *mem_table = &(data->dpm_table.mem_table);
1263
1264 hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value;
1265 hwmgr->pstate_mclk = mem_table->dpm_levels[0].value;
1266
1267 if (gfx_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
1268 mem_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL) {
1269 hwmgr->pstate_sclk = gfx_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
1270 hwmgr->pstate_mclk = mem_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
1271 }
1272
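	/* DPM level values are in MHz; scale by 100 so the UMD pstate clocks are in 10 kHz units. */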
1273 hwmgr->pstate_sclk = hwmgr->pstate_sclk * 100;
1274 hwmgr->pstate_mclk = hwmgr->pstate_mclk * 100;
1275
1276 return 0;
1277 }
1278
1279 static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr,
1280 PP_Clock *clock, PPCLK_e clock_select)
1281 {
1282 int ret = 0;
1283
1284 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
1285 PPSMC_MSG_GetDcModeMaxDpmFreq,
1286 (clock_select << 16))) == 0,
1287 "[GetMaxSustainableClock] Failed to get max DC clock from SMC!",
1288 return ret);
1289 vega20_read_arg_from_smc(hwmgr, clock);
1290
1291 /* if DC limit is zero, return AC limit */
1292 if (*clock == 0) {
1293 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
1294 PPSMC_MSG_GetMaxDpmFreq,
1295 (clock_select << 16))) == 0,
1296 "[GetMaxSustainableClock] failed to get max AC clock from SMC!",
1297 return ret);
1298 vega20_read_arg_from_smc(hwmgr, clock);
1299 }
1300
1301 return 0;
1302 }
1303
1304 static int vega20_init_max_sustainable_clocks(struct pp_hwmgr *hwmgr)
1305 {
1306 struct vega20_hwmgr *data =
1307 (struct vega20_hwmgr *)(hwmgr->backend);
1308 struct vega20_max_sustainable_clocks *max_sustainable_clocks =
1309 &(data->max_sustainable_clocks);
1310 int ret = 0;
1311
1312 max_sustainable_clocks->uclock = data->vbios_boot_state.mem_clock / 100;
1313 max_sustainable_clocks->soc_clock = data->vbios_boot_state.soc_clock / 100;
1314 max_sustainable_clocks->dcef_clock = data->vbios_boot_state.dcef_clock / 100;
1315 max_sustainable_clocks->display_clock = 0xFFFFFFFF;
1316 max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
1317 max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;
1318
1319 if (data->smu_features[GNLD_DPM_UCLK].enabled)
1320 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1321 &(max_sustainable_clocks->uclock),
1322 PPCLK_UCLK)) == 0,
1323 "[InitMaxSustainableClocks] failed to get max UCLK from SMC!",
1324 return ret);
1325
1326 if (data->smu_features[GNLD_DPM_SOCCLK].enabled)
1327 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1328 &(max_sustainable_clocks->soc_clock),
1329 PPCLK_SOCCLK)) == 0,
1330 "[InitMaxSustainableClocks] failed to get max SOCCLK from SMC!",
1331 return ret);
1332
1333 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
1334 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1335 &(max_sustainable_clocks->dcef_clock),
1336 PPCLK_DCEFCLK)) == 0,
1337 "[InitMaxSustainableClocks] failed to get max DCEFCLK from SMC!",
1338 return ret);
1339 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1340 &(max_sustainable_clocks->display_clock),
1341 PPCLK_DISPCLK)) == 0,
1342 "[InitMaxSustainableClocks] failed to get max DISPCLK from SMC!",
1343 return ret);
1344 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1345 &(max_sustainable_clocks->phy_clock),
1346 PPCLK_PHYCLK)) == 0,
1347 "[InitMaxSustainableClocks] failed to get max PHYCLK from SMC!",
1348 return ret);
1349 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1350 &(max_sustainable_clocks->pixel_clock),
1351 PPCLK_PIXCLK)) == 0,
1352 "[InitMaxSustainableClocks] failed to get max PIXCLK from SMC!",
1353 return ret);
1354 }
1355
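	/*
	 * Clamp the results: sustained UCLK cannot exceed SOCCLK, and
	 * sustained DCEFCLK cannot exceed UCLK.
	 */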
1356 if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
1357 max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;
1358
1359 if (max_sustainable_clocks->uclock < max_sustainable_clocks->dcef_clock)
1360 max_sustainable_clocks->dcef_clock = max_sustainable_clocks->uclock;
1361
1362 return 0;
1363 }
1364
1365 static void vega20_init_powergate_state(struct pp_hwmgr *hwmgr)
1366 {
1367 struct vega20_hwmgr *data =
1368 (struct vega20_hwmgr *)(hwmgr->backend);
1369
1370 data->uvd_power_gated = true;
1371 data->vce_power_gated = true;
1372
1373 if (data->smu_features[GNLD_DPM_UVD].enabled)
1374 data->uvd_power_gated = false;
1375
1376 if (data->smu_features[GNLD_DPM_VCE].enabled)
1377 data->vce_power_gated = false;
1378 }
1379
1380 static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1381 {
1382 int result = 0;
1383
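	/*
	 * DPM bring-up sequence: report zero displays, program the allowed
	 * feature masks and the PPTable, enable the SMU features, then
	 * derive the default DPM tables, max sustainable clocks, power
	 * control level and OD8/UMD pstate defaults from the live
	 * configuration.
	 */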
1384 smum_send_msg_to_smc_with_parameter(hwmgr,
1385 PPSMC_MSG_NumOfDisplays, 0);
1386
1387 result = vega20_set_allowed_featuresmask(hwmgr);
1388 PP_ASSERT_WITH_CODE(!result,
1389 "[EnableDPMTasks] Failed to set allowed featuresmask!\n",
1390 return result);
1391
1392 result = vega20_init_smc_table(hwmgr);
1393 PP_ASSERT_WITH_CODE(!result,
1394 "[EnableDPMTasks] Failed to initialize SMC table!",
1395 return result);
1396
1397 result = vega20_enable_all_smu_features(hwmgr);
1398 PP_ASSERT_WITH_CODE(!result,
1399 "[EnableDPMTasks] Failed to enable all smu features!",
1400 return result);
1401
1402 /* Initialize UVD/VCE powergating state */
1403 vega20_init_powergate_state(hwmgr);
1404
1405 result = vega20_setup_default_dpm_tables(hwmgr);
1406 PP_ASSERT_WITH_CODE(!result,
1407 "[EnableDPMTasks] Failed to setup default DPM tables!",
1408 return result);
1409
1410 result = vega20_init_max_sustainable_clocks(hwmgr);
1411 PP_ASSERT_WITH_CODE(!result,
1412 "[EnableDPMTasks] Failed to get maximum sustainable clocks!",
1413 return result);
1414
1415 result = vega20_power_control_set_level(hwmgr);
1416 PP_ASSERT_WITH_CODE(!result,
1417 "[EnableDPMTasks] Failed to power control set level!",
1418 return result);
1419
1420 result = vega20_od8_initialize_default_settings(hwmgr);
1421 PP_ASSERT_WITH_CODE(!result,
1422 "[EnableDPMTasks] Failed to initialize odn settings!",
1423 return result);
1424
1425 result = vega20_populate_umdpstate_clocks(hwmgr);
1426 PP_ASSERT_WITH_CODE(!result,
1427 "[EnableDPMTasks] Failed to populate umdpstate clocks!",
1428 return result);
1429
1430 return 0;
1431 }
1432
1433 static uint32_t vega20_find_lowest_dpm_level(
1434 struct vega20_single_dpm_table *table)
1435 {
1436 uint32_t i;
1437
1438 for (i = 0; i < table->count; i++) {
1439 if (table->dpm_levels[i].enabled)
1440 break;
1441 }
1442 if (i >= table->count) {
1443 i = 0;
1444 table->dpm_levels[i].enabled = true;
1445 }
1446
1447 return i;
1448 }
1449
1450 static uint32_t vega20_find_highest_dpm_level(
1451 struct vega20_single_dpm_table *table)
1452 {
1453 int i = 0;
1454
1455 PP_ASSERT_WITH_CODE(table != NULL,
1456 "[FindHighestDPMLevel] DPM Table does not exist!",
1457 return 0);
1458 PP_ASSERT_WITH_CODE(table->count > 0,
1459 "[FindHighestDPMLevel] DPM Table has no entry!",
1460 return 0);
1461 PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER,
1462 "[FindHighestDPMLevel] DPM Table has too many entries!",
1463 return MAX_REGULAR_DPM_NUMBER - 1);
1464
1465 for (i = table->count - 1; i >= 0; i--) {
1466 if (table->dpm_levels[i].enabled)
1467 break;
1468 }
1469 if (i < 0) {
1470 i = 0;
1471 table->dpm_levels[i].enabled = true;
1472 }
1473
1474 return i;
1475 }
1476
1477 static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
1478 {
1479 struct vega20_hwmgr *data =
1480 (struct vega20_hwmgr *)(hwmgr->backend);
1481 uint32_t min_freq;
1482 int ret = 0;
1483
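	/*
	 * Each SetSoftMinByFreq/SetHardMinByFreq message packs the clock id
	 * into the upper 16 bits and the target frequency in MHz into the
	 * lower 16 bits.
	 */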
1484 if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
1485 min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
1486 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1487 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1488 (PPCLK_GFXCLK << 16) | (min_freq & 0xffff))),
1489 "Failed to set soft min gfxclk !",
1490 return ret);
1491 }
1492
1493 if (data->smu_features[GNLD_DPM_UCLK].enabled) {
1494 min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
1495 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1496 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1497 (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
1498 "Failed to set soft min memclk !",
1499 return ret);
1500
1501 min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
1502 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1503 hwmgr, PPSMC_MSG_SetHardMinByFreq,
1504 (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
1505 "Failed to set hard min memclk !",
1506 return ret);
1507 }
1508
1509 if (data->smu_features[GNLD_DPM_UVD].enabled) {
1510 min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;
1511
1512 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1513 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1514 (PPCLK_VCLK << 16) | (min_freq & 0xffff))),
1515 "Failed to set soft min vclk!",
1516 return ret);
1517
1518 min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level;
1519
1520 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1521 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1522 (PPCLK_DCLK << 16) | (min_freq & 0xffff))),
1523 "Failed to set soft min dclk!",
1524 return ret);
1525 }
1526
1527 if (data->smu_features[GNLD_DPM_VCE].enabled) {
1528 min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;
1529
1530 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1531 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1532 (PPCLK_ECLK << 16) | (min_freq & 0xffff))),
1533 "Failed to set soft min eclk!",
1534 return ret);
1535 }
1536
1537 if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
1538 min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;
1539
1540 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1541 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1542 (PPCLK_SOCCLK << 16) | (min_freq & 0xffff))),
1543 "Failed to set soft min socclk!",
1544 return ret);
1545 }
1546
1547 return ret;
1548 }
1549
1550 static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
1551 {
1552 struct vega20_hwmgr *data =
1553 (struct vega20_hwmgr *)(hwmgr->backend);
1554 uint32_t max_freq;
1555 int ret = 0;
1556
1557 if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
1558 max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;
1559
1560 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1561 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1562 (PPCLK_GFXCLK << 16) | (max_freq & 0xffff))),
1563 "Failed to set soft max gfxclk!",
1564 return ret);
1565 }
1566
1567 if (data->smu_features[GNLD_DPM_UCLK].enabled) {
1568 max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;
1569
1570 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1571 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1572 (PPCLK_UCLK << 16) | (max_freq & 0xffff))),
1573 "Failed to set soft max memclk!",
1574 return ret);
1575 }
1576
1577 if (data->smu_features[GNLD_DPM_UVD].enabled) {
1578 max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;
1579
1580 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1581 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1582 (PPCLK_VCLK << 16) | (max_freq & 0xffff))),
1583 "Failed to set soft max vclk!",
1584 return ret);
1585
1586 max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
1587 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1588 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1589 (PPCLK_DCLK << 16) | (max_freq & 0xffff))),
1590 "Failed to set soft max dclk!",
1591 return ret);
1592 }
1593
1594 if (data->smu_features[GNLD_DPM_VCE].enabled) {
1595 max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;
1596
1597 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1598 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1599 (PPCLK_ECLK << 16) | (max_freq & 0xffff))),
1600 "Failed to set soft max eclk!",
1601 return ret);
1602 }
1603
1604 if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
1605 max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;
1606
1607 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1608 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1609 (PPCLK_SOCCLK << 16) | (max_freq & 0xffff))),
1610 "Failed to set soft max socclk!",
1611 return ret);
1612 }
1613
1614 return ret;
1615 }
1616
1617 int vega20_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
1618 {
1619 struct vega20_hwmgr *data =
1620 (struct vega20_hwmgr *)(hwmgr->backend);
1621 int ret = 0;
1622
1623 if (data->smu_features[GNLD_DPM_VCE].supported) {
1624 if (data->smu_features[GNLD_DPM_VCE].enabled == enable) {
1625 if (enable)
1626 PP_DBG_LOG("[EnableDisableVCEDPM] feature VCE DPM already enabled!\n");
1627 else
1628 PP_DBG_LOG("[EnableDisableVCEDPM] feature VCE DPM already disabled!\n");
1629 }
1630
1631 ret = vega20_enable_smc_features(hwmgr,
1632 enable,
1633 data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap);
1634 PP_ASSERT_WITH_CODE(!ret,
1635 "Attempt to Enable/Disable DPM VCE Failed!",
1636 return ret);
1637 data->smu_features[GNLD_DPM_VCE].enabled = enable;
1638 }
1639
1640 return 0;
1641 }
1642
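/*
 * Ask the SMC for the minimum or maximum attainable frequency of a clock
 * domain: the PPCLK selector goes into the upper 16 bits of the argument
 * and the result is read back from the SMC argument register.
 */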
1643 static int vega20_get_clock_ranges(struct pp_hwmgr *hwmgr,
1644 uint32_t *clock,
1645 PPCLK_e clock_select,
1646 bool max)
1647 {
1648 int ret;
1649 *clock = 0;
1650
1651 if (max) {
1652 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
1653 PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16))) == 0,
1654 "[GetClockRanges] Failed to get max clock from SMC!",
1655 return ret);
1656 vega20_read_arg_from_smc(hwmgr, clock);
1657 } else {
1658 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
1659 PPSMC_MSG_GetMinDpmFreq,
1660 (clock_select << 16))) == 0,
1661 "[GetClockRanges] Failed to get min clock from SMC!",
1662 return ret);
1663 vega20_read_arg_from_smc(hwmgr, clock);
1664 }
1665
1666 return 0;
1667 }
1668
1669 static uint32_t vega20_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
1670 {
1671 struct vega20_hwmgr *data =
1672 (struct vega20_hwmgr *)(hwmgr->backend);
1673 uint32_t gfx_clk;
1674 int ret = 0;
1675
1676 PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_GFXCLK].enabled,
1677 "[GetSclks]: gfxclk dpm not enabled!\n",
1678 return -EPERM);
1679
1680 if (low) {
1681 ret = vega20_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, false);
1682 PP_ASSERT_WITH_CODE(!ret,
1683 "[GetSclks]: fail to get min PPCLK_GFXCLK\n",
1684 return ret);
1685 } else {
1686 ret = vega20_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, true);
1687 PP_ASSERT_WITH_CODE(!ret,
1688 "[GetSclks]: fail to get max PPCLK_GFXCLK\n",
1689 return ret);
1690 }
1691
1692 return (gfx_clk * 100);
1693 }
1694
1695 static uint32_t vega20_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
1696 {
1697 struct vega20_hwmgr *data =
1698 (struct vega20_hwmgr *)(hwmgr->backend);
1699 uint32_t mem_clk;
1700 int ret = 0;
1701
1702 PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_UCLK].enabled,
1703 "[MemMclks]: memclk dpm not enabled!\n",
1704 return -EPERM);
1705
1706 if (low) {
1707 ret = vega20_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, false);
1708 PP_ASSERT_WITH_CODE(!ret,
1709 "[GetMclks]: fail to get min PPCLK_UCLK\n",
1710 return ret);
1711 } else {
1712 ret = vega20_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, true);
1713 PP_ASSERT_WITH_CODE(!ret,
1714 "[GetMclks]: fail to get max PPCLK_UCLK\n",
1715 return ret);
1716 }
1717
1718 return (mem_clk * 100);
1719 }
1720
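/*
 * Current socket power comes from the SMU metrics table; the value is
 * shifted left by 8 to produce the fixed-point encoding used for
 * AMDGPU_PP_SENSOR_GPU_POWER readings.
 */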
1721 static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
1722 uint32_t *query)
1723 {
1724 int ret = 0;
1725 SmuMetrics_t metrics_table;
1726
1727 ret = vega20_copy_table_from_smc(hwmgr, (uint8_t *)&metrics_table, TABLE_SMU_METRICS);
1728 PP_ASSERT_WITH_CODE(!ret,
1729 "Failed to export SMU METRICS table!",
1730 return ret);
1731
1732 *query = metrics_table.CurrSocketPower << 8;
1733
1734 return ret;
1735 }
1736
1737 static int vega20_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx_freq)
1738 {
1739 uint32_t gfx_clk = 0;
1740 int ret = 0;
1741
1742 *gfx_freq = 0;
1743
1744 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
1745 PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16))) == 0,
1746 "[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
1747 return ret);
1748 vega20_read_arg_from_smc(hwmgr, &gfx_clk);
1749
1750 *gfx_freq = gfx_clk * 100;
1751
1752 return 0;
1753 }
1754
1755 static int vega20_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_freq)
1756 {
1757 uint32_t mem_clk = 0;
1758 int ret = 0;
1759
1760 *mclk_freq = 0;
1761
1762 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
1763 PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16))) == 0,
1764 "[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!",
1765 return ret);
1766 vega20_read_arg_from_smc(hwmgr, &mem_clk);
1767
1768 *mclk_freq = mem_clk * 100;
1769
1770 return 0;
1771 }
1772
1773 static int vega20_get_current_activity_percent(struct pp_hwmgr *hwmgr,
1774 uint32_t *activity_percent)
1775 {
1776 int ret = 0;
1777 SmuMetrics_t metrics_table;
1778
1779 ret = vega20_copy_table_from_smc(hwmgr, (uint8_t *)&metrics_table, TABLE_SMU_METRICS);
1780 PP_ASSERT_WITH_CODE(!ret,
1781 "Failed to export SMU METRICS table!",
1782 return ret);
1783
1784 *activity_percent = metrics_table.AverageGfxActivity;
1785
1786 return ret;
1787 }
1788
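/*
 * Sensor dispatch for the amdgpu sensor interface; *size is reported in
 * bytes and most sensors return a single 32-bit value.
 */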
1789 static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1790 void *value, int *size)
1791 {
1792 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
1793 int ret = 0;
1794
1795 switch (idx) {
1796 case AMDGPU_PP_SENSOR_GFX_SCLK:
1797 ret = vega20_get_current_gfx_clk_freq(hwmgr, (uint32_t *)value);
1798 if (!ret)
1799 *size = 4;
1800 break;
1801 case AMDGPU_PP_SENSOR_GFX_MCLK:
1802 ret = vega20_get_current_mclk_freq(hwmgr, (uint32_t *)value);
1803 if (!ret)
1804 *size = 4;
1805 break;
1806 case AMDGPU_PP_SENSOR_GPU_LOAD:
1807 ret = vega20_get_current_activity_percent(hwmgr, (uint32_t *)value);
1808 if (!ret)
1809 *size = 4;
1810 break;
1811 case AMDGPU_PP_SENSOR_GPU_TEMP:
1812 *((uint32_t *)value) = vega20_thermal_get_temperature(hwmgr);
1813 *size = 4;
1814 break;
1815 case AMDGPU_PP_SENSOR_UVD_POWER:
1816 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
1817 *size = 4;
1818 break;
1819 case AMDGPU_PP_SENSOR_VCE_POWER:
1820 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
1821 *size = 4;
1822 break;
1823 case AMDGPU_PP_SENSOR_GPU_POWER:
1824 *size = 16;
1825 ret = vega20_get_gpu_power(hwmgr, (uint32_t *)value);
1826 break;
1827 default:
1828 ret = -EINVAL;
1829 break;
1830 }
1831 return ret;
1832 }
1833
1834 static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr,
1835 bool has_disp)
1836 {
1837 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
1838
1839 if (data->smu_features[GNLD_DPM_UCLK].enabled)
1840 return smum_send_msg_to_smc_with_parameter(hwmgr,
1841 PPSMC_MSG_SetUclkFastSwitch,
1842 has_disp ? 1 : 0);
1843
1844 return 0;
1845 }
1846
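/*
 * Handle a display clock request from DAL by programming a hard minimum for
 * the selected display clock. Note that the DCEF path divides the requested
 * kHz value by 100 while the other clocks divide by 1000.
 */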
1847 int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
1848 struct pp_display_clock_request *clock_req)
1849 {
1850 int result = 0;
1851 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
1852 enum amd_pp_clock_type clk_type = clock_req->clock_type;
1853 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
1854 PPCLK_e clk_select = 0;
1855 uint32_t clk_request = 0;
1856
1857 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
1858 switch (clk_type) {
1859 case amd_pp_dcef_clock:
1860 clk_freq = clock_req->clock_freq_in_khz / 100;
1861 clk_select = PPCLK_DCEFCLK;
1862 break;
1863 case amd_pp_disp_clock:
1864 clk_select = PPCLK_DISPCLK;
1865 break;
1866 case amd_pp_pixel_clock:
1867 clk_select = PPCLK_PIXCLK;
1868 break;
1869 case amd_pp_phy_clock:
1870 clk_select = PPCLK_PHYCLK;
1871 break;
1872 default:
1873 pr_info("[DisplayClockVoltageRequest] Invalid Clock Type!");
1874 result = -EINVAL;
1875 break;
1876 }
1877
1878 if (!result) {
1879 clk_request = (clk_select << 16) | clk_freq;
1880 result = smum_send_msg_to_smc_with_parameter(hwmgr,
1881 PPSMC_MSG_SetHardMinByFreq,
1882 clk_request);
1883 }
1884 }
1885
1886 return result;
1887 }
1888
1889 static int vega20_notify_smc_display_config_after_ps_adjustment(
1890 struct pp_hwmgr *hwmgr)
1891 {
1892 struct vega20_hwmgr *data =
1893 (struct vega20_hwmgr *)(hwmgr->backend);
1894 struct PP_Clocks min_clocks = {0};
1895 struct pp_display_clock_request clock_req;
1896 int ret = 0;
1897
1898 if ((hwmgr->display_config->num_display > 1) &&
1899 !hwmgr->display_config->multi_monitor_in_sync)
1900 vega20_notify_smc_display_change(hwmgr, false);
1901 else
1902 vega20_notify_smc_display_change(hwmgr, true);
1903
1904 min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
1905 min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
1906 min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
1907
1908 if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
1909 clock_req.clock_type = amd_pp_dcef_clock;
1910 clock_req.clock_freq_in_khz = min_clocks.dcefClock;
1911 if (!vega20_display_clock_voltage_request(hwmgr, &clock_req)) {
1912 if (data->smu_features[GNLD_DS_DCEFCLK].supported)
1913 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(
1914 hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
1915 min_clocks.dcefClockInSR / 100)) == 0,
1916 "Attempt to set divider for DCEFCLK Failed!",
1917 return ret);
1918 } else {
1919 pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
1920 }
1921 }
1922
1923 return 0;
1924 }
1925
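/*
 * Forcing the highest (or, below, the lowest) DPM level pins soft_min_level
 * and soft_max_level of the gfx and memory tables to the same entry and
 * re-uploads both limits to the SMC.
 */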
1926 static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr)
1927 {
1928 struct vega20_hwmgr *data =
1929 (struct vega20_hwmgr *)(hwmgr->backend);
1930 uint32_t soft_level;
1931 int ret = 0;
1932
1933 soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
1934
1935 data->dpm_table.gfx_table.dpm_state.soft_min_level =
1936 data->dpm_table.gfx_table.dpm_state.soft_max_level =
1937 data->dpm_table.gfx_table.dpm_levels[soft_level].value;
1938
1939 soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
1940
1941 data->dpm_table.mem_table.dpm_state.soft_min_level =
1942 data->dpm_table.mem_table.dpm_state.soft_max_level =
1943 data->dpm_table.mem_table.dpm_levels[soft_level].value;
1944
1945 ret = vega20_upload_dpm_min_level(hwmgr);
1946 PP_ASSERT_WITH_CODE(!ret,
1947 "Failed to upload boot level to highest!",
1948 return ret);
1949
1950 ret = vega20_upload_dpm_max_level(hwmgr);
1951 PP_ASSERT_WITH_CODE(!ret,
1952 "Failed to upload dpm max level to highest!",
1953 return ret);
1954
1955 return 0;
1956 }
1957
1958 static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
1959 {
1960 struct vega20_hwmgr *data =
1961 (struct vega20_hwmgr *)(hwmgr->backend);
1962 uint32_t soft_level;
1963 int ret = 0;
1964
1965 soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
1966
1967 data->dpm_table.gfx_table.dpm_state.soft_min_level =
1968 data->dpm_table.gfx_table.dpm_state.soft_max_level =
1969 data->dpm_table.gfx_table.dpm_levels[soft_level].value;
1970
1971 soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
1972
1973 data->dpm_table.mem_table.dpm_state.soft_min_level =
1974 data->dpm_table.mem_table.dpm_state.soft_max_level =
1975 data->dpm_table.mem_table.dpm_levels[soft_level].value;
1976
1977 ret = vega20_upload_dpm_min_level(hwmgr);
1978 PP_ASSERT_WITH_CODE(!ret,
1979 "Failed to upload boot level to highest!",
1980 return ret);
1981
1982 ret = vega20_upload_dpm_max_level(hwmgr);
1983 PP_ASSERT_WITH_CODE(!ret,
1984 "Failed to upload dpm max level to highest!",
1985 return ret);
1986
1987 return 0;
1988
1989 }
1990
1991 static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
1992 {
1993 int ret = 0;
1994
1995 ret = vega20_upload_dpm_min_level(hwmgr);
1996 PP_ASSERT_WITH_CODE(!ret,
1997 "Failed to upload DPM Bootup Levels!",
1998 return ret);
1999
2000 ret = vega20_upload_dpm_max_level(hwmgr);
2001 PP_ASSERT_WITH_CODE(!ret,
2002 "Failed to upload DPM Max Levels!",
2003 return ret);
2004
2005 return 0;
2006 }
2007
2008 static int vega20_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
2009 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
2010 {
2011 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2012 struct vega20_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
2013 struct vega20_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
2014 struct vega20_single_dpm_table *soc_dpm_table = &(data->dpm_table.soc_table);
2015
2016 *sclk_mask = 0;
2017 *mclk_mask = 0;
2018 *soc_mask = 0;
2019
2020 if (gfx_dpm_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
2021 mem_dpm_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL &&
2022 soc_dpm_table->count > VEGA20_UMD_PSTATE_SOCCLK_LEVEL) {
2023 *sclk_mask = VEGA20_UMD_PSTATE_GFXCLK_LEVEL;
2024 *mclk_mask = VEGA20_UMD_PSTATE_MCLK_LEVEL;
2025 *soc_mask = VEGA20_UMD_PSTATE_SOCCLK_LEVEL;
2026 }
2027
2028 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
2029 *sclk_mask = 0;
2030 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
2031 *mclk_mask = 0;
2032 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
2033 *sclk_mask = gfx_dpm_table->count - 1;
2034 *mclk_mask = mem_dpm_table->count - 1;
2035 *soc_mask = soc_dpm_table->count - 1;
2036 }
2037
2038 return 0;
2039 }
2040
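/*
 * sysfs clock forcing: the mask selects DPM levels by bit index, so the
 * lowest set bit becomes the soft minimum and the highest set bit the soft
 * maximum before both limits are re-uploaded.
 */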
2041 static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
2042 enum pp_clock_type type, uint32_t mask)
2043 {
2044 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2045 uint32_t soft_min_level, soft_max_level;
2046 int ret = 0;
2047
2048 switch (type) {
2049 case PP_SCLK:
2050 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2051 soft_max_level = mask ? (fls(mask) - 1) : 0;
2052
2053 data->dpm_table.gfx_table.dpm_state.soft_min_level =
2054 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
2055 data->dpm_table.gfx_table.dpm_state.soft_max_level =
2056 data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
2057
2058 ret = vega20_upload_dpm_min_level(hwmgr);
2059 PP_ASSERT_WITH_CODE(!ret,
2060 "Failed to upload boot level to lowest!",
2061 return ret);
2062
2063 ret = vega20_upload_dpm_max_level(hwmgr);
2064 PP_ASSERT_WITH_CODE(!ret,
2065 "Failed to upload dpm max level to highest!",
2066 return ret);
2067 break;
2068
2069 case PP_MCLK:
2070 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2071 soft_max_level = mask ? (fls(mask) - 1) : 0;
2072
2073 data->dpm_table.mem_table.dpm_state.soft_min_level =
2074 data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
2075 data->dpm_table.mem_table.dpm_state.soft_max_level =
2076 data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
2077
2078 ret = vega20_upload_dpm_min_level(hwmgr);
2079 PP_ASSERT_WITH_CODE(!ret,
2080 "Failed to upload boot level to lowest!",
2081 return ret);
2082
2083 ret = vega20_upload_dpm_max_level(hwmgr);
2084 PP_ASSERT_WITH_CODE(!ret,
2085 "Failed to upload dpm max level to highest!",
2086 return ret);
2087
2088 break;
2089
2090 case PP_PCIE:
2091 break;
2092
2093 default:
2094 break;
2095 }
2096
2097 return 0;
2098 }
2099
2100 static int vega20_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
2101 enum amd_dpm_forced_level level)
2102 {
2103 int ret = 0;
2104 uint32_t sclk_mask, mclk_mask, soc_mask;
2105
2106 switch (level) {
2107 case AMD_DPM_FORCED_LEVEL_HIGH:
2108 ret = vega20_force_dpm_highest(hwmgr);
2109 break;
2110
2111 case AMD_DPM_FORCED_LEVEL_LOW:
2112 ret = vega20_force_dpm_lowest(hwmgr);
2113 break;
2114
2115 case AMD_DPM_FORCED_LEVEL_AUTO:
2116 ret = vega20_unforce_dpm_levels(hwmgr);
2117 break;
2118
2119 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
2120 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
2121 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
2122 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
2123 ret = vega20_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
2124 if (ret)
2125 return ret;
2126 vega20_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
2127 vega20_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
2128 break;
2129
2130 case AMD_DPM_FORCED_LEVEL_MANUAL:
2131 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
2132 default:
2133 break;
2134 }
2135
2136 return ret;
2137 }
2138
2139 static uint32_t vega20_get_fan_control_mode(struct pp_hwmgr *hwmgr)
2140 {
2141 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2142
2143 if (!data->smu_features[GNLD_FAN_CONTROL].enabled)
2144 return AMD_FAN_CTRL_MANUAL;
2145 else
2146 return AMD_FAN_CTRL_AUTO;
2147 }
2148
2149 static int vega20_get_dal_power_level(struct pp_hwmgr *hwmgr,
2150 struct amd_pp_simple_clock_info *info)
2151 {
2152 #if 0
2153 struct phm_ppt_v2_information *table_info =
2154 (struct phm_ppt_v2_information *)hwmgr->pptable;
2155 struct phm_clock_and_voltage_limits *max_limits =
2156 &table_info->max_clock_voltage_on_ac;
2157
2158 info->engine_max_clock = max_limits->sclk;
2159 info->memory_max_clock = max_limits->mclk;
2160 #endif
2161 return 0;
2162 }
2163
2164
2165 static int vega20_get_sclks(struct pp_hwmgr *hwmgr,
2166 struct pp_clock_levels_with_latency *clocks)
2167 {
2168 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2169 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
2170 int i, count;
2171
2172 PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_GFXCLK].enabled,
2173 "[GetSclks]: gfxclk dpm not enabled!\n",
2174 return -EPERM);
2175
2176 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
2177 clocks->num_levels = count;
2178
2179 for (i = 0; i < count; i++) {
2180 clocks->data[i].clocks_in_khz =
2181 dpm_table->dpm_levels[i].value * 100;
2182 clocks->data[i].latency_in_us = 0;
2183 }
2184
2185 return 0;
2186 }
2187
2188 static uint32_t vega20_get_mem_latency(struct pp_hwmgr *hwmgr,
2189 uint32_t clock)
2190 {
2191 return 25;
2192 }
2193
2194 static int vega20_get_memclocks(struct pp_hwmgr *hwmgr,
2195 struct pp_clock_levels_with_latency *clocks)
2196 {
2197 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2198 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.mem_table);
2199 int i, count;
2200
2201 PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_UCLK].enabled,
2202 "[GetMclks]: uclk dpm not enabled!\n",
2203 return -EPERM);
2204
2205 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
2206 clocks->num_levels = data->mclk_latency_table.count = count;
2207
2208 for (i = 0; i < count; i++) {
2209 clocks->data[i].clocks_in_khz =
2210 data->mclk_latency_table.entries[i].frequency =
2211 dpm_table->dpm_levels[i].value * 100;
2212 clocks->data[i].latency_in_us =
2213 data->mclk_latency_table.entries[i].latency =
2214 vega20_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value);
2215 }
2216
2217 return 0;
2218 }
2219
2220 static int vega20_get_dcefclocks(struct pp_hwmgr *hwmgr,
2221 struct pp_clock_levels_with_latency *clocks)
2222 {
2223 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2224 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.dcef_table);
2225 int i, count;
2226
2227 PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_DCEFCLK].enabled,
2228 "[GetDcfclocks]: dcefclk dpm not enabled!\n",
2229 return -EPERM);
2230
2231 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
2232 clocks->num_levels = count;
2233
2234 for (i = 0; i < count; i++) {
2235 clocks->data[i].clocks_in_khz =
2236 dpm_table->dpm_levels[i].value * 100;
2237 clocks->data[i].latency_in_us = 0;
2238 }
2239
2240 return 0;
2241 }
2242
2243 static int vega20_get_socclocks(struct pp_hwmgr *hwmgr,
2244 struct pp_clock_levels_with_latency *clocks)
2245 {
2246 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2247 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.soc_table);
2248 int i, count;
2249
2250 PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_SOCCLK].enabled,
2251 "[GetSocclks]: socclk dpm not enabled!\n",
2252 return -EPERM);
2253
2254 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
2255 clocks->num_levels = count;
2256
2257 for (i = 0; i < count; i++) {
2258 clocks->data[i].clocks_in_khz =
2259 dpm_table->dpm_levels[i].value * 100;
2260 clocks->data[i].latency_in_us = 0;
2261 }
2262
2263 return 0;
2264
2265 }
2266
2267 static int vega20_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
2268 enum amd_pp_clock_type type,
2269 struct pp_clock_levels_with_latency *clocks)
2270 {
2271 int ret;
2272
2273 switch (type) {
2274 case amd_pp_sys_clock:
2275 ret = vega20_get_sclks(hwmgr, clocks);
2276 break;
2277 case amd_pp_mem_clock:
2278 ret = vega20_get_memclocks(hwmgr, clocks);
2279 break;
2280 case amd_pp_dcef_clock:
2281 ret = vega20_get_dcefclocks(hwmgr, clocks);
2282 break;
2283 case amd_pp_soc_clock:
2284 ret = vega20_get_socclocks(hwmgr, clocks);
2285 break;
2286 default:
2287 return -EINVAL;
2288 }
2289
2290 return ret;
2291 }
2292
2293 static int vega20_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
2294 enum amd_pp_clock_type type,
2295 struct pp_clock_levels_with_voltage *clocks)
2296 {
2297 clocks->num_levels = 0;
2298
2299 return 0;
2300 }
2301
2302 static int vega20_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
2303 void *clock_ranges)
2304 {
2305 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2306 Watermarks_t *table = &(data->smc_state_table.water_marks_table);
2307 struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
2308
2309 if (!data->registry_data.disable_water_mark &&
2310 data->smu_features[GNLD_DPM_DCEFCLK].supported &&
2311 data->smu_features[GNLD_DPM_SOCCLK].supported) {
2312 smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
2313 data->water_marks_bitmap |= WaterMarksExist;
2314 data->water_marks_bitmap &= ~WaterMarksLoaded;
2315 }
2316
2317 return 0;
2318 }
2319
2320 static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
2321 enum pp_clock_type type, char *buf)
2322 {
2323 int i, size = 0, ret = 0;
2324 struct pp_clock_levels_with_latency clocks;
2325 uint32_t now;
2326
2327 switch (type) {
2328 case PP_SCLK:
2329 ret = vega20_get_current_gfx_clk_freq(hwmgr, &now);
2330 PP_ASSERT_WITH_CODE(!ret,
2331 "Attempt to get current gfx clk Failed!",
2332 return ret);
2333
2334 ret = vega20_get_sclks(hwmgr, &clocks);
2335 PP_ASSERT_WITH_CODE(!ret,
2336 "Attempt to get gfx clk levels Failed!",
2337 return ret);
2338
2339 for (i = 0; i < clocks.num_levels; i++)
2340 size += sprintf(buf + size, "%d: %uMHz %s\n",
2341 i, clocks.data[i].clocks_in_khz / 100,
2342 (clocks.data[i].clocks_in_khz == now) ? "*" : "");
2343 break;
2344
2345 case PP_MCLK:
2346 ret = vega20_get_current_mclk_freq(hwmgr, &now);
2347 PP_ASSERT_WITH_CODE(!ret,
2348 "Attempt to get current mclk freq Failed!",
2349 return ret);
2350
2351 ret = vega20_get_memclocks(hwmgr, &clocks);
2352 PP_ASSERT_WITH_CODE(!ret,
2353 "Attempt to get memory clk levels Failed!",
2354 return ret);
2355
2356 for (i = 0; i < clocks.num_levels; i++)
2357 size += sprintf(buf + size, "%d: %uMHz %s\n",
2358 i, clocks.data[i].clocks_in_khz / 100,
2359 (clocks.data[i].clocks_in_khz == now) ? "*" : "");
2360 break;
2361
2362 case PP_PCIE:
2363 break;
2364
2365 default:
2366 break;
2367 }
2368 return size;
2369 }
2370
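/*
 * Raise the UCLK hard minimum to the top DPM level; used before display
 * configuration changes, presumably so that memory bandwidth stays high
 * while the new configuration is being applied.
 */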
2371 static int vega20_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
2372 struct vega20_single_dpm_table *dpm_table)
2373 {
2374 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2375 int ret = 0;
2376
2377 if (data->smu_features[GNLD_DPM_UCLK].enabled) {
2378 PP_ASSERT_WITH_CODE(dpm_table->count > 0,
2379 "[SetUclkToHightestDpmLevel] Dpm table has no entry!",
2380 return -EINVAL);
2381 PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_UCLK_DPM_LEVELS,
2382 "[SetUclkToHightestDpmLevel] Dpm table has too many entries!",
2383 return -EINVAL);
2384
2385 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2386 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2387 PPSMC_MSG_SetHardMinByFreq,
2388 (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level)),
2389 "[SetUclkToHighestDpmLevel] Set hard min uclk failed!",
2390 return ret);
2391 }
2392
2393 return ret;
2394 }
2395
2396 static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
2397 {
2398 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2399 int ret = 0;
2400
2401 smum_send_msg_to_smc_with_parameter(hwmgr,
2402 PPSMC_MSG_NumOfDisplays, 0);
2403
2404 ret = vega20_set_uclk_to_highest_dpm_level(hwmgr,
2405 &data->dpm_table.mem_table);
2406
2407 return ret;
2408 }
2409
2410 static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
2411 {
2412 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2413 int result = 0;
2414 Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
2415
2416 if ((data->water_marks_bitmap & WaterMarksExist) &&
2417 !(data->water_marks_bitmap & WaterMarksLoaded)) {
2418 result = vega20_copy_table_to_smc(hwmgr,
2419 (uint8_t *)wm_table, TABLE_WATERMARKS);
2420 PP_ASSERT_WITH_CODE(!result,
2421 "Failed to update WMTABLE!",
2422 return result);
2423 data->water_marks_bitmap |= WaterMarksLoaded;
2424 }
2425
2426 if ((data->water_marks_bitmap & WaterMarksExist) &&
2427 data->smu_features[GNLD_DPM_DCEFCLK].supported &&
2428 data->smu_features[GNLD_DPM_SOCCLK].supported) {
2429 result = smum_send_msg_to_smc_with_parameter(hwmgr,
2430 PPSMC_MSG_NumOfDisplays,
2431 hwmgr->display_config->num_display);
2432 }
2433
2434 return result;
2435 }
2436
2437 int vega20_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
2438 {
2439 struct vega20_hwmgr *data =
2440 (struct vega20_hwmgr *)(hwmgr->backend);
2441 int ret = 0;
2442
2443 if (data->smu_features[GNLD_DPM_UVD].supported) {
2444 if (data->smu_features[GNLD_DPM_UVD].enabled == enable) {
2445 if (enable)
2446 PP_DBG_LOG("[EnableDisableUVDDPM] feature DPM UVD already enabled!\n");
2447 else
2448 PP_DBG_LOG("[EnableDisableUVDDPM] feature DPM UVD already disabled!\n");
2449 }
2450
2451 ret = vega20_enable_smc_features(hwmgr,
2452 enable,
2453 data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap);
2454 PP_ASSERT_WITH_CODE(!ret,
2455 "[EnableDisableUVDDPM] Attempt to Enable/Disable DPM UVD Failed!",
2456 return ret);
2457 data->smu_features[GNLD_DPM_UVD].enabled = enable;
2458 }
2459
2460 return 0;
2461 }
2462
2463 static void vega20_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
2464 {
2465 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2466
2467 if (data->vce_power_gated == bgate)
2468 return;
2469
2470 data->vce_power_gated = bgate;
2471 vega20_enable_disable_vce_dpm(hwmgr, !bgate);
2472 }
2473
2474 static void vega20_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
2475 {
2476 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2477
2478 if (data->uvd_power_gated == bgate)
2479 return;
2480
2481 data->uvd_power_gated = bgate;
2482 vega20_enable_disable_uvd_dpm(hwmgr, !bgate);
2483 }
2484
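/*
 * Recompute the soft/hard limits of every DPM table for the current request:
 * defaults span the whole table, UMD p-state and profiling levels clamp
 * gfx/mem/vclk/dclk/soc/eclk, and the UCLK hard minimum honours DAL's
 * request plus the mclk switch latency constraint.
 */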
2485 static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
2486 {
2487 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2488 struct vega20_single_dpm_table *dpm_table;
2489 bool vblank_too_short = false;
2490 bool disable_mclk_switching;
2491 uint32_t i, latency;
2492
2493 disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
2494 !hwmgr->display_config->multi_monitor_in_sync) ||
2495 vblank_too_short;
2496 latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
2497
2498 /* gfxclk */
2499 dpm_table = &(data->dpm_table.gfx_table);
2500 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
2501 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2502 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
2503 dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2504
2505 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
2506 if (VEGA20_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
2507 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
2508 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
2509 }
2510
2511 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
2512 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
2513 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
2514 }
2515
2516 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
2517 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2518 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2519 }
2520 }
2521
2522 /* memclk */
2523 dpm_table = &(data->dpm_table.mem_table);
2524 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
2525 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2526 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
2527 dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2528
2529 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
2530 if (VEGA20_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
2531 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
2532 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
2533 }
2534
2535 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
2536 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
2537 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
2538 }
2539
2540 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
2541 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2542 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2543 }
2544 }
2545
2546 /* honour DAL's UCLK Hardmin */
2547 if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100))
2548 dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100;
2549
2550 /* Hardmin is dependent on displayconfig */
2551 if (disable_mclk_switching) {
2552 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2553 for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
2554 if (data->mclk_latency_table.entries[i].latency <= latency) {
2555 if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) {
2556 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
2557 break;
2558 }
2559 }
2560 }
2561 }
2562
2563 if (hwmgr->display_config->nb_pstate_switch_disable)
2564 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2565
2566 /* vclk */
2567 dpm_table = &(data->dpm_table.vclk_table);
2568 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
2569 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2570 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
2571 dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2572
2573 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
2574 if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
2575 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
2576 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
2577 }
2578
2579 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
2580 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2581 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2582 }
2583 }
2584
2585 /* dclk */
2586 dpm_table = &(data->dpm_table.dclk_table);
2587 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
2588 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2589 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
2590 dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2591
2592 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
2593 if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
2594 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
2595 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
2596 }
2597
2598 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
2599 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2600 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2601 }
2602 }
2603
2604 /* socclk */
2605 dpm_table = &(data->dpm_table.soc_table);
2606 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
2607 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2608 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
2609 dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2610
2611 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
2612 if (VEGA20_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
2613 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value;
2614 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value;
2615 }
2616
2617 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
2618 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2619 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2620 }
2621 }
2622
2623 /* eclk */
2624 dpm_table = &(data->dpm_table.eclk_table);
2625 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
2626 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2627 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
2628 dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2629
2630 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
2631 if (VEGA20_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
2632 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value;
2633 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value;
2634 }
2635
2636 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
2637 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2638 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
2639 }
2640 }
2641
2642 return 0;
2643 }
2644
2645 static bool
2646 vega20_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
2647 {
2648 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2649 bool is_update_required = false;
2650
2651 if (data->display_timing.num_existing_displays !=
2652 hwmgr->display_config->num_display)
2653 is_update_required = true;
2654
2655 if (data->registry_data.gfx_clk_deep_sleep_support &&
2656 (data->display_timing.min_clock_in_sr !=
2657 hwmgr->display_config->min_core_set_clock_in_sr))
2658 is_update_required = true;
2659
2660 return is_update_required;
2661 }
2662
2663 static int vega20_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
2664 {
2665 int ret = 0;
2666
2667 ret = vega20_disable_all_smu_features(hwmgr);
2668 PP_ASSERT_WITH_CODE(!ret,
2669 "[DisableDpmTasks] Failed to disable all smu features!",
2670 return ret);
2671
2672 return 0;
2673 }
2674
2675 static int vega20_power_off_asic(struct pp_hwmgr *hwmgr)
2676 {
2677 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2678 int result;
2679
2680 result = vega20_disable_dpm_tasks(hwmgr);
2681 PP_ASSERT_WITH_CODE((0 == result),
2682 "[PowerOffAsic] Failed to disable DPM!",
2683 );
2684 data->water_marks_bitmap &= ~(WaterMarksLoaded);
2685
2686 return result;
2687 }
2688
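/*
 * Dump the activity monitor coefficients of every workload profile in
 * tabular form for sysfs (pp_power_profile_mode); the active profile is
 * marked with '*'.
 */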
2689 static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
2690 {
2691 DpmActivityMonitorCoeffInt_t activity_monitor;
2692 uint32_t i, size = 0;
2693 uint16_t workload_type = 0;
2694 static const char *profile_name[] = {
2695 "3D_FULL_SCREEN",
2696 "POWER_SAVING",
2697 "VIDEO",
2698 "VR",
2699 "COMPUTE",
2700 "CUSTOM"};
2701 static const char *title[] = {
2702 "PROFILE_INDEX(NAME)",
2703 "CLOCK_TYPE(NAME)",
2704 "FPS",
2705 "UseRlcBusy",
2706 "MinActiveFreqType",
2707 "MinActiveFreq",
2708 "BoosterFreqType",
2709 "BoosterFreq",
2710 "PD_Data_limit_c",
2711 "PD_Data_error_coeff",
2712 "PD_Data_error_rate_coeff"};
2713 int result = 0;
2714
2715 if (!buf)
2716 return -EINVAL;
2717
2718 size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
2719 title[0], title[1], title[2], title[3], title[4], title[5],
2720 title[6], title[7], title[8], title[9], title[10]);
2721
2722 for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
2723 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
2724 workload_type = i + 1;
2725 result = vega20_get_activity_monitor_coeff(hwmgr,
2726 (uint8_t *)(&activity_monitor), workload_type);
2727 PP_ASSERT_WITH_CODE(!result,
2728 "[GetPowerProfile] Failed to get activity monitor!",
2729 return result);
2730
2731 size += sprintf(buf + size, "%2d(%14s%s)\n",
2732 i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ");
2733
2734 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
2735 " ",
2736 0,
2737 "GFXCLK",
2738 activity_monitor.Gfx_FPS,
2739 activity_monitor.Gfx_UseRlcBusy,
2740 activity_monitor.Gfx_MinActiveFreqType,
2741 activity_monitor.Gfx_MinActiveFreq,
2742 activity_monitor.Gfx_BoosterFreqType,
2743 activity_monitor.Gfx_BoosterFreq,
2744 activity_monitor.Gfx_PD_Data_limit_c,
2745 activity_monitor.Gfx_PD_Data_error_coeff,
2746 activity_monitor.Gfx_PD_Data_error_rate_coeff);
2747
2748 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
2749 " ",
2750 1,
2751 "SOCCLK",
2752 activity_monitor.Soc_FPS,
2753 activity_monitor.Soc_UseRlcBusy,
2754 activity_monitor.Soc_MinActiveFreqType,
2755 activity_monitor.Soc_MinActiveFreq,
2756 activity_monitor.Soc_BoosterFreqType,
2757 activity_monitor.Soc_BoosterFreq,
2758 activity_monitor.Soc_PD_Data_limit_c,
2759 activity_monitor.Soc_PD_Data_error_coeff,
2760 activity_monitor.Soc_PD_Data_error_rate_coeff);
2761
2762 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
2763 " ",
2764 2,
2765 "UCLK",
2766 activity_monitor.Mem_FPS,
2767 activity_monitor.Mem_UseRlcBusy,
2768 activity_monitor.Mem_MinActiveFreqType,
2769 activity_monitor.Mem_MinActiveFreq,
2770 activity_monitor.Mem_BoosterFreqType,
2771 activity_monitor.Mem_BoosterFreq,
2772 activity_monitor.Mem_PD_Data_limit_c,
2773 activity_monitor.Mem_PD_Data_error_coeff,
2774 activity_monitor.Mem_PD_Data_error_rate_coeff);
2775
2776 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
2777 " ",
2778 3,
2779 "FCLK",
2780 activity_monitor.Fclk_FPS,
2781 activity_monitor.Fclk_UseRlcBusy,
2782 activity_monitor.Fclk_MinActiveFreqType,
2783 activity_monitor.Fclk_MinActiveFreq,
2784 activity_monitor.Fclk_BoosterFreqType,
2785 activity_monitor.Fclk_BoosterFreq,
2786 activity_monitor.Fclk_PD_Data_limit_c,
2787 activity_monitor.Fclk_PD_Data_error_coeff,
2788 activity_monitor.Fclk_PD_Data_error_rate_coeff);
2789 }
2790
2791 return size;
2792 }
2793
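/*
 * Select a workload profile. For PP_SMC_POWER_PROFILE_CUSTOM the caller
 * supplies at least ten values: input[0] picks the clock block (gfx, soc,
 * uclk or fclk) and input[1..9] are the activity monitor coefficients;
 * input[size] always carries the requested profile mode.
 */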
2794 static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
2795 {
2796 DpmActivityMonitorCoeffInt_t activity_monitor;
2797 int result = 0;
2798
2799 hwmgr->power_profile_mode = input[size];
2800
2801 if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
2802 if (size < 10)
2803 return -EINVAL;
2804
2805 result = vega20_get_activity_monitor_coeff(hwmgr,
2806 (uint8_t *)(&activity_monitor),
2807 WORKLOAD_PPLIB_CUSTOM_BIT);
2808 PP_ASSERT_WITH_CODE(!result,
2809 "[SetPowerProfile] Failed to get activity monitor!",
2810 return result);
2811
2812 switch (input[0]) {
2813 case 0: /* Gfxclk */
2814 activity_monitor.Gfx_FPS = input[1];
2815 activity_monitor.Gfx_UseRlcBusy = input[2];
2816 activity_monitor.Gfx_MinActiveFreqType = input[3];
2817 activity_monitor.Gfx_MinActiveFreq = input[4];
2818 activity_monitor.Gfx_BoosterFreqType = input[5];
2819 activity_monitor.Gfx_BoosterFreq = input[6];
2820 activity_monitor.Gfx_PD_Data_limit_c = input[7];
2821 activity_monitor.Gfx_PD_Data_error_coeff = input[8];
2822 activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
2823 break;
2824 case 1: /* Socclk */
2825 activity_monitor.Soc_FPS = input[1];
2826 activity_monitor.Soc_UseRlcBusy = input[2];
2827 activity_monitor.Soc_MinActiveFreqType = input[3];
2828 activity_monitor.Soc_MinActiveFreq = input[4];
2829 activity_monitor.Soc_BoosterFreqType = input[5];
2830 activity_monitor.Soc_BoosterFreq = input[6];
2831 activity_monitor.Soc_PD_Data_limit_c = input[7];
2832 activity_monitor.Soc_PD_Data_error_coeff = input[8];
2833 activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
2834 break;
2835 case 2: /* Uclk */
2836 activity_monitor.Mem_FPS = input[1];
2837 activity_monitor.Mem_UseRlcBusy = input[2];
2838 activity_monitor.Mem_MinActiveFreqType = input[3];
2839 activity_monitor.Mem_MinActiveFreq = input[4];
2840 activity_monitor.Mem_BoosterFreqType = input[5];
2841 activity_monitor.Mem_BoosterFreq = input[6];
2842 activity_monitor.Mem_PD_Data_limit_c = input[7];
2843 activity_monitor.Mem_PD_Data_error_coeff = input[8];
2844 activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
2845 break;
2846 case 3: /* Fclk */
2847 activity_monitor.Fclk_FPS = input[1];
2848 activity_monitor.Fclk_UseRlcBusy = input[2];
2849 activity_monitor.Fclk_MinActiveFreqType = input[3];
2850 activity_monitor.Fclk_MinActiveFreq = input[4];
2851 activity_monitor.Fclk_BoosterFreqType = input[5];
2852 activity_monitor.Fclk_BoosterFreq = input[6];
2853 activity_monitor.Fclk_PD_Data_limit_c = input[7];
2854 activity_monitor.Fclk_PD_Data_error_coeff = input[8];
2855 activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
2856 break;
2857 }
2858
2859 result = vega20_set_activity_monitor_coeff(hwmgr,
2860 (uint8_t *)(&activity_monitor),
2861 WORKLOAD_PPLIB_CUSTOM_BIT);
2862 PP_ASSERT_WITH_CODE(!result,
2863 "[SetPowerProfile] Failed to set activity monitor!",
2864 return result);
2865 }
2866
2867 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
2868 1 << hwmgr->power_profile_mode);
2869
2870 return 0;
2871 }
2872
2873 static int vega20_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
2874 uint32_t virtual_addr_low,
2875 uint32_t virtual_addr_hi,
2876 uint32_t mc_addr_low,
2877 uint32_t mc_addr_hi,
2878 uint32_t size)
2879 {
2880 smum_send_msg_to_smc_with_parameter(hwmgr,
2881 PPSMC_MSG_SetSystemVirtualDramAddrHigh,
2882 virtual_addr_hi);
2883 smum_send_msg_to_smc_with_parameter(hwmgr,
2884 PPSMC_MSG_SetSystemVirtualDramAddrLow,
2885 virtual_addr_low);
2886 smum_send_msg_to_smc_with_parameter(hwmgr,
2887 PPSMC_MSG_DramLogSetDramAddrHigh,
2888 mc_addr_hi);
2889
2890 smum_send_msg_to_smc_with_parameter(hwmgr,
2891 PPSMC_MSG_DramLogSetDramAddrLow,
2892 mc_addr_low);
2893
2894 smum_send_msg_to_smc_with_parameter(hwmgr,
2895 PPSMC_MSG_DramLogSetDramSize,
2896 size);
2897 return 0;
2898 }
2899
2900 static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
2901 struct PP_TemperatureRange *thermal_data)
2902 {
2903 struct phm_ppt_v3_information *pptable_information =
2904 (struct phm_ppt_v3_information *)hwmgr->pptable;
2905
2906 memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
2907
2908 thermal_data->max = pptable_information->us_software_shutdown_temp *
2909 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2910
2911 return 0;
2912 }
2913
2914 static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
2915 /* init/fini related */
2916 .backend_init =
2917 vega20_hwmgr_backend_init,
2918 .backend_fini =
2919 vega20_hwmgr_backend_fini,
2920 .asic_setup =
2921 vega20_setup_asic_task,
2922 .power_off_asic =
2923 vega20_power_off_asic,
2924 .dynamic_state_management_enable =
2925 vega20_enable_dpm_tasks,
2926 .dynamic_state_management_disable =
2927 vega20_disable_dpm_tasks,
2928 /* power state related */
2929 .apply_clocks_adjust_rules =
2930 vega20_apply_clocks_adjust_rules,
2931 .pre_display_config_changed =
2932 vega20_pre_display_configuration_changed_task,
2933 .display_config_changed =
2934 vega20_display_configuration_changed_task,
2935 .check_smc_update_required_for_display_configuration =
2936 vega20_check_smc_update_required_for_display_configuration,
2937 .notify_smc_display_config_after_ps_adjustment =
2938 vega20_notify_smc_display_config_after_ps_adjustment,
2939 /* export to DAL */
2940 .get_sclk =
2941 vega20_dpm_get_sclk,
2942 .get_mclk =
2943 vega20_dpm_get_mclk,
2944 .get_dal_power_level =
2945 vega20_get_dal_power_level,
2946 .get_clock_by_type_with_latency =
2947 vega20_get_clock_by_type_with_latency,
2948 .get_clock_by_type_with_voltage =
2949 vega20_get_clock_by_type_with_voltage,
2950 .set_watermarks_for_clocks_ranges =
2951 vega20_set_watermarks_for_clocks_ranges,
2952 .display_clock_voltage_request =
2953 vega20_display_clock_voltage_request,
2954 /* UMD pstate, profile related */
2955 .force_dpm_level =
2956 vega20_dpm_force_dpm_level,
2957 .get_power_profile_mode =
2958 vega20_get_power_profile_mode,
2959 .set_power_profile_mode =
2960 vega20_set_power_profile_mode,
2961 /* od related */
2962 .set_power_limit =
2963 vega20_set_power_limit,
2964 .get_sclk_od =
2965 vega20_get_sclk_od,
2966 .set_sclk_od =
2967 vega20_set_sclk_od,
2968 .get_mclk_od =
2969 vega20_get_mclk_od,
2970 .set_mclk_od =
2971 vega20_set_mclk_od,
2972 /* for sysfs to retrieve/set gfxclk/memclk */
2973 .force_clock_level =
2974 vega20_force_clock_level,
2975 .print_clock_levels =
2976 vega20_print_clock_levels,
2977 .read_sensor =
2978 vega20_read_sensor,
2979 /* powergate related */
2980 .powergate_uvd =
2981 vega20_power_gate_uvd,
2982 .powergate_vce =
2983 vega20_power_gate_vce,
2984 /* thermal related */
2985 .start_thermal_controller =
2986 vega20_start_thermal_controller,
2987 .stop_thermal_controller =
2988 vega20_thermal_stop_thermal_controller,
2989 .get_thermal_temperature_range =
2990 vega20_get_thermal_temperature_range,
2991 .register_irq_handlers =
2992 smu9_register_irq_handlers,
2993 .disable_smc_firmware_ctf =
2994 vega20_thermal_disable_alert,
2995 /* fan control related */
2996 .get_fan_speed_info =
2997 vega20_fan_ctrl_get_fan_speed_info,
2998 .get_fan_speed_rpm =
2999 vega20_fan_ctrl_get_fan_speed_rpm,
3000 .get_fan_control_mode =
3001 vega20_get_fan_control_mode,
3002 /* smu memory related */
3003 .notify_cac_buffer_info =
3004 vega20_notify_cac_buffer_info,
3005 };
3006
3007 int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)
3008 {
3009 hwmgr->hwmgr_func = &vega20_hwmgr_funcs;
3010 hwmgr->pptable_func = &vega20_pptable_funcs;
3011
3012 return 0;
3013 }