]> git.ipfire.org Git - thirdparty/linux.git/blob - drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
timer/migration: Fix quick check reporting late expiry
[thirdparty/linux.git] / drivers / gpu / drm / amd / pm / swsmu / smu13 / smu_v13_0_7_ppt.c
1 /*
2 * Copyright 2021 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 #define SWSMU_CODE_LAYER_L2
25
26 #include <linux/firmware.h>
27 #include <linux/pci.h>
28 #include <linux/i2c.h>
29 #include "amdgpu.h"
30 #include "amdgpu_smu.h"
31 #include "atomfirmware.h"
32 #include "amdgpu_atomfirmware.h"
33 #include "amdgpu_atombios.h"
34 #include "smu_v13_0.h"
35 #include "smu13_driver_if_v13_0_7.h"
36 #include "soc15_common.h"
37 #include "atom.h"
38 #include "smu_v13_0_7_ppt.h"
39 #include "smu_v13_0_7_pptable.h"
40 #include "smu_v13_0_7_ppsmc.h"
41 #include "nbio/nbio_4_3_0_offset.h"
42 #include "nbio/nbio_4_3_0_sh_mask.h"
43 #include "mp/mp_13_0_0_offset.h"
44 #include "mp/mp_13_0_0_sh_mask.h"
45
46 #include "asic_reg/mp/mp_13_0_0_sh_mask.h"
47 #include "smu_cmn.h"
48 #include "amdgpu_ras.h"
49
50 /*
51 * DO NOT use these for err/warn/info/debug messages.
52 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
53 * They are more MGPU friendly.
54 */
55 #undef pr_err
56 #undef pr_warn
57 #undef pr_info
58 #undef pr_debug
59
60 #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
61
62 #define FEATURE_MASK(feature) (1ULL << feature)
63 #define SMC_DPM_FEATURE ( \
64 FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \
65 FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \
66 FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \
67 FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \
68 FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \
69 FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT))
70
71 #define smnMP1_FIRMWARE_FLAGS_SMU_13_0_7 0x3b10028
72
73 #define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000
74
75 #define PP_OD_FEATURE_GFXCLK_FMIN 0
76 #define PP_OD_FEATURE_GFXCLK_FMAX 1
77 #define PP_OD_FEATURE_UCLK_FMIN 2
78 #define PP_OD_FEATURE_UCLK_FMAX 3
79 #define PP_OD_FEATURE_GFX_VF_CURVE 4
80 #define PP_OD_FEATURE_FAN_CURVE_TEMP 5
81 #define PP_OD_FEATURE_FAN_CURVE_PWM 6
82 #define PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT 7
83 #define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8
84 #define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9
85 #define PP_OD_FEATURE_FAN_MINIMUM_PWM 10
86
87 #define LINK_SPEED_MAX 3
88
89 static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] = {
90 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
91 MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
92 MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
93 MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow, 0),
94 MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 0),
95 MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0),
96 MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0),
97 MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow, 1),
98 MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh, 1),
99 MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow, 1),
100 MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 1),
101 MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetRunningSmuFeaturesLow, 1),
102 MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh, 1),
103 MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1),
104 MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
105 MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
106 MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
107 MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0),
108 MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0),
109 MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1),
110 MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0),
111 MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0),
112 MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc, 0),
113 MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0),
114 MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0),
115 MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1),
116 MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1),
117 MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1),
118 MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0),
119 MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
120 MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
121 MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
122 MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0),
123 MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0),
124 MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0),
125 MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0),
126 MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1),
127 MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0),
128 MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0),
129 MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0),
130 MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
131 MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
132 MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
133 MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0),
134 MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0),
135 MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
136 MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
137 MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
138 MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
139 MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
140 MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
141 MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
142 MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0),
143 MSG_MAP(EnableUCLKShadow, PPSMC_MSG_EnableUCLKShadow, 0),
144 };
145
146 static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
147 CLK_MAP(GFXCLK, PPCLK_GFXCLK),
148 CLK_MAP(SCLK, PPCLK_GFXCLK),
149 CLK_MAP(SOCCLK, PPCLK_SOCCLK),
150 CLK_MAP(FCLK, PPCLK_FCLK),
151 CLK_MAP(UCLK, PPCLK_UCLK),
152 CLK_MAP(MCLK, PPCLK_UCLK),
153 CLK_MAP(VCLK, PPCLK_VCLK_0),
154 CLK_MAP(VCLK1, PPCLK_VCLK_1),
155 CLK_MAP(DCLK, PPCLK_DCLK_0),
156 CLK_MAP(DCLK1, PPCLK_DCLK_1),
157 CLK_MAP(DCEFCLK, PPCLK_DCFCLK),
158 };
159
160 static struct cmn2asic_mapping smu_v13_0_7_feature_mask_map[SMU_FEATURE_COUNT] = {
161 FEA_MAP(FW_DATA_READ),
162 FEA_MAP(DPM_GFXCLK),
163 FEA_MAP(DPM_GFX_POWER_OPTIMIZER),
164 FEA_MAP(DPM_UCLK),
165 FEA_MAP(DPM_FCLK),
166 FEA_MAP(DPM_SOCCLK),
167 FEA_MAP(DPM_MP0CLK),
168 FEA_MAP(DPM_LINK),
169 FEA_MAP(DPM_DCN),
170 FEA_MAP(VMEMP_SCALING),
171 FEA_MAP(VDDIO_MEM_SCALING),
172 FEA_MAP(DS_GFXCLK),
173 FEA_MAP(DS_SOCCLK),
174 FEA_MAP(DS_FCLK),
175 FEA_MAP(DS_LCLK),
176 FEA_MAP(DS_DCFCLK),
177 FEA_MAP(DS_UCLK),
178 FEA_MAP(GFX_ULV),
179 FEA_MAP(FW_DSTATE),
180 FEA_MAP(GFXOFF),
181 FEA_MAP(BACO),
182 FEA_MAP(MM_DPM),
183 FEA_MAP(SOC_MPCLK_DS),
184 FEA_MAP(BACO_MPCLK_DS),
185 FEA_MAP(THROTTLERS),
186 FEA_MAP(SMARTSHIFT),
187 FEA_MAP(GTHR),
188 FEA_MAP(ACDC),
189 FEA_MAP(VR0HOT),
190 FEA_MAP(FW_CTF),
191 FEA_MAP(FAN_CONTROL),
192 FEA_MAP(GFX_DCS),
193 FEA_MAP(GFX_READ_MARGIN),
194 FEA_MAP(LED_DISPLAY),
195 FEA_MAP(GFXCLK_SPREAD_SPECTRUM),
196 FEA_MAP(OUT_OF_BAND_MONITOR),
197 FEA_MAP(OPTIMIZED_VMIN),
198 FEA_MAP(GFX_IMU),
199 FEA_MAP(BOOT_TIME_CAL),
200 FEA_MAP(GFX_PCC_DFLL),
201 FEA_MAP(SOC_CG),
202 FEA_MAP(DF_CSTATE),
203 FEA_MAP(GFX_EDC),
204 FEA_MAP(BOOT_POWER_OPT),
205 FEA_MAP(CLOCK_POWER_DOWN_BYPASS),
206 FEA_MAP(DS_VCN),
207 FEA_MAP(BACO_CG),
208 FEA_MAP(MEM_TEMP_READ),
209 FEA_MAP(ATHUB_MMHUB_PG),
210 FEA_MAP(SOC_PCC),
211 [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
212 [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
213 [SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT},
214 };
215
216 static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = {
217 TAB_MAP(PPTABLE),
218 TAB_MAP(WATERMARKS),
219 TAB_MAP(AVFS_PSM_DEBUG),
220 TAB_MAP(PMSTATUSLOG),
221 TAB_MAP(SMU_METRICS),
222 TAB_MAP(DRIVER_SMU_CONFIG),
223 TAB_MAP(ACTIVITY_MONITOR_COEFF),
224 [SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE},
225 TAB_MAP(OVERDRIVE),
226 TAB_MAP(WIFIBAND),
227 };
228
229 static struct cmn2asic_mapping smu_v13_0_7_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
230 PWR_MAP(AC),
231 PWR_MAP(DC),
232 };
233
234 static struct cmn2asic_mapping smu_v13_0_7_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
235 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT),
236 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
237 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
238 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
239 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
240 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
241 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
242 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D, WORKLOAD_PPLIB_WINDOW_3D_BIT),
243 };
244
245 static const uint8_t smu_v13_0_7_throttler_map[] = {
246 [THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT),
247 [THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT),
248 [THROTTLER_PPT2_BIT] = (SMU_THROTTLER_PPT2_BIT),
249 [THROTTLER_PPT3_BIT] = (SMU_THROTTLER_PPT3_BIT),
250 [THROTTLER_TDC_GFX_BIT] = (SMU_THROTTLER_TDC_GFX_BIT),
251 [THROTTLER_TDC_SOC_BIT] = (SMU_THROTTLER_TDC_SOC_BIT),
252 [THROTTLER_TEMP_EDGE_BIT] = (SMU_THROTTLER_TEMP_EDGE_BIT),
253 [THROTTLER_TEMP_HOTSPOT_BIT] = (SMU_THROTTLER_TEMP_HOTSPOT_BIT),
254 [THROTTLER_TEMP_MEM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT),
255 [THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT),
256 [THROTTLER_TEMP_VR_SOC_BIT] = (SMU_THROTTLER_TEMP_VR_SOC_BIT),
257 [THROTTLER_TEMP_VR_MEM0_BIT] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT),
258 [THROTTLER_TEMP_VR_MEM1_BIT] = (SMU_THROTTLER_TEMP_VR_MEM1_BIT),
259 [THROTTLER_TEMP_LIQUID0_BIT] = (SMU_THROTTLER_TEMP_LIQUID0_BIT),
260 [THROTTLER_TEMP_LIQUID1_BIT] = (SMU_THROTTLER_TEMP_LIQUID1_BIT),
261 [THROTTLER_GFX_APCC_PLUS_BIT] = (SMU_THROTTLER_APCC_BIT),
262 [THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT),
263 };
264
265 static int
266 smu_v13_0_7_get_allowed_feature_mask(struct smu_context *smu,
267 uint32_t *feature_mask, uint32_t num)
268 {
269 struct amdgpu_device *adev = smu->adev;
270
271 if (num > 2)
272 return -EINVAL;
273
274 memset(feature_mask, 0, sizeof(uint32_t) * num);
275
276 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DATA_READ_BIT);
277
278 if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) {
279 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
280 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT);
281 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT);
282 }
283
284 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
285 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);
286
287 if (adev->pm.pp_feature & PP_MCLK_DPM_MASK) {
288 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
289 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_FCLK_BIT);
290 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
291 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
292 }
293
294 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
295
296 if (adev->pm.pp_feature & PP_PCIE_DPM_MASK)
297 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT);
298
299 if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
300 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
301
302 if (adev->pm.pp_feature & PP_ULV_MASK)
303 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);
304
305 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_LCLK_BIT);
306 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT);
307 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MM_DPM_BIT);
308 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_VCN_BIT);
309 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_FCLK_BIT);
310 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DF_CSTATE_BIT);
311 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_THROTTLERS_BIT);
312 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VR0HOT_BIT);
313 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_CTF_BIT);
314 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FAN_CONTROL_BIT);
315 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);
316 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXCLK_SPREAD_SPECTRUM_BIT);
317 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MEM_TEMP_READ_BIT);
318 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DSTATE_BIT);
319 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_MPCLK_DS_BIT);
320 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_MPCLK_DS_BIT);
321 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_PCC_DFLL_BIT);
322 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_CG_BIT);
323 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_BIT);
324
325 if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
326 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCN_BIT);
327
328 if ((adev->pg_flags & AMD_PG_SUPPORT_ATHUB) &&
329 (adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
330 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);
331
332 return 0;
333 }
334
335 static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
336 {
337 struct smu_table_context *table_context = &smu->smu_table;
338 struct smu_13_0_7_powerplay_table *powerplay_table =
339 table_context->power_play_table;
340 struct smu_baco_context *smu_baco = &smu->smu_baco;
341 PPTable_t *smc_pptable = table_context->driver_pptable;
342 BoardTable_t *BoardTable = &smc_pptable->BoardTable;
343 const OverDriveLimits_t * const overdrive_upperlimits =
344 &smc_pptable->SkuTable.OverDriveLimitsBasicMax;
345 const OverDriveLimits_t * const overdrive_lowerlimits =
346 &smc_pptable->SkuTable.OverDriveLimitsMin;
347
348 if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC)
349 smu->dc_controlled_by_gpio = true;
350
351 if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO) {
352 smu_baco->platform_support = true;
353
354 if ((powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO)
355 && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
356 smu_baco->maco_support = true;
357 }
358
359 if (!overdrive_lowerlimits->FeatureCtrlMask ||
360 !overdrive_upperlimits->FeatureCtrlMask)
361 smu->od_enabled = false;
362
363 table_context->thermal_controller_type =
364 powerplay_table->thermal_controller_type;
365
366 /*
367 * Instead of having its own buffer space and get overdrive_table copied,
368 * smu->od_settings just points to the actual overdrive_table
369 */
370 smu->od_settings = &powerplay_table->overdrive_table;
371
372 return 0;
373 }
374
375 static int smu_v13_0_7_store_powerplay_table(struct smu_context *smu)
376 {
377 struct smu_table_context *table_context = &smu->smu_table;
378 struct smu_13_0_7_powerplay_table *powerplay_table =
379 table_context->power_play_table;
380 struct amdgpu_device *adev = smu->adev;
381
382 if (adev->pdev->device == 0x51)
383 powerplay_table->smc_pptable.SkuTable.DebugOverrides |= 0x00000080;
384
385 memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
386 sizeof(PPTable_t));
387
388 return 0;
389 }
390
391 static int smu_v13_0_7_check_fw_status(struct smu_context *smu)
392 {
393 struct amdgpu_device *adev = smu->adev;
394 uint32_t mp1_fw_flags;
395
396 mp1_fw_flags = RREG32_PCIE(MP1_Public |
397 (smnMP1_FIRMWARE_FLAGS_SMU_13_0_7 & 0xffffffff));
398
399 if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
400 MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
401 return 0;
402
403 return -EIO;
404 }
405
406 #ifndef atom_smc_dpm_info_table_13_0_7
407 struct atom_smc_dpm_info_table_13_0_7 {
408 struct atom_common_table_header table_header;
409 BoardTable_t BoardTable;
410 };
411 #endif
412
413 static int smu_v13_0_7_append_powerplay_table(struct smu_context *smu)
414 {
415 struct smu_table_context *table_context = &smu->smu_table;
416
417 PPTable_t *smc_pptable = table_context->driver_pptable;
418
419 struct atom_smc_dpm_info_table_13_0_7 *smc_dpm_table;
420
421 BoardTable_t *BoardTable = &smc_pptable->BoardTable;
422
423 int index, ret;
424
425 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
426 smc_dpm_info);
427
428 ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL,
429 (uint8_t **)&smc_dpm_table);
430 if (ret)
431 return ret;
432
433 memcpy(BoardTable, &smc_dpm_table->BoardTable, sizeof(BoardTable_t));
434
435 return 0;
436 }
437
438 static int smu_v13_0_7_get_pptable_from_pmfw(struct smu_context *smu,
439 void **table,
440 uint32_t *size)
441 {
442 struct smu_table_context *smu_table = &smu->smu_table;
443 void *combo_pptable = smu_table->combo_pptable;
444 int ret = 0;
445
446 ret = smu_cmn_get_combo_pptable(smu);
447 if (ret)
448 return ret;
449
450 *table = combo_pptable;
451 *size = sizeof(struct smu_13_0_7_powerplay_table);
452
453 return 0;
454 }
455
456 static int smu_v13_0_7_setup_pptable(struct smu_context *smu)
457 {
458 struct smu_table_context *smu_table = &smu->smu_table;
459 struct amdgpu_device *adev = smu->adev;
460 int ret = 0;
461
462 /*
463 * With SCPM enabled, the pptable used will be signed. It cannot
464 * be used directly by driver. To get the raw pptable, we need to
465 * rely on the combo pptable(and its revelant SMU message).
466 */
467 ret = smu_v13_0_7_get_pptable_from_pmfw(smu,
468 &smu_table->power_play_table,
469 &smu_table->power_play_table_size);
470 if (ret)
471 return ret;
472
473 ret = smu_v13_0_7_store_powerplay_table(smu);
474 if (ret)
475 return ret;
476
477 /*
478 * With SCPM enabled, the operation below will be handled
479 * by PSP. Driver involvment is unnecessary and useless.
480 */
481 if (!adev->scpm_enabled) {
482 ret = smu_v13_0_7_append_powerplay_table(smu);
483 if (ret)
484 return ret;
485 }
486
487 ret = smu_v13_0_7_check_powerplay_table(smu);
488 if (ret)
489 return ret;
490
491 return ret;
492 }
493
494 static int smu_v13_0_7_tables_init(struct smu_context *smu)
495 {
496 struct smu_table_context *smu_table = &smu->smu_table;
497 struct smu_table *tables = smu_table->tables;
498
499 SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
500 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
501
502 SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
503 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
504 SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t),
505 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
506 SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
507 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
508 SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTableExternal_t),
509 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
510 SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
511 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
512 SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
513 sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,
514 AMDGPU_GEM_DOMAIN_VRAM);
515 SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE,
516 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
517 SMU_TABLE_INIT(tables, SMU_TABLE_WIFIBAND,
518 sizeof(WifiBandEntryTable_t), PAGE_SIZE,
519 AMDGPU_GEM_DOMAIN_VRAM);
520
521 smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
522 if (!smu_table->metrics_table)
523 goto err0_out;
524 smu_table->metrics_time = 0;
525
526 smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
527 smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
528 if (!smu_table->gpu_metrics_table)
529 goto err1_out;
530
531 smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
532 if (!smu_table->watermarks_table)
533 goto err2_out;
534
535 return 0;
536
537 err2_out:
538 kfree(smu_table->gpu_metrics_table);
539 err1_out:
540 kfree(smu_table->metrics_table);
541 err0_out:
542 return -ENOMEM;
543 }
544
545 static int smu_v13_0_7_allocate_dpm_context(struct smu_context *smu)
546 {
547 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
548
549 smu_dpm->dpm_context = kzalloc(sizeof(struct smu_13_0_dpm_context),
550 GFP_KERNEL);
551 if (!smu_dpm->dpm_context)
552 return -ENOMEM;
553
554 smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context);
555
556 return 0;
557 }
558
559 static int smu_v13_0_7_init_smc_tables(struct smu_context *smu)
560 {
561 int ret = 0;
562
563 ret = smu_v13_0_7_tables_init(smu);
564 if (ret)
565 return ret;
566
567 ret = smu_v13_0_7_allocate_dpm_context(smu);
568 if (ret)
569 return ret;
570
571 return smu_v13_0_init_smc_tables(smu);
572 }
573
574 static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
575 {
576 struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
577 PPTable_t *driver_ppt = smu->smu_table.driver_pptable;
578 SkuTable_t *skutable = &driver_ppt->SkuTable;
579 struct smu_13_0_dpm_table *dpm_table;
580 struct smu_13_0_pcie_table *pcie_table;
581 uint32_t link_level;
582 int ret = 0;
583
584 /* socclk dpm table setup */
585 dpm_table = &dpm_context->dpm_tables.soc_table;
586 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
587 ret = smu_v13_0_set_single_dpm_table(smu,
588 SMU_SOCCLK,
589 dpm_table);
590 if (ret)
591 return ret;
592 } else {
593 dpm_table->count = 1;
594 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
595 dpm_table->dpm_levels[0].enabled = true;
596 dpm_table->min = dpm_table->dpm_levels[0].value;
597 dpm_table->max = dpm_table->dpm_levels[0].value;
598 }
599
600 /* gfxclk dpm table setup */
601 dpm_table = &dpm_context->dpm_tables.gfx_table;
602 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
603 ret = smu_v13_0_set_single_dpm_table(smu,
604 SMU_GFXCLK,
605 dpm_table);
606 if (ret)
607 return ret;
608
609 if (skutable->DriverReportedClocks.GameClockAc &&
610 (dpm_table->dpm_levels[dpm_table->count - 1].value >
611 skutable->DriverReportedClocks.GameClockAc)) {
612 dpm_table->dpm_levels[dpm_table->count - 1].value =
613 skutable->DriverReportedClocks.GameClockAc;
614 dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
615 }
616 } else {
617 dpm_table->count = 1;
618 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
619 dpm_table->dpm_levels[0].enabled = true;
620 dpm_table->min = dpm_table->dpm_levels[0].value;
621 dpm_table->max = dpm_table->dpm_levels[0].value;
622 }
623
624 /* uclk dpm table setup */
625 dpm_table = &dpm_context->dpm_tables.uclk_table;
626 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
627 ret = smu_v13_0_set_single_dpm_table(smu,
628 SMU_UCLK,
629 dpm_table);
630 if (ret)
631 return ret;
632 } else {
633 dpm_table->count = 1;
634 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
635 dpm_table->dpm_levels[0].enabled = true;
636 dpm_table->min = dpm_table->dpm_levels[0].value;
637 dpm_table->max = dpm_table->dpm_levels[0].value;
638 }
639
640 /* fclk dpm table setup */
641 dpm_table = &dpm_context->dpm_tables.fclk_table;
642 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
643 ret = smu_v13_0_set_single_dpm_table(smu,
644 SMU_FCLK,
645 dpm_table);
646 if (ret)
647 return ret;
648 } else {
649 dpm_table->count = 1;
650 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100;
651 dpm_table->dpm_levels[0].enabled = true;
652 dpm_table->min = dpm_table->dpm_levels[0].value;
653 dpm_table->max = dpm_table->dpm_levels[0].value;
654 }
655
656 /* vclk dpm table setup */
657 dpm_table = &dpm_context->dpm_tables.vclk_table;
658 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) {
659 ret = smu_v13_0_set_single_dpm_table(smu,
660 SMU_VCLK,
661 dpm_table);
662 if (ret)
663 return ret;
664 } else {
665 dpm_table->count = 1;
666 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
667 dpm_table->dpm_levels[0].enabled = true;
668 dpm_table->min = dpm_table->dpm_levels[0].value;
669 dpm_table->max = dpm_table->dpm_levels[0].value;
670 }
671
672 /* dclk dpm table setup */
673 dpm_table = &dpm_context->dpm_tables.dclk_table;
674 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCLK_BIT)) {
675 ret = smu_v13_0_set_single_dpm_table(smu,
676 SMU_DCLK,
677 dpm_table);
678 if (ret)
679 return ret;
680 } else {
681 dpm_table->count = 1;
682 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
683 dpm_table->dpm_levels[0].enabled = true;
684 dpm_table->min = dpm_table->dpm_levels[0].value;
685 dpm_table->max = dpm_table->dpm_levels[0].value;
686 }
687
688 /* lclk dpm table setup */
689 pcie_table = &dpm_context->dpm_tables.pcie_table;
690 pcie_table->num_of_link_levels = 0;
691 for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
692 if (!skutable->PcieGenSpeed[link_level] &&
693 !skutable->PcieLaneCount[link_level] &&
694 !skutable->LclkFreq[link_level])
695 continue;
696
697 pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
698 skutable->PcieGenSpeed[link_level];
699 pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
700 skutable->PcieLaneCount[link_level];
701 pcie_table->clk_freq[pcie_table->num_of_link_levels] =
702 skutable->LclkFreq[link_level];
703 pcie_table->num_of_link_levels++;
704 }
705
706 /* dcefclk dpm table setup */
707 dpm_table = &dpm_context->dpm_tables.dcef_table;
708 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
709 ret = smu_v13_0_set_single_dpm_table(smu,
710 SMU_DCEFCLK,
711 dpm_table);
712 if (ret)
713 return ret;
714 } else {
715 dpm_table->count = 1;
716 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
717 dpm_table->dpm_levels[0].enabled = true;
718 dpm_table->min = dpm_table->dpm_levels[0].value;
719 dpm_table->max = dpm_table->dpm_levels[0].value;
720 }
721
722 return 0;
723 }
724
725 static bool smu_v13_0_7_is_dpm_running(struct smu_context *smu)
726 {
727 int ret = 0;
728 uint64_t feature_enabled;
729
730 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
731 if (ret)
732 return false;
733
734 return !!(feature_enabled & SMC_DPM_FEATURE);
735 }
736
737 static void smu_v13_0_7_dump_pptable(struct smu_context *smu)
738 {
739 struct smu_table_context *table_context = &smu->smu_table;
740 PPTable_t *pptable = table_context->driver_pptable;
741 SkuTable_t *skutable = &pptable->SkuTable;
742
743 dev_info(smu->adev->dev, "Dumped PPTable:\n");
744
745 dev_info(smu->adev->dev, "Version = 0x%08x\n", skutable->Version);
746 dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", skutable->FeaturesToRun[0]);
747 dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", skutable->FeaturesToRun[1]);
748 }
749
750 static uint32_t smu_v13_0_7_get_throttler_status(SmuMetrics_t *metrics)
751 {
752 uint32_t throttler_status = 0;
753 int i;
754
755 for (i = 0; i < THROTTLER_COUNT; i++)
756 throttler_status |=
757 (metrics->ThrottlingPercentage[i] ? 1U << i : 0);
758
759 return throttler_status;
760 }
761
762 #define SMU_13_0_7_BUSY_THRESHOLD 15
763 static int smu_v13_0_7_get_smu_metrics_data(struct smu_context *smu,
764 MetricsMember_t member,
765 uint32_t *value)
766 {
767 struct smu_table_context *smu_table = &smu->smu_table;
768 SmuMetrics_t *metrics =
769 &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
770 int ret = 0;
771
772 ret = smu_cmn_get_metrics_table(smu,
773 NULL,
774 false);
775 if (ret)
776 return ret;
777
778 switch (member) {
779 case METRICS_CURR_GFXCLK:
780 *value = metrics->CurrClock[PPCLK_GFXCLK];
781 break;
782 case METRICS_CURR_SOCCLK:
783 *value = metrics->CurrClock[PPCLK_SOCCLK];
784 break;
785 case METRICS_CURR_UCLK:
786 *value = metrics->CurrClock[PPCLK_UCLK];
787 break;
788 case METRICS_CURR_VCLK:
789 *value = metrics->CurrClock[PPCLK_VCLK_0];
790 break;
791 case METRICS_CURR_VCLK1:
792 *value = metrics->CurrClock[PPCLK_VCLK_1];
793 break;
794 case METRICS_CURR_DCLK:
795 *value = metrics->CurrClock[PPCLK_DCLK_0];
796 break;
797 case METRICS_CURR_DCLK1:
798 *value = metrics->CurrClock[PPCLK_DCLK_1];
799 break;
800 case METRICS_CURR_FCLK:
801 *value = metrics->CurrClock[PPCLK_FCLK];
802 break;
803 case METRICS_CURR_DCEFCLK:
804 *value = metrics->CurrClock[PPCLK_DCFCLK];
805 break;
806 case METRICS_AVERAGE_GFXCLK:
807 *value = metrics->AverageGfxclkFrequencyPreDs;
808 break;
809 case METRICS_AVERAGE_FCLK:
810 if (metrics->AverageUclkActivity <= SMU_13_0_7_BUSY_THRESHOLD)
811 *value = metrics->AverageFclkFrequencyPostDs;
812 else
813 *value = metrics->AverageFclkFrequencyPreDs;
814 break;
815 case METRICS_AVERAGE_UCLK:
816 if (metrics->AverageUclkActivity <= SMU_13_0_7_BUSY_THRESHOLD)
817 *value = metrics->AverageMemclkFrequencyPostDs;
818 else
819 *value = metrics->AverageMemclkFrequencyPreDs;
820 break;
821 case METRICS_AVERAGE_VCLK:
822 *value = metrics->AverageVclk0Frequency;
823 break;
824 case METRICS_AVERAGE_DCLK:
825 *value = metrics->AverageDclk0Frequency;
826 break;
827 case METRICS_AVERAGE_VCLK1:
828 *value = metrics->AverageVclk1Frequency;
829 break;
830 case METRICS_AVERAGE_DCLK1:
831 *value = metrics->AverageDclk1Frequency;
832 break;
833 case METRICS_AVERAGE_GFXACTIVITY:
834 *value = metrics->AverageGfxActivity;
835 break;
836 case METRICS_AVERAGE_MEMACTIVITY:
837 *value = metrics->AverageUclkActivity;
838 break;
839 case METRICS_AVERAGE_SOCKETPOWER:
840 *value = metrics->AverageSocketPower << 8;
841 break;
842 case METRICS_TEMPERATURE_EDGE:
843 *value = metrics->AvgTemperature[TEMP_EDGE] *
844 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
845 break;
846 case METRICS_TEMPERATURE_HOTSPOT:
847 *value = metrics->AvgTemperature[TEMP_HOTSPOT] *
848 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
849 break;
850 case METRICS_TEMPERATURE_MEM:
851 *value = metrics->AvgTemperature[TEMP_MEM] *
852 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
853 break;
854 case METRICS_TEMPERATURE_VRGFX:
855 *value = metrics->AvgTemperature[TEMP_VR_GFX] *
856 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
857 break;
858 case METRICS_TEMPERATURE_VRSOC:
859 *value = metrics->AvgTemperature[TEMP_VR_SOC] *
860 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
861 break;
862 case METRICS_THROTTLER_STATUS:
863 *value = smu_v13_0_7_get_throttler_status(metrics);
864 break;
865 case METRICS_CURR_FANSPEED:
866 *value = metrics->AvgFanRpm;
867 break;
868 case METRICS_CURR_FANPWM:
869 *value = metrics->AvgFanPwm;
870 break;
871 case METRICS_VOLTAGE_VDDGFX:
872 *value = metrics->AvgVoltage[SVI_PLANE_GFX];
873 break;
874 case METRICS_PCIE_RATE:
875 *value = metrics->PcieRate;
876 break;
877 case METRICS_PCIE_WIDTH:
878 *value = metrics->PcieWidth;
879 break;
880 default:
881 *value = UINT_MAX;
882 break;
883 }
884
885 return ret;
886 }
887
888 static int smu_v13_0_7_get_dpm_ultimate_freq(struct smu_context *smu,
889 enum smu_clk_type clk_type,
890 uint32_t *min,
891 uint32_t *max)
892 {
893 struct smu_13_0_dpm_context *dpm_context =
894 smu->smu_dpm.dpm_context;
895 struct smu_13_0_dpm_table *dpm_table;
896
897 switch (clk_type) {
898 case SMU_MCLK:
899 case SMU_UCLK:
900 /* uclk dpm table */
901 dpm_table = &dpm_context->dpm_tables.uclk_table;
902 break;
903 case SMU_GFXCLK:
904 case SMU_SCLK:
905 /* gfxclk dpm table */
906 dpm_table = &dpm_context->dpm_tables.gfx_table;
907 break;
908 case SMU_SOCCLK:
909 /* socclk dpm table */
910 dpm_table = &dpm_context->dpm_tables.soc_table;
911 break;
912 case SMU_FCLK:
913 /* fclk dpm table */
914 dpm_table = &dpm_context->dpm_tables.fclk_table;
915 break;
916 case SMU_VCLK:
917 case SMU_VCLK1:
918 /* vclk dpm table */
919 dpm_table = &dpm_context->dpm_tables.vclk_table;
920 break;
921 case SMU_DCLK:
922 case SMU_DCLK1:
923 /* dclk dpm table */
924 dpm_table = &dpm_context->dpm_tables.dclk_table;
925 break;
926 default:
927 dev_err(smu->adev->dev, "Unsupported clock type!\n");
928 return -EINVAL;
929 }
930
931 if (min)
932 *min = dpm_table->min;
933 if (max)
934 *max = dpm_table->max;
935
936 return 0;
937 }
938
939 static int smu_v13_0_7_read_sensor(struct smu_context *smu,
940 enum amd_pp_sensors sensor,
941 void *data,
942 uint32_t *size)
943 {
944 struct smu_table_context *table_context = &smu->smu_table;
945 PPTable_t *smc_pptable = table_context->driver_pptable;
946 int ret = 0;
947
948 switch (sensor) {
949 case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
950 *(uint16_t *)data = smc_pptable->SkuTable.FanMaximumRpm;
951 *size = 4;
952 break;
953 case AMDGPU_PP_SENSOR_MEM_LOAD:
954 ret = smu_v13_0_7_get_smu_metrics_data(smu,
955 METRICS_AVERAGE_MEMACTIVITY,
956 (uint32_t *)data);
957 *size = 4;
958 break;
959 case AMDGPU_PP_SENSOR_GPU_LOAD:
960 ret = smu_v13_0_7_get_smu_metrics_data(smu,
961 METRICS_AVERAGE_GFXACTIVITY,
962 (uint32_t *)data);
963 *size = 4;
964 break;
965 case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
966 ret = smu_v13_0_7_get_smu_metrics_data(smu,
967 METRICS_AVERAGE_SOCKETPOWER,
968 (uint32_t *)data);
969 *size = 4;
970 break;
971 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
972 ret = smu_v13_0_7_get_smu_metrics_data(smu,
973 METRICS_TEMPERATURE_HOTSPOT,
974 (uint32_t *)data);
975 *size = 4;
976 break;
977 case AMDGPU_PP_SENSOR_EDGE_TEMP:
978 ret = smu_v13_0_7_get_smu_metrics_data(smu,
979 METRICS_TEMPERATURE_EDGE,
980 (uint32_t *)data);
981 *size = 4;
982 break;
983 case AMDGPU_PP_SENSOR_MEM_TEMP:
984 ret = smu_v13_0_7_get_smu_metrics_data(smu,
985 METRICS_TEMPERATURE_MEM,
986 (uint32_t *)data);
987 *size = 4;
988 break;
989 case AMDGPU_PP_SENSOR_GFX_MCLK:
990 ret = smu_v13_0_7_get_smu_metrics_data(smu,
991 METRICS_CURR_UCLK,
992 (uint32_t *)data);
993 *(uint32_t *)data *= 100;
994 *size = 4;
995 break;
996 case AMDGPU_PP_SENSOR_GFX_SCLK:
997 ret = smu_v13_0_7_get_smu_metrics_data(smu,
998 METRICS_AVERAGE_GFXCLK,
999 (uint32_t *)data);
1000 *(uint32_t *)data *= 100;
1001 *size = 4;
1002 break;
1003 case AMDGPU_PP_SENSOR_VDDGFX:
1004 ret = smu_v13_0_7_get_smu_metrics_data(smu,
1005 METRICS_VOLTAGE_VDDGFX,
1006 (uint32_t *)data);
1007 *size = 4;
1008 break;
1009 case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
1010 default:
1011 ret = -EOPNOTSUPP;
1012 break;
1013 }
1014
1015 return ret;
1016 }
1017
1018 static int smu_v13_0_7_get_current_clk_freq_by_table(struct smu_context *smu,
1019 enum smu_clk_type clk_type,
1020 uint32_t *value)
1021 {
1022 MetricsMember_t member_type;
1023 int clk_id = 0;
1024
1025 clk_id = smu_cmn_to_asic_specific_index(smu,
1026 CMN2ASIC_MAPPING_CLK,
1027 clk_type);
1028 if (clk_id < 0)
1029 return -EINVAL;
1030
1031 switch (clk_id) {
1032 case PPCLK_GFXCLK:
1033 member_type = METRICS_AVERAGE_GFXCLK;
1034 break;
1035 case PPCLK_UCLK:
1036 member_type = METRICS_CURR_UCLK;
1037 break;
1038 case PPCLK_FCLK:
1039 member_type = METRICS_CURR_FCLK;
1040 break;
1041 case PPCLK_SOCCLK:
1042 member_type = METRICS_CURR_SOCCLK;
1043 break;
1044 case PPCLK_VCLK_0:
1045 member_type = METRICS_CURR_VCLK;
1046 break;
1047 case PPCLK_DCLK_0:
1048 member_type = METRICS_CURR_DCLK;
1049 break;
1050 case PPCLK_VCLK_1:
1051 member_type = METRICS_CURR_VCLK1;
1052 break;
1053 case PPCLK_DCLK_1:
1054 member_type = METRICS_CURR_DCLK1;
1055 break;
1056 case PPCLK_DCFCLK:
1057 member_type = METRICS_CURR_DCEFCLK;
1058 break;
1059 default:
1060 return -EINVAL;
1061 }
1062
1063 return smu_v13_0_7_get_smu_metrics_data(smu,
1064 member_type,
1065 value);
1066 }
1067
1068 static bool smu_v13_0_7_is_od_feature_supported(struct smu_context *smu,
1069 int od_feature_bit)
1070 {
1071 PPTable_t *pptable = smu->smu_table.driver_pptable;
1072 const OverDriveLimits_t * const overdrive_upperlimits =
1073 &pptable->SkuTable.OverDriveLimitsBasicMax;
1074
1075 return overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit);
1076 }
1077
1078 static void smu_v13_0_7_get_od_setting_limits(struct smu_context *smu,
1079 int od_feature_bit,
1080 int32_t *min,
1081 int32_t *max)
1082 {
1083 PPTable_t *pptable = smu->smu_table.driver_pptable;
1084 const OverDriveLimits_t * const overdrive_upperlimits =
1085 &pptable->SkuTable.OverDriveLimitsBasicMax;
1086 const OverDriveLimits_t * const overdrive_lowerlimits =
1087 &pptable->SkuTable.OverDriveLimitsMin;
1088 int32_t od_min_setting, od_max_setting;
1089
1090 switch (od_feature_bit) {
1091 case PP_OD_FEATURE_GFXCLK_FMIN:
1092 od_min_setting = overdrive_lowerlimits->GfxclkFmin;
1093 od_max_setting = overdrive_upperlimits->GfxclkFmin;
1094 break;
1095 case PP_OD_FEATURE_GFXCLK_FMAX:
1096 od_min_setting = overdrive_lowerlimits->GfxclkFmax;
1097 od_max_setting = overdrive_upperlimits->GfxclkFmax;
1098 break;
1099 case PP_OD_FEATURE_UCLK_FMIN:
1100 od_min_setting = overdrive_lowerlimits->UclkFmin;
1101 od_max_setting = overdrive_upperlimits->UclkFmin;
1102 break;
1103 case PP_OD_FEATURE_UCLK_FMAX:
1104 od_min_setting = overdrive_lowerlimits->UclkFmax;
1105 od_max_setting = overdrive_upperlimits->UclkFmax;
1106 break;
1107 case PP_OD_FEATURE_GFX_VF_CURVE:
1108 od_min_setting = overdrive_lowerlimits->VoltageOffsetPerZoneBoundary;
1109 od_max_setting = overdrive_upperlimits->VoltageOffsetPerZoneBoundary;
1110 break;
1111 case PP_OD_FEATURE_FAN_CURVE_TEMP:
1112 od_min_setting = overdrive_lowerlimits->FanLinearTempPoints;
1113 od_max_setting = overdrive_upperlimits->FanLinearTempPoints;
1114 break;
1115 case PP_OD_FEATURE_FAN_CURVE_PWM:
1116 od_min_setting = overdrive_lowerlimits->FanLinearPwmPoints;
1117 od_max_setting = overdrive_upperlimits->FanLinearPwmPoints;
1118 break;
1119 case PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT:
1120 od_min_setting = overdrive_lowerlimits->AcousticLimitRpmThreshold;
1121 od_max_setting = overdrive_upperlimits->AcousticLimitRpmThreshold;
1122 break;
1123 case PP_OD_FEATURE_FAN_ACOUSTIC_TARGET:
1124 od_min_setting = overdrive_lowerlimits->AcousticTargetRpmThreshold;
1125 od_max_setting = overdrive_upperlimits->AcousticTargetRpmThreshold;
1126 break;
1127 case PP_OD_FEATURE_FAN_TARGET_TEMPERATURE:
1128 od_min_setting = overdrive_lowerlimits->FanTargetTemperature;
1129 od_max_setting = overdrive_upperlimits->FanTargetTemperature;
1130 break;
1131 case PP_OD_FEATURE_FAN_MINIMUM_PWM:
1132 od_min_setting = overdrive_lowerlimits->FanMinimumPwm;
1133 od_max_setting = overdrive_upperlimits->FanMinimumPwm;
1134 break;
1135 default:
1136 od_min_setting = od_max_setting = INT_MAX;
1137 break;
1138 }
1139
1140 if (min)
1141 *min = od_min_setting;
1142 if (max)
1143 *max = od_max_setting;
1144 }
1145
1146 static void smu_v13_0_7_dump_od_table(struct smu_context *smu,
1147 OverDriveTableExternal_t *od_table)
1148 {
1149 struct amdgpu_device *adev = smu->adev;
1150
1151 dev_dbg(adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->OverDriveTable.GfxclkFmin,
1152 od_table->OverDriveTable.GfxclkFmax);
1153 dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin,
1154 od_table->OverDriveTable.UclkFmax);
1155 }
1156
1157 static int smu_v13_0_7_get_overdrive_table(struct smu_context *smu,
1158 OverDriveTableExternal_t *od_table)
1159 {
1160 int ret = 0;
1161
1162 ret = smu_cmn_update_table(smu,
1163 SMU_TABLE_OVERDRIVE,
1164 0,
1165 (void *)od_table,
1166 false);
1167 if (ret)
1168 dev_err(smu->adev->dev, "Failed to get overdrive table!\n");
1169
1170 return ret;
1171 }
1172
1173 static int smu_v13_0_7_upload_overdrive_table(struct smu_context *smu,
1174 OverDriveTableExternal_t *od_table)
1175 {
1176 int ret = 0;
1177
1178 ret = smu_cmn_update_table(smu,
1179 SMU_TABLE_OVERDRIVE,
1180 0,
1181 (void *)od_table,
1182 true);
1183 if (ret)
1184 dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
1185
1186 return ret;
1187 }
1188
1189 static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
1190 enum smu_clk_type clk_type,
1191 char *buf)
1192 {
1193 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
1194 struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
1195 OverDriveTableExternal_t *od_table =
1196 (OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
1197 struct smu_13_0_dpm_table *single_dpm_table;
1198 struct smu_13_0_pcie_table *pcie_table;
1199 uint32_t gen_speed, lane_width;
1200 int i, curr_freq, size = 0;
1201 int32_t min_value, max_value;
1202 int ret = 0;
1203
1204 smu_cmn_get_sysfs_buf(&buf, &size);
1205
1206 if (amdgpu_ras_intr_triggered()) {
1207 size += sysfs_emit_at(buf, size, "unavailable\n");
1208 return size;
1209 }
1210
1211 switch (clk_type) {
1212 case SMU_SCLK:
1213 single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
1214 break;
1215 case SMU_MCLK:
1216 single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
1217 break;
1218 case SMU_SOCCLK:
1219 single_dpm_table = &(dpm_context->dpm_tables.soc_table);
1220 break;
1221 case SMU_FCLK:
1222 single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
1223 break;
1224 case SMU_VCLK:
1225 case SMU_VCLK1:
1226 single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
1227 break;
1228 case SMU_DCLK:
1229 case SMU_DCLK1:
1230 single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
1231 break;
1232 case SMU_DCEFCLK:
1233 single_dpm_table = &(dpm_context->dpm_tables.dcef_table);
1234 break;
1235 default:
1236 break;
1237 }
1238
1239 switch (clk_type) {
1240 case SMU_SCLK:
1241 case SMU_MCLK:
1242 case SMU_SOCCLK:
1243 case SMU_FCLK:
1244 case SMU_VCLK:
1245 case SMU_VCLK1:
1246 case SMU_DCLK:
1247 case SMU_DCLK1:
1248 case SMU_DCEFCLK:
1249 ret = smu_v13_0_7_get_current_clk_freq_by_table(smu, clk_type, &curr_freq);
1250 if (ret) {
1251 dev_err(smu->adev->dev, "Failed to get current clock freq!");
1252 return ret;
1253 }
1254
1255 if (single_dpm_table->is_fine_grained) {
1256 /*
1257 * For fine grained dpms, there are only two dpm levels:
1258 * - level 0 -> min clock freq
1259 * - level 1 -> max clock freq
1260 * And the current clock frequency can be any value between them.
1261 * So, if the current clock frequency is not at level 0 or level 1,
1262 * we will fake it as three dpm levels:
1263 * - level 0 -> min clock freq
1264 * - level 1 -> current actual clock freq
1265 * - level 2 -> max clock freq
1266 */
1267 if ((single_dpm_table->dpm_levels[0].value != curr_freq) &&
1268 (single_dpm_table->dpm_levels[1].value != curr_freq)) {
1269 size += sysfs_emit_at(buf, size, "0: %uMhz\n",
1270 single_dpm_table->dpm_levels[0].value);
1271 size += sysfs_emit_at(buf, size, "1: %uMhz *\n",
1272 curr_freq);
1273 size += sysfs_emit_at(buf, size, "2: %uMhz\n",
1274 single_dpm_table->dpm_levels[1].value);
1275 } else {
1276 size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
1277 single_dpm_table->dpm_levels[0].value,
1278 single_dpm_table->dpm_levels[0].value == curr_freq ? "*" : "");
1279 size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
1280 single_dpm_table->dpm_levels[1].value,
1281 single_dpm_table->dpm_levels[1].value == curr_freq ? "*" : "");
1282 }
1283 } else {
1284 for (i = 0; i < single_dpm_table->count; i++)
1285 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
1286 i, single_dpm_table->dpm_levels[i].value,
1287 single_dpm_table->dpm_levels[i].value == curr_freq ? "*" : "");
1288 }
1289 break;
1290 case SMU_PCIE:
1291 ret = smu_v13_0_7_get_smu_metrics_data(smu,
1292 METRICS_PCIE_RATE,
1293 &gen_speed);
1294 if (ret)
1295 return ret;
1296
1297 ret = smu_v13_0_7_get_smu_metrics_data(smu,
1298 METRICS_PCIE_WIDTH,
1299 &lane_width);
1300 if (ret)
1301 return ret;
1302
1303 pcie_table = &(dpm_context->dpm_tables.pcie_table);
1304 for (i = 0; i < pcie_table->num_of_link_levels; i++)
1305 size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
1306 (pcie_table->pcie_gen[i] == 0) ? "2.5GT/s," :
1307 (pcie_table->pcie_gen[i] == 1) ? "5.0GT/s," :
1308 (pcie_table->pcie_gen[i] == 2) ? "8.0GT/s," :
1309 (pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," : "",
1310 (pcie_table->pcie_lane[i] == 1) ? "x1" :
1311 (pcie_table->pcie_lane[i] == 2) ? "x2" :
1312 (pcie_table->pcie_lane[i] == 3) ? "x4" :
1313 (pcie_table->pcie_lane[i] == 4) ? "x8" :
1314 (pcie_table->pcie_lane[i] == 5) ? "x12" :
1315 (pcie_table->pcie_lane[i] == 6) ? "x16" : "",
1316 pcie_table->clk_freq[i],
1317 (gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
1318 (lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ?
1319 "*" : "");
1320 break;
1321
1322 case SMU_OD_SCLK:
1323 if (!smu_v13_0_7_is_od_feature_supported(smu,
1324 PP_OD_FEATURE_GFXCLK_BIT))
1325 break;
1326
1327 size += sysfs_emit_at(buf, size, "OD_SCLK:\n");
1328 size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
1329 od_table->OverDriveTable.GfxclkFmin,
1330 od_table->OverDriveTable.GfxclkFmax);
1331 break;
1332
1333 case SMU_OD_MCLK:
1334 if (!smu_v13_0_7_is_od_feature_supported(smu,
1335 PP_OD_FEATURE_UCLK_BIT))
1336 break;
1337
1338 size += sysfs_emit_at(buf, size, "OD_MCLK:\n");
1339 size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMHz\n",
1340 od_table->OverDriveTable.UclkFmin,
1341 od_table->OverDriveTable.UclkFmax);
1342 break;
1343
1344 case SMU_OD_VDDGFX_OFFSET:
1345 if (!smu_v13_0_7_is_od_feature_supported(smu,
1346 PP_OD_FEATURE_GFX_VF_CURVE_BIT))
1347 break;
1348
1349 size += sysfs_emit_at(buf, size, "OD_VDDGFX_OFFSET:\n");
1350 size += sysfs_emit_at(buf, size, "%dmV\n",
1351 od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[0]);
1352 break;
1353
1354 case SMU_OD_FAN_CURVE:
1355 if (!smu_v13_0_7_is_od_feature_supported(smu,
1356 PP_OD_FEATURE_FAN_CURVE_BIT))
1357 break;
1358
1359 size += sysfs_emit_at(buf, size, "OD_FAN_CURVE:\n");
1360 for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++)
1361 size += sysfs_emit_at(buf, size, "%d: %dC %d%%\n",
1362 i,
1363 (int)od_table->OverDriveTable.FanLinearTempPoints[i],
1364 (int)od_table->OverDriveTable.FanLinearPwmPoints[i]);
1365
1366 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
1367 smu_v13_0_7_get_od_setting_limits(smu,
1368 PP_OD_FEATURE_FAN_CURVE_TEMP,
1369 &min_value,
1370 &max_value);
1371 size += sysfs_emit_at(buf, size, "FAN_CURVE(hotspot temp): %uC %uC\n",
1372 min_value, max_value);
1373
1374 smu_v13_0_7_get_od_setting_limits(smu,
1375 PP_OD_FEATURE_FAN_CURVE_PWM,
1376 &min_value,
1377 &max_value);
1378 size += sysfs_emit_at(buf, size, "FAN_CURVE(fan speed): %u%% %u%%\n",
1379 min_value, max_value);
1380
1381 break;
1382
1383 case SMU_OD_ACOUSTIC_LIMIT:
1384 if (!smu_v13_0_7_is_od_feature_supported(smu,
1385 PP_OD_FEATURE_FAN_CURVE_BIT))
1386 break;
1387
1388 size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_LIMIT:\n");
1389 size += sysfs_emit_at(buf, size, "%d\n",
1390 (int)od_table->OverDriveTable.AcousticLimitRpmThreshold);
1391
1392 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
1393 smu_v13_0_7_get_od_setting_limits(smu,
1394 PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT,
1395 &min_value,
1396 &max_value);
1397 size += sysfs_emit_at(buf, size, "ACOUSTIC_LIMIT: %u %u\n",
1398 min_value, max_value);
1399 break;
1400
1401 case SMU_OD_ACOUSTIC_TARGET:
1402 if (!smu_v13_0_7_is_od_feature_supported(smu,
1403 PP_OD_FEATURE_FAN_CURVE_BIT))
1404 break;
1405
1406 size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_TARGET:\n");
1407 size += sysfs_emit_at(buf, size, "%d\n",
1408 (int)od_table->OverDriveTable.AcousticTargetRpmThreshold);
1409
1410 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
1411 smu_v13_0_7_get_od_setting_limits(smu,
1412 PP_OD_FEATURE_FAN_ACOUSTIC_TARGET,
1413 &min_value,
1414 &max_value);
1415 size += sysfs_emit_at(buf, size, "ACOUSTIC_TARGET: %u %u\n",
1416 min_value, max_value);
1417 break;
1418
1419 case SMU_OD_FAN_TARGET_TEMPERATURE:
1420 if (!smu_v13_0_7_is_od_feature_supported(smu,
1421 PP_OD_FEATURE_FAN_CURVE_BIT))
1422 break;
1423
1424 size += sysfs_emit_at(buf, size, "FAN_TARGET_TEMPERATURE:\n");
1425 size += sysfs_emit_at(buf, size, "%d\n",
1426 (int)od_table->OverDriveTable.FanTargetTemperature);
1427
1428 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
1429 smu_v13_0_7_get_od_setting_limits(smu,
1430 PP_OD_FEATURE_FAN_TARGET_TEMPERATURE,
1431 &min_value,
1432 &max_value);
1433 size += sysfs_emit_at(buf, size, "TARGET_TEMPERATURE: %u %u\n",
1434 min_value, max_value);
1435 break;
1436
1437 case SMU_OD_FAN_MINIMUM_PWM:
1438 if (!smu_v13_0_7_is_od_feature_supported(smu,
1439 PP_OD_FEATURE_FAN_CURVE_BIT))
1440 break;
1441
1442 size += sysfs_emit_at(buf, size, "FAN_MINIMUM_PWM:\n");
1443 size += sysfs_emit_at(buf, size, "%d\n",
1444 (int)od_table->OverDriveTable.FanMinimumPwm);
1445
1446 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
1447 smu_v13_0_7_get_od_setting_limits(smu,
1448 PP_OD_FEATURE_FAN_MINIMUM_PWM,
1449 &min_value,
1450 &max_value);
1451 size += sysfs_emit_at(buf, size, "MINIMUM_PWM: %u %u\n",
1452 min_value, max_value);
1453 break;
1454
1455 case SMU_OD_RANGE:
1456 if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) &&
1457 !smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) &&
1458 !smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT))
1459 break;
1460
1461 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
1462
1463 if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
1464 smu_v13_0_7_get_od_setting_limits(smu,
1465 PP_OD_FEATURE_GFXCLK_FMIN,
1466 &min_value,
1467 NULL);
1468 smu_v13_0_7_get_od_setting_limits(smu,
1469 PP_OD_FEATURE_GFXCLK_FMAX,
1470 NULL,
1471 &max_value);
1472 size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
1473 min_value, max_value);
1474 }
1475
1476 if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) {
1477 smu_v13_0_7_get_od_setting_limits(smu,
1478 PP_OD_FEATURE_UCLK_FMIN,
1479 &min_value,
1480 NULL);
1481 smu_v13_0_7_get_od_setting_limits(smu,
1482 PP_OD_FEATURE_UCLK_FMAX,
1483 NULL,
1484 &max_value);
1485 size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
1486 min_value, max_value);
1487 }
1488
1489 if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
1490 smu_v13_0_7_get_od_setting_limits(smu,
1491 PP_OD_FEATURE_GFX_VF_CURVE,
1492 &min_value,
1493 &max_value);
1494 size += sysfs_emit_at(buf, size, "VDDGFX_OFFSET: %7dmv %10dmv\n",
1495 min_value, max_value);
1496 }
1497 break;
1498
1499 default:
1500 break;
1501 }
1502
1503 return size;
1504 }
1505
1506 static int smu_v13_0_7_od_restore_table_single(struct smu_context *smu, long input)
1507 {
1508 struct smu_table_context *table_context = &smu->smu_table;
1509 OverDriveTableExternal_t *boot_overdrive_table =
1510 (OverDriveTableExternal_t *)table_context->boot_overdrive_table;
1511 OverDriveTableExternal_t *od_table =
1512 (OverDriveTableExternal_t *)table_context->overdrive_table;
1513 struct amdgpu_device *adev = smu->adev;
1514 int i;
1515
1516 switch (input) {
1517 case PP_OD_EDIT_FAN_CURVE:
1518 for (i = 0; i < NUM_OD_FAN_MAX_POINTS; i++) {
1519 od_table->OverDriveTable.FanLinearTempPoints[i] =
1520 boot_overdrive_table->OverDriveTable.FanLinearTempPoints[i];
1521 od_table->OverDriveTable.FanLinearPwmPoints[i] =
1522 boot_overdrive_table->OverDriveTable.FanLinearPwmPoints[i];
1523 }
1524 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
1525 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
1526 break;
1527 case PP_OD_EDIT_ACOUSTIC_LIMIT:
1528 od_table->OverDriveTable.AcousticLimitRpmThreshold =
1529 boot_overdrive_table->OverDriveTable.AcousticLimitRpmThreshold;
1530 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
1531 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
1532 break;
1533 case PP_OD_EDIT_ACOUSTIC_TARGET:
1534 od_table->OverDriveTable.AcousticTargetRpmThreshold =
1535 boot_overdrive_table->OverDriveTable.AcousticTargetRpmThreshold;
1536 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
1537 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
1538 break;
1539 case PP_OD_EDIT_FAN_TARGET_TEMPERATURE:
1540 od_table->OverDriveTable.FanTargetTemperature =
1541 boot_overdrive_table->OverDriveTable.FanTargetTemperature;
1542 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
1543 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
1544 break;
1545 case PP_OD_EDIT_FAN_MINIMUM_PWM:
1546 od_table->OverDriveTable.FanMinimumPwm =
1547 boot_overdrive_table->OverDriveTable.FanMinimumPwm;
1548 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
1549 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
1550 break;
1551 default:
1552 dev_info(adev->dev, "Invalid table index: %ld\n", input);
1553 return -EINVAL;
1554 }
1555
1556 return 0;
1557 }
1558
1559 static int smu_v13_0_7_od_edit_dpm_table(struct smu_context *smu,
1560 enum PP_OD_DPM_TABLE_COMMAND type,
1561 long input[],
1562 uint32_t size)
1563 {
1564 struct smu_table_context *table_context = &smu->smu_table;
1565 OverDriveTableExternal_t *od_table =
1566 (OverDriveTableExternal_t *)table_context->overdrive_table;
1567 struct amdgpu_device *adev = smu->adev;
1568 uint32_t offset_of_voltageoffset;
1569 int32_t minimum, maximum;
1570 uint32_t feature_ctrlmask;
1571 int i, ret = 0;
1572
1573 switch (type) {
1574 case PP_OD_EDIT_SCLK_VDDC_TABLE:
1575 if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
1576 dev_warn(adev->dev, "GFXCLK_LIMITS setting not supported!\n");
1577 return -ENOTSUPP;
1578 }
1579
1580 for (i = 0; i < size; i += 2) {
1581 if (i + 2 > size) {
1582 dev_info(adev->dev, "invalid number of input parameters %d\n", size);
1583 return -EINVAL;
1584 }
1585
1586 switch (input[i]) {
1587 case 0:
1588 smu_v13_0_7_get_od_setting_limits(smu,
1589 PP_OD_FEATURE_GFXCLK_FMIN,
1590 &minimum,
1591 &maximum);
1592 if (input[i + 1] < minimum ||
1593 input[i + 1] > maximum) {
1594 dev_info(adev->dev, "GfxclkFmin (%ld) must be within [%u, %u]!\n",
1595 input[i + 1], minimum, maximum);
1596 return -EINVAL;
1597 }
1598
1599 od_table->OverDriveTable.GfxclkFmin = input[i + 1];
1600 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
1601 break;
1602
1603 case 1:
1604 smu_v13_0_7_get_od_setting_limits(smu,
1605 PP_OD_FEATURE_GFXCLK_FMAX,
1606 &minimum,
1607 &maximum);
1608 if (input[i + 1] < minimum ||
1609 input[i + 1] > maximum) {
1610 dev_info(adev->dev, "GfxclkFmax (%ld) must be within [%u, %u]!\n",
1611 input[i + 1], minimum, maximum);
1612 return -EINVAL;
1613 }
1614
1615 od_table->OverDriveTable.GfxclkFmax = input[i + 1];
1616 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
1617 break;
1618
1619 default:
1620 dev_info(adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
1621 dev_info(adev->dev, "Supported indices: [0:min,1:max]\n");
1622 return -EINVAL;
1623 }
1624 }
1625
1626 if (od_table->OverDriveTable.GfxclkFmin > od_table->OverDriveTable.GfxclkFmax) {
1627 dev_err(adev->dev,
1628 "Invalid setting: GfxclkFmin(%u) is bigger than GfxclkFmax(%u)\n",
1629 (uint32_t)od_table->OverDriveTable.GfxclkFmin,
1630 (uint32_t)od_table->OverDriveTable.GfxclkFmax);
1631 return -EINVAL;
1632 }
1633 break;
1634
1635 case PP_OD_EDIT_MCLK_VDDC_TABLE:
1636 if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) {
1637 dev_warn(adev->dev, "UCLK_LIMITS setting not supported!\n");
1638 return -ENOTSUPP;
1639 }
1640
1641 for (i = 0; i < size; i += 2) {
1642 if (i + 2 > size) {
1643 dev_info(adev->dev, "invalid number of input parameters %d\n", size);
1644 return -EINVAL;
1645 }
1646
1647 switch (input[i]) {
1648 case 0:
1649 smu_v13_0_7_get_od_setting_limits(smu,
1650 PP_OD_FEATURE_UCLK_FMIN,
1651 &minimum,
1652 &maximum);
1653 if (input[i + 1] < minimum ||
1654 input[i + 1] > maximum) {
1655 dev_info(adev->dev, "UclkFmin (%ld) must be within [%u, %u]!\n",
1656 input[i + 1], minimum, maximum);
1657 return -EINVAL;
1658 }
1659
1660 od_table->OverDriveTable.UclkFmin = input[i + 1];
1661 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT;
1662 break;
1663
1664 case 1:
1665 smu_v13_0_7_get_od_setting_limits(smu,
1666 PP_OD_FEATURE_UCLK_FMAX,
1667 &minimum,
1668 &maximum);
1669 if (input[i + 1] < minimum ||
1670 input[i + 1] > maximum) {
1671 dev_info(adev->dev, "UclkFmax (%ld) must be within [%u, %u]!\n",
1672 input[i + 1], minimum, maximum);
1673 return -EINVAL;
1674 }
1675
1676 od_table->OverDriveTable.UclkFmax = input[i + 1];
1677 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT;
1678 break;
1679
1680 default:
1681 dev_info(adev->dev, "Invalid MCLK_VDDC_TABLE index: %ld\n", input[i]);
1682 dev_info(adev->dev, "Supported indices: [0:min,1:max]\n");
1683 return -EINVAL;
1684 }
1685 }
1686
1687 if (od_table->OverDriveTable.UclkFmin > od_table->OverDriveTable.UclkFmax) {
1688 dev_err(adev->dev,
1689 "Invalid setting: UclkFmin(%u) is bigger than UclkFmax(%u)\n",
1690 (uint32_t)od_table->OverDriveTable.UclkFmin,
1691 (uint32_t)od_table->OverDriveTable.UclkFmax);
1692 return -EINVAL;
1693 }
1694 break;
1695
1696 case PP_OD_EDIT_VDDGFX_OFFSET:
1697 if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
1698 dev_warn(adev->dev, "Gfx offset setting not supported!\n");
1699 return -ENOTSUPP;
1700 }
1701
1702 smu_v13_0_7_get_od_setting_limits(smu,
1703 PP_OD_FEATURE_GFX_VF_CURVE,
1704 &minimum,
1705 &maximum);
1706 if (input[0] < minimum ||
1707 input[0] > maximum) {
1708 dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n",
1709 input[0], minimum, maximum);
1710 return -EINVAL;
1711 }
1712
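/* A single user-provided offset is applied to every VF curve zone boundary point. */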
1713 for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
1714 od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] = input[0];
1715 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT);
1716 break;
1717
1718 case PP_OD_EDIT_FAN_CURVE:
1719 if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
1720 dev_warn(adev->dev, "Fan curve setting not supported!\n");
1721 return -ENOTSUPP;
1722 }
1723
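/* input[0] selects the fan curve point, input[1]/input[2] give its temperature and PWM. */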
1724 if (input[0] >= NUM_OD_FAN_MAX_POINTS - 1 ||
1725 input[0] < 0)
1726 return -EINVAL;
1727
1728 smu_v13_0_7_get_od_setting_limits(smu,
1729 PP_OD_FEATURE_FAN_CURVE_TEMP,
1730 &minimum,
1731 &maximum);
1732 if (input[1] < minimum ||
1733 input[1] > maximum) {
1734 dev_info(adev->dev, "Fan curve temp setting(%ld) must be within [%d, %d]!\n",
1735 input[1], minimum, maximum);
1736 return -EINVAL;
1737 }
1738
1739 smu_v13_0_7_get_od_setting_limits(smu,
1740 PP_OD_FEATURE_FAN_CURVE_PWM,
1741 &minimum,
1742 &maximum);
1743 if (input[2] < minimum ||
1744 input[2] > maximum) {
1745 dev_info(adev->dev, "Fan curve pwm setting(%ld) must be within [%d, %d]!\n",
1746 input[2], minimum, maximum);
1747 return -EINVAL;
1748 }
1749
1750 od_table->OverDriveTable.FanLinearTempPoints[input[0]] = input[1];
1751 od_table->OverDriveTable.FanLinearPwmPoints[input[0]] = input[2];
1752 od_table->OverDriveTable.FanMode = FAN_MODE_MANUAL_LINEAR;
1753 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
1754 break;
1755
1756 case PP_OD_EDIT_ACOUSTIC_LIMIT:
1757 if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
1758 dev_warn(adev->dev, "Fan curve setting not supported!\n");
1759 return -ENOTSUPP;
1760 }
1761
1762 smu_v13_0_7_get_od_setting_limits(smu,
1763 PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT,
1764 &minimum,
1765 &maximum);
1766 if (input[0] < minimum ||
1767 input[0] > maximum) {
1768 dev_info(adev->dev, "acoustic limit threshold setting(%ld) must be within [%d, %d]!\n",
1769 input[0], minimum, maximum);
1770 return -EINVAL;
1771 }
1772
1773 od_table->OverDriveTable.AcousticLimitRpmThreshold = input[0];
1774 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
1775 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
1776 break;
1777
1778 case PP_OD_EDIT_ACOUSTIC_TARGET:
1779 if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
1780 dev_warn(adev->dev, "Fan curve setting not supported!\n");
1781 return -ENOTSUPP;
1782 }
1783
1784 smu_v13_0_7_get_od_setting_limits(smu,
1785 PP_OD_FEATURE_FAN_ACOUSTIC_TARGET,
1786 &minimum,
1787 &maximum);
1788 if (input[0] < minimum ||
1789 input[0] > maximum) {
1790 dev_info(adev->dev, "acoustic target threshold setting(%ld) must be within [%d, %d]!\n",
1791 input[0], minimum, maximum);
1792 return -EINVAL;
1793 }
1794
1795 od_table->OverDriveTable.AcousticTargetRpmThreshold = input[0];
1796 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
1797 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
1798 break;
1799
1800 case PP_OD_EDIT_FAN_TARGET_TEMPERATURE:
1801 if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
1802 dev_warn(adev->dev, "Fan curve setting not supported!\n");
1803 return -ENOTSUPP;
1804 }
1805
1806 smu_v13_0_7_get_od_setting_limits(smu,
1807 PP_OD_FEATURE_FAN_TARGET_TEMPERATURE,
1808 &minimum,
1809 &maximum);
1810 if (input[0] < minimum ||
1811 input[0] > maximum) {
1812 dev_info(adev->dev, "fan target temperature setting(%ld) must be within [%d, %d]!\n",
1813 input[0], minimum, maximum);
1814 return -EINVAL;
1815 }
1816
1817 od_table->OverDriveTable.FanTargetTemperature = input[0];
1818 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
1819 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
1820 break;
1821
1822 case PP_OD_EDIT_FAN_MINIMUM_PWM:
1823 if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
1824 dev_warn(adev->dev, "Fan curve setting not supported!\n");
1825 return -ENOTSUPP;
1826 }
1827
1828 smu_v13_0_7_get_od_setting_limits(smu,
1829 PP_OD_FEATURE_FAN_MINIMUM_PWM,
1830 &minimum,
1831 &maximum);
1832 if (input[0] < minimum ||
1833 input[0] > maximum) {
1834 dev_info(adev->dev, "fan minimum pwm setting(%ld) must be within [%d, %d]!\n",
1835 input[0], minimum, maximum);
1836 return -EINVAL;
1837 }
1838
1839 od_table->OverDriveTable.FanMinimumPwm = input[0];
1840 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
1841 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
1842 break;
1843
1844 case PP_OD_RESTORE_DEFAULT_TABLE:
1845 if (size == 1) {
1846 ret = smu_v13_0_7_od_restore_table_single(smu, input[0]);
1847 if (ret)
1848 return ret;
1849 } else {
1850 feature_ctrlmask = od_table->OverDriveTable.FeatureCtrlMask;
1851 memcpy(od_table,
1852 table_context->boot_overdrive_table,
1853 sizeof(OverDriveTableExternal_t));
1854 od_table->OverDriveTable.FeatureCtrlMask = feature_ctrlmask;
1855 }
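/* Fall through so the restored table is committed to PMFW right away. */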
1856 fallthrough;
1857
1858 case PP_OD_COMMIT_DPM_TABLE:
1859 /*
1860 * The `uint32_t FeatureCtrlMask` member below tells PMFW which
1861 * settings are targeted by this single operation.
1862 *
1863 * It does not carry any actual information about the user's custom
1864 * settings, so we do not cache it.
1865 */
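/*
 * Compare and cache only the region starting at VoltageOffsetPerZoneBoundary,
 * so that the (uncached) FeatureCtrlMask at the head of the table stays out
 * of the user overdrive cache.
 */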
1866 offset_of_voltageoffset = offsetof(OverDriveTable_t, VoltageOffsetPerZoneBoundary);
1867 if (memcmp((u8 *)od_table + offset_of_voltageoffset,
1868 table_context->user_overdrive_table + offset_of_voltageoffset,
1869 sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset)) {
1870 smu_v13_0_7_dump_od_table(smu, od_table);
1871
1872 ret = smu_v13_0_7_upload_overdrive_table(smu, od_table);
1873 if (ret) {
1874 dev_err(adev->dev, "Failed to upload overdrive table!\n");
1875 return ret;
1876 }
1877
1878 od_table->OverDriveTable.FeatureCtrlMask = 0;
1879 memcpy(table_context->user_overdrive_table + offset_of_voltageoffset,
1880 (u8 *)od_table + offset_of_voltageoffset,
1881 sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset);
1882
1883 if (!memcmp(table_context->user_overdrive_table,
1884 table_context->boot_overdrive_table,
1885 sizeof(OverDriveTableExternal_t)))
1886 smu->user_dpm_profile.user_od = false;
1887 else
1888 smu->user_dpm_profile.user_od = true;
1889 }
1890 break;
1891
1892 default:
1893 return -ENOSYS;
1894 }
1895
1896 return ret;
1897 }
1898
1899 static int smu_v13_0_7_force_clk_levels(struct smu_context *smu,
1900 enum smu_clk_type clk_type,
1901 uint32_t mask)
1902 {
1903 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
1904 struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
1905 struct smu_13_0_dpm_table *single_dpm_table;
1906 uint32_t soft_min_level, soft_max_level;
1907 uint32_t min_freq, max_freq;
1908 int ret = 0;
1909
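/* The lowest/highest set bit of the mask selects the soft min/max DPM level. */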
1910 soft_min_level = mask ? (ffs(mask) - 1) : 0;
1911 soft_max_level = mask ? (fls(mask) - 1) : 0;
1912
1913 switch (clk_type) {
1914 case SMU_GFXCLK:
1915 case SMU_SCLK:
1916 single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
1917 break;
1918 case SMU_MCLK:
1919 case SMU_UCLK:
1920 single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
1921 break;
1922 case SMU_SOCCLK:
1923 single_dpm_table = &(dpm_context->dpm_tables.soc_table);
1924 break;
1925 case SMU_FCLK:
1926 single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
1927 break;
1928 case SMU_VCLK:
1929 case SMU_VCLK1:
1930 single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
1931 break;
1932 case SMU_DCLK:
1933 case SMU_DCLK1:
1934 single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
1935 break;
1936 default:
1937 break;
1938 }
1939
1940 switch (clk_type) {
1941 case SMU_GFXCLK:
1942 case SMU_SCLK:
1943 case SMU_MCLK:
1944 case SMU_UCLK:
1945 case SMU_SOCCLK:
1946 case SMU_FCLK:
1947 case SMU_VCLK:
1948 case SMU_VCLK1:
1949 case SMU_DCLK:
1950 case SMU_DCLK1:
1951 if (single_dpm_table->is_fine_grained) {
1952 /* There are only 2 levels for fine-grained DPM */
1953 soft_max_level = (soft_max_level >= 1 ? 1 : 0);
1954 soft_min_level = (soft_min_level >= 1 ? 1 : 0);
1955 } else {
1956 if ((soft_max_level >= single_dpm_table->count) ||
1957 (soft_min_level >= single_dpm_table->count))
1958 return -EINVAL;
1959 }
1960
1961 min_freq = single_dpm_table->dpm_levels[soft_min_level].value;
1962 max_freq = single_dpm_table->dpm_levels[soft_max_level].value;
1963
1964 ret = smu_v13_0_set_soft_freq_limited_range(smu,
1965 clk_type,
1966 min_freq,
1967 max_freq);
1968 break;
1969 case SMU_DCEFCLK:
1970 case SMU_PCIE:
1971 default:
1972 break;
1973 }
1974
1975 return ret;
1976 }
1977
1978 static const struct smu_temperature_range smu13_thermal_policy[] = {
1979 {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
1980 { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
1981 };
1982
1983 static int smu_v13_0_7_get_thermal_temperature_range(struct smu_context *smu,
1984 struct smu_temperature_range *range)
1985 {
1986 struct smu_table_context *table_context = &smu->smu_table;
1987 struct smu_13_0_7_powerplay_table *powerplay_table =
1988 table_context->power_play_table;
1989 PPTable_t *pptable = smu->smu_table.driver_pptable;
1990
1991 if (!range)
1992 return -EINVAL;
1993
1994 memcpy(range, &smu13_thermal_policy[0], sizeof(struct smu_temperature_range));
1995
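/*
 * The pptable limits are given in degrees C; scale them to the millidegree
 * units used by struct smu_temperature_range.
 */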
1996 range->max = pptable->SkuTable.TemperatureLimit[TEMP_EDGE] *
1997 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1998 range->edge_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_EDGE] + CTF_OFFSET_EDGE) *
1999 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
2000 range->hotspot_crit_max = pptable->SkuTable.TemperatureLimit[TEMP_HOTSPOT] *
2001 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
2002 range->hotspot_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_HOTSPOT] + CTF_OFFSET_HOTSPOT) *
2003 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
2004 range->mem_crit_max = pptable->SkuTable.TemperatureLimit[TEMP_MEM] *
2005 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
2006 range->mem_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM) *
2007 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
2008 range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
2009 range->software_shutdown_temp_offset = pptable->SkuTable.FanAbnormalTempLimitOffset;
2010
2011 return 0;
2012 }
2013
2014 static ssize_t smu_v13_0_7_get_gpu_metrics(struct smu_context *smu,
2015 void **table)
2016 {
2017 struct smu_table_context *smu_table = &smu->smu_table;
2018 struct gpu_metrics_v1_3 *gpu_metrics =
2019 (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
2020 SmuMetricsExternal_t metrics_ext;
2021 SmuMetrics_t *metrics = &metrics_ext.SmuMetrics;
2022 int ret = 0;
2023
2024 ret = smu_cmn_get_metrics_table(smu,
2025 &metrics_ext,
2026 true);
2027 if (ret)
2028 return ret;
2029
2030 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
2031
2032 gpu_metrics->temperature_edge = metrics->AvgTemperature[TEMP_EDGE];
2033 gpu_metrics->temperature_hotspot = metrics->AvgTemperature[TEMP_HOTSPOT];
2034 gpu_metrics->temperature_mem = metrics->AvgTemperature[TEMP_MEM];
2035 gpu_metrics->temperature_vrgfx = metrics->AvgTemperature[TEMP_VR_GFX];
2036 gpu_metrics->temperature_vrsoc = metrics->AvgTemperature[TEMP_VR_SOC];
2037 gpu_metrics->temperature_vrmem = max(metrics->AvgTemperature[TEMP_VR_MEM0],
2038 metrics->AvgTemperature[TEMP_VR_MEM1]);
2039
2040 gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
2041 gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
2042 gpu_metrics->average_mm_activity = max(metrics->Vcn0ActivityPercentage,
2043 metrics->Vcn1ActivityPercentage);
2044
2045 gpu_metrics->average_socket_power = metrics->AverageSocketPower;
2046 gpu_metrics->energy_accumulator = metrics->EnergyAccumulator;
2047
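/*
 * Report the post-deep-sleep (PostDs) frequency when activity is at or below
 * the busy threshold, the pre-deep-sleep (PreDs) value otherwise.
 */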
2048 if (metrics->AverageGfxActivity <= SMU_13_0_7_BUSY_THRESHOLD)
2049 gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs;
2050 else
2051 gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs;
2052
2053 if (metrics->AverageUclkActivity <= SMU_13_0_7_BUSY_THRESHOLD)
2054 gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPostDs;
2055 else
2056 gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPreDs;
2057
2058 gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency;
2059 gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency;
2060 gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;
2061 gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;
2062
2063 gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK];
2064 gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
2065 gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0];
2066 gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_1];
2067 gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_1];
2068
2069 gpu_metrics->throttle_status =
2070 smu_v13_0_7_get_throttler_status(metrics);
2071 gpu_metrics->indep_throttle_status =
2072 smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status,
2073 smu_v13_0_7_throttler_map);
2074
2075 gpu_metrics->current_fan_speed = metrics->AvgFanRpm;
2076
2077 gpu_metrics->pcie_link_width = metrics->PcieWidth;
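/* Fall back to Gen1 if PMFW reports a rate beyond the supported range. */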
2078 if ((metrics->PcieRate - 1) > LINK_SPEED_MAX)
2079 gpu_metrics->pcie_link_speed = pcie_gen_to_speed(1);
2080 else
2081 gpu_metrics->pcie_link_speed = pcie_gen_to_speed(metrics->PcieRate);
2082
2083 gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
2084
2085 gpu_metrics->voltage_gfx = metrics->AvgVoltage[SVI_PLANE_GFX];
2086 gpu_metrics->voltage_soc = metrics->AvgVoltage[SVI_PLANE_SOC];
2087 gpu_metrics->voltage_mem = metrics->AvgVoltage[SVI_PLANE_VMEMP];
2088
2089 *table = (void *)gpu_metrics;
2090
2091 return sizeof(struct gpu_metrics_v1_3);
2092 }
2093
2094 static void smu_v13_0_7_set_supported_od_feature_mask(struct smu_context *smu)
2095 {
2096 struct amdgpu_device *adev = smu->adev;
2097
2098 if (smu_v13_0_7_is_od_feature_supported(smu,
2099 PP_OD_FEATURE_FAN_CURVE_BIT))
2100 adev->pm.od_feature_mask |= OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE |
2101 OD_OPS_SUPPORT_FAN_CURVE_SET |
2102 OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE |
2103 OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET |
2104 OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE |
2105 OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET |
2106 OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE |
2107 OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET |
2108 OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE |
2109 OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET;
2110 }
2111
2112 static int smu_v13_0_7_set_default_od_settings(struct smu_context *smu)
2113 {
2114 OverDriveTableExternal_t *od_table =
2115 (OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
2116 OverDriveTableExternal_t *boot_od_table =
2117 (OverDriveTableExternal_t *)smu->smu_table.boot_overdrive_table;
2118 OverDriveTableExternal_t *user_od_table =
2119 (OverDriveTableExternal_t *)smu->smu_table.user_overdrive_table;
2120 OverDriveTableExternal_t user_od_table_bak;
2121 int ret = 0;
2122 int i;
2123
2124 ret = smu_v13_0_7_get_overdrive_table(smu, boot_od_table);
2125 if (ret)
2126 return ret;
2127
2128 smu_v13_0_7_dump_od_table(smu, boot_od_table);
2129
2130 memcpy(od_table,
2131 boot_od_table,
2132 sizeof(OverDriveTableExternal_t));
2133
2134 /*
2135 * For S3/S4/Runpm resume, we need to set up those overdrive tables again,
2136 * but we have to preserve the user-defined values in "user_od_table".
2137 */
2138 if (!smu->adev->in_suspend) {
2139 memcpy(user_od_table,
2140 boot_od_table,
2141 sizeof(OverDriveTableExternal_t));
2142 smu->user_dpm_profile.user_od = false;
2143 } else if (smu->user_dpm_profile.user_od) {
2144 memcpy(&user_od_table_bak,
2145 user_od_table,
2146 sizeof(OverDriveTableExternal_t));
2147 memcpy(user_od_table,
2148 boot_od_table,
2149 sizeof(OverDriveTableExternal_t));
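/* Re-apply only the user-tunable fields on top of the freshly fetched boot values. */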
2150 user_od_table->OverDriveTable.GfxclkFmin =
2151 user_od_table_bak.OverDriveTable.GfxclkFmin;
2152 user_od_table->OverDriveTable.GfxclkFmax =
2153 user_od_table_bak.OverDriveTable.GfxclkFmax;
2154 user_od_table->OverDriveTable.UclkFmin =
2155 user_od_table_bak.OverDriveTable.UclkFmin;
2156 user_od_table->OverDriveTable.UclkFmax =
2157 user_od_table_bak.OverDriveTable.UclkFmax;
2158 for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
2159 user_od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] =
2160 user_od_table_bak.OverDriveTable.VoltageOffsetPerZoneBoundary[i];
2161 for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++) {
2162 user_od_table->OverDriveTable.FanLinearTempPoints[i] =
2163 user_od_table_bak.OverDriveTable.FanLinearTempPoints[i];
2164 user_od_table->OverDriveTable.FanLinearPwmPoints[i] =
2165 user_od_table_bak.OverDriveTable.FanLinearPwmPoints[i];
2166 }
2167 user_od_table->OverDriveTable.AcousticLimitRpmThreshold =
2168 user_od_table_bak.OverDriveTable.AcousticLimitRpmThreshold;
2169 user_od_table->OverDriveTable.AcousticTargetRpmThreshold =
2170 user_od_table_bak.OverDriveTable.AcousticTargetRpmThreshold;
2171 user_od_table->OverDriveTable.FanTargetTemperature =
2172 user_od_table_bak.OverDriveTable.FanTargetTemperature;
2173 user_od_table->OverDriveTable.FanMinimumPwm =
2174 user_od_table_bak.OverDriveTable.FanMinimumPwm;
2175 }
2176
2177 smu_v13_0_7_set_supported_od_feature_mask(smu);
2178
2179 return 0;
2180 }
2181
2182 static int smu_v13_0_7_restore_user_od_settings(struct smu_context *smu)
2183 {
2184 struct smu_table_context *table_context = &smu->smu_table;
2185 OverDriveTableExternal_t *od_table = table_context->overdrive_table;
2186 OverDriveTableExternal_t *user_od_table = table_context->user_overdrive_table;
2187 int res;
2188
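/*
 * Flag all OD domains for this one-shot upload, then clear the mask again
 * since it is never cached (see smu_v13_0_7_od_edit_dpm_table).
 */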
2189 user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) |
2190 BIT(PP_OD_FEATURE_UCLK_BIT) |
2191 BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) |
2192 BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2193 res = smu_v13_0_7_upload_overdrive_table(smu, user_od_table);
2194 user_od_table->OverDriveTable.FeatureCtrlMask = 0;
2195 if (res == 0)
2196 memcpy(od_table, user_od_table, sizeof(OverDriveTableExternal_t));
2197
2198 return res;
2199 }
2200
2201 static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu)
2202 {
2203 struct smu_13_0_dpm_context *dpm_context =
2204 smu->smu_dpm.dpm_context;
2205 struct smu_13_0_dpm_table *gfx_table =
2206 &dpm_context->dpm_tables.gfx_table;
2207 struct smu_13_0_dpm_table *mem_table =
2208 &dpm_context->dpm_tables.uclk_table;
2209 struct smu_13_0_dpm_table *soc_table =
2210 &dpm_context->dpm_tables.soc_table;
2211 struct smu_13_0_dpm_table *vclk_table =
2212 &dpm_context->dpm_tables.vclk_table;
2213 struct smu_13_0_dpm_table *dclk_table =
2214 &dpm_context->dpm_tables.dclk_table;
2215 struct smu_13_0_dpm_table *fclk_table =
2216 &dpm_context->dpm_tables.fclk_table;
2217 struct smu_umd_pstate_table *pstate_table =
2218 &smu->pstate_table;
2219 struct smu_table_context *table_context = &smu->smu_table;
2220 PPTable_t *pptable = table_context->driver_pptable;
2221 DriverReportedClocks_t driver_clocks =
2222 pptable->SkuTable.DriverReportedClocks;
2223
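/*
 * Prefer the SKU-defined AC game/base clocks for the gfxclk peak/standard
 * pstates, falling back to the DPM table maximum when they are absent or
 * not below it.
 */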
2224 pstate_table->gfxclk_pstate.min = gfx_table->min;
2225 if (driver_clocks.GameClockAc &&
2226 (driver_clocks.GameClockAc < gfx_table->max))
2227 pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
2228 else
2229 pstate_table->gfxclk_pstate.peak = gfx_table->max;
2230
2231 pstate_table->uclk_pstate.min = mem_table->min;
2232 pstate_table->uclk_pstate.peak = mem_table->max;
2233
2234 pstate_table->socclk_pstate.min = soc_table->min;
2235 pstate_table->socclk_pstate.peak = soc_table->max;
2236
2237 pstate_table->vclk_pstate.min = vclk_table->min;
2238 pstate_table->vclk_pstate.peak = vclk_table->max;
2239
2240 pstate_table->dclk_pstate.min = dclk_table->min;
2241 pstate_table->dclk_pstate.peak = dclk_table->max;
2242
2243 pstate_table->fclk_pstate.min = fclk_table->min;
2244 pstate_table->fclk_pstate.peak = fclk_table->max;
2245
2246 if (driver_clocks.BaseClockAc &&
2247 driver_clocks.BaseClockAc < gfx_table->max)
2248 pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc;
2249 else
2250 pstate_table->gfxclk_pstate.standard = gfx_table->max;
2251 pstate_table->uclk_pstate.standard = mem_table->max;
2252 pstate_table->socclk_pstate.standard = soc_table->min;
2253 pstate_table->vclk_pstate.standard = vclk_table->min;
2254 pstate_table->dclk_pstate.standard = dclk_table->min;
2255 pstate_table->fclk_pstate.standard = fclk_table->min;
2256
2257 return 0;
2258 }
2259
2260 static int smu_v13_0_7_get_fan_speed_pwm(struct smu_context *smu,
2261 uint32_t *speed)
2262 {
2263 int ret;
2264
2265 if (!speed)
2266 return -EINVAL;
2267
2268 ret = smu_v13_0_7_get_smu_metrics_data(smu,
2269 METRICS_CURR_FANPWM,
2270 speed);
2271 if (ret) {
2272 dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!");
2273 return ret;
2274 }
2275
2276 /* Convert the PMFW output, which is in percent, to a value on the 0-255 PWM scale */
2277 *speed = min(*speed * 255 / 100, (uint32_t)255);
2278
2279 return 0;
2280 }
2281
2282 static int smu_v13_0_7_get_fan_speed_rpm(struct smu_context *smu,
2283 uint32_t *speed)
2284 {
2285 if (!speed)
2286 return -EINVAL;
2287
2288 return smu_v13_0_7_get_smu_metrics_data(smu,
2289 METRICS_CURR_FANSPEED,
2290 speed);
2291 }
2292
2293 static int smu_v13_0_7_enable_mgpu_fan_boost(struct smu_context *smu)
2294 {
2295 struct smu_table_context *table_context = &smu->smu_table;
2296 PPTable_t *pptable = table_context->driver_pptable;
2297 SkuTable_t *skutable = &pptable->SkuTable;
2298
2299 /*
2300 * Skip the MGpuFanBoost setting for those ASICs
2301 * which do not support it
2302 */
2303 if (skutable->MGpuAcousticLimitRpmThreshold == 0)
2304 return 0;
2305
2306 return smu_cmn_send_smc_msg_with_param(smu,
2307 SMU_MSG_SetMGpuFanBoostLimitRpm,
2308 0,
2309 NULL);
2310 }
2311
2312 static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
2313 uint32_t *current_power_limit,
2314 uint32_t *default_power_limit,
2315 uint32_t *max_power_limit,
2316 uint32_t *min_power_limit)
2317 {
2318 struct smu_table_context *table_context = &smu->smu_table;
2319 struct smu_13_0_7_powerplay_table *powerplay_table =
2320 (struct smu_13_0_7_powerplay_table *)table_context->power_play_table;
2321 PPTable_t *pptable = table_context->driver_pptable;
2322 SkuTable_t *skutable = &pptable->SkuTable;
2323 uint32_t power_limit, od_percent_upper, od_percent_lower;
2324 uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
2325
2326 if (smu_v13_0_get_current_power_limit(smu, &power_limit))
2327 power_limit = smu->adev->pm.ac_power ?
2328 skutable->SocketPowerLimitAc[PPT_THROTTLER_PPT0] :
2329 skutable->SocketPowerLimitDc[PPT_THROTTLER_PPT0];
2330
2331 if (current_power_limit)
2332 *current_power_limit = power_limit;
2333 if (default_power_limit)
2334 *default_power_limit = power_limit;
2335
2336 if (smu->od_enabled) {
2337 od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
2338 od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
2339 } else {
2340 od_percent_upper = 0;
2341 od_percent_lower = 100;
2342 }
2343
2344 dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
2345 od_percent_upper, od_percent_lower, power_limit);
2346
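/*
 * max = PMFW message cap scaled up by the OD upper percentage,
 * min = default limit scaled down by the OD lower percentage.
 */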
2347 if (max_power_limit) {
2348 *max_power_limit = msg_limit * (100 + od_percent_upper);
2349 *max_power_limit /= 100;
2350 }
2351
2352 if (min_power_limit) {
2353 *min_power_limit = power_limit * (100 - od_percent_lower);
2354 *min_power_limit /= 100;
2355 }
2356
2357 return 0;
2358 }
2359
2360 static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf)
2361 {
2362 DpmActivityMonitorCoeffIntExternal_t *activity_monitor_external;
2363 uint32_t i, j, size = 0;
2364 int16_t workload_type = 0;
2365 int result = 0;
2366
2367 if (!buf)
2368 return -EINVAL;
2369
2370 activity_monitor_external = kcalloc(PP_SMC_POWER_PROFILE_COUNT,
2371 sizeof(*activity_monitor_external),
2372 GFP_KERNEL);
2373 if (!activity_monitor_external)
2374 return -ENOMEM;
2375
2376 size += sysfs_emit_at(buf, size, " ");
2377 for (i = 0; i <= PP_SMC_POWER_PROFILE_WINDOW3D; i++)
2378 size += sysfs_emit_at(buf, size, "%-14s%s", amdgpu_pp_profile_name[i],
2379 (i == smu->power_profile_mode) ? "* " : " ");
2380
2381 size += sysfs_emit_at(buf, size, "\n");
2382
2383 for (i = 0; i <= PP_SMC_POWER_PROFILE_WINDOW3D; i++) {
2384 /* Convert PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
2385 workload_type = smu_cmn_to_asic_specific_index(smu,
2386 CMN2ASIC_MAPPING_WORKLOAD,
2387 i);
2388 if (workload_type == -ENOTSUPP)
2389 continue;
2390 else if (workload_type < 0) {
2391 result = -EINVAL;
2392 goto out;
2393 }
2394
2395 result = smu_cmn_update_table(smu,
2396 SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
2397 (void *)(&activity_monitor_external[i]), false);
2398 if (result) {
2399 dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
2400 goto out;
2401 }
2402 }
2403
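/* Print one coefficient row across all supported power profiles. */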
2404 #define PRINT_DPM_MONITOR(field) \
2405 do { \
2406 size += sysfs_emit_at(buf, size, "%-30s", #field); \
2407 for (j = 0; j <= PP_SMC_POWER_PROFILE_WINDOW3D; j++) \
2408 size += sysfs_emit_at(buf, size, "%-16d", activity_monitor_external[j].DpmActivityMonitorCoeffInt.field); \
2409 size += sysfs_emit_at(buf, size, "\n"); \
2410 } while (0)
2411
2412 PRINT_DPM_MONITOR(Gfx_ActiveHystLimit);
2413 PRINT_DPM_MONITOR(Gfx_IdleHystLimit);
2414 PRINT_DPM_MONITOR(Gfx_FPS);
2415 PRINT_DPM_MONITOR(Gfx_MinActiveFreqType);
2416 PRINT_DPM_MONITOR(Gfx_BoosterFreqType);
2417 PRINT_DPM_MONITOR(Gfx_MinActiveFreq);
2418 PRINT_DPM_MONITOR(Gfx_BoosterFreq);
2419 PRINT_DPM_MONITOR(Fclk_ActiveHystLimit);
2420 PRINT_DPM_MONITOR(Fclk_IdleHystLimit);
2421 PRINT_DPM_MONITOR(Fclk_FPS);
2422 PRINT_DPM_MONITOR(Fclk_MinActiveFreqType);
2423 PRINT_DPM_MONITOR(Fclk_BoosterFreqType);
2424 PRINT_DPM_MONITOR(Fclk_MinActiveFreq);
2425 PRINT_DPM_MONITOR(Fclk_BoosterFreq);
2426 #undef PRINT_DPM_MONITOR
2427
2428 result = size;
2429 out:
2430 kfree(activity_monitor_external);
2431 return result;
2432 }
2433
2434 static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
2435 {
2436
2437 DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
2438 DpmActivityMonitorCoeffInt_t *activity_monitor =
2439 &(activity_monitor_external.DpmActivityMonitorCoeffInt);
2440 int workload_type, ret = 0;
2441
2442 smu->power_profile_mode = input[size];
2443
2444 if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_WINDOW3D) {
2445 dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
2446 return -EINVAL;
2447 }
2448
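/*
 * For the CUSTOM profile, input[0] selects the clock domain (0 = gfxclk,
 * 1 = fclk) and input[1..7] supply the activity monitor coefficients.
 */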
2449 if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
2450
2451 ret = smu_cmn_update_table(smu,
2452 SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
2453 (void *)(&activity_monitor_external), false);
2454 if (ret) {
2455 dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
2456 return ret;
2457 }
2458
2459 switch (input[0]) {
2460 case 0: /* Gfxclk */
2461 activity_monitor->Gfx_ActiveHystLimit = input[1];
2462 activity_monitor->Gfx_IdleHystLimit = input[2];
2463 activity_monitor->Gfx_FPS = input[3];
2464 activity_monitor->Gfx_MinActiveFreqType = input[4];
2465 activity_monitor->Gfx_BoosterFreqType = input[5];
2466 activity_monitor->Gfx_MinActiveFreq = input[6];
2467 activity_monitor->Gfx_BoosterFreq = input[7];
2468 break;
2469 case 1: /* Fclk */
2470 activity_monitor->Fclk_ActiveHystLimit = input[1];
2471 activity_monitor->Fclk_IdleHystLimit = input[2];
2472 activity_monitor->Fclk_FPS = input[3];
2473 activity_monitor->Fclk_MinActiveFreqType = input[4];
2474 activity_monitor->Fclk_BoosterFreqType = input[5];
2475 activity_monitor->Fclk_MinActiveFreq = input[6];
2476 activity_monitor->Fclk_BoosterFreq = input[7];
2477 break;
2478 }
2479
2480 ret = smu_cmn_update_table(smu,
2481 SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
2482 (void *)(&activity_monitor_external), true);
2483 if (ret) {
2484 dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
2485 return ret;
2486 }
2487 }
2488
2489 /* Convert PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
2490 workload_type = smu_cmn_to_asic_specific_index(smu,
2491 CMN2ASIC_MAPPING_WORKLOAD,
2492 smu->power_profile_mode);
2493 if (workload_type < 0)
2494 return -EINVAL;
2495 smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
2496 1 << workload_type, NULL);
2497
2498 return ret;
2499 }
2500
2501 static int smu_v13_0_7_set_mp1_state(struct smu_context *smu,
2502 enum pp_mp1_state mp1_state)
2503 {
2504 int ret;
2505
2506 switch (mp1_state) {
2507 case PP_MP1_STATE_UNLOAD:
2508 ret = smu_cmn_set_mp1_state(smu, mp1_state);
2509 break;
2510 default:
2511 /* Ignore others */
2512 ret = 0;
2513 }
2514
2515 return ret;
2516 }
2517
2518 static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu)
2519 {
2520 struct amdgpu_device *adev = smu->adev;
2521
2522 /* SRIOV does not support SMU mode1 reset */
2523 if (amdgpu_sriov_vf(adev))
2524 return false;
2525
2526 return true;
2527 }
2528
2529 static int smu_v13_0_7_set_df_cstate(struct smu_context *smu,
2530 enum pp_df_cstate state)
2531 {
2532 return smu_cmn_send_smc_msg_with_param(smu,
2533 SMU_MSG_DFCstateControl,
2534 state,
2535 NULL);
2536 }
2537
2538 static bool smu_v13_0_7_wbrf_support_check(struct smu_context *smu)
2539 {
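/* WBRF (Wi-Fi band RFI mitigation) is only reported as supported on PMFW newer than 0x00524600. */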
2540 return smu->smc_fw_version > 0x00524600;
2541 }
2542
2543 static int smu_v13_0_7_set_power_limit(struct smu_context *smu,
2544 enum smu_ppt_limit_type limit_type,
2545 uint32_t limit)
2546 {
2547 PPTable_t *pptable = smu->smu_table.driver_pptable;
2548 SkuTable_t *skutable = &pptable->SkuTable;
2549 uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
2550 struct smu_table_context *table_context = &smu->smu_table;
2551 OverDriveTableExternal_t *od_table =
2552 (OverDriveTableExternal_t *)table_context->overdrive_table;
2553 int ret = 0;
2554
2555 if (limit_type != SMU_DEFAULT_PPT_LIMIT)
2556 return -EINVAL;
2557
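/*
 * Limits within the PMFW message cap are applied directly (clearing any
 * previously programmed OD boost); anything above the cap is expressed as
 * an overdrive percentage on top of the cap.
 */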
2558 if (limit <= msg_limit) {
2559 if (smu->current_power_limit > msg_limit) {
2560 od_table->OverDriveTable.Ppt = 0;
2561 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
2562
2563 ret = smu_v13_0_7_upload_overdrive_table(smu, od_table);
2564 if (ret) {
2565 dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
2566 return ret;
2567 }
2568 }
2569 return smu_v13_0_set_power_limit(smu, limit_type, limit);
2570 } else if (smu->od_enabled) {
2571 ret = smu_v13_0_set_power_limit(smu, limit_type, msg_limit);
2572 if (ret)
2573 return ret;
2574
2575 od_table->OverDriveTable.Ppt = (limit * 100) / msg_limit - 100;
2576 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
2577
2578 ret = smu_v13_0_7_upload_overdrive_table(smu, od_table);
2579 if (ret) {
2580 dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
2581 return ret;
2582 }
2583
2584 smu->current_power_limit = limit;
2585 } else {
2586 return -EINVAL;
2587 }
2588
2589 return 0;
2590 }
2591
2592 static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
2593 .get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
2594 .set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
2595 .is_dpm_running = smu_v13_0_7_is_dpm_running,
2596 .dump_pptable = smu_v13_0_7_dump_pptable,
2597 .init_microcode = smu_v13_0_init_microcode,
2598 .load_microcode = smu_v13_0_load_microcode,
2599 .fini_microcode = smu_v13_0_fini_microcode,
2600 .init_smc_tables = smu_v13_0_7_init_smc_tables,
2601 .fini_smc_tables = smu_v13_0_fini_smc_tables,
2602 .init_power = smu_v13_0_init_power,
2603 .fini_power = smu_v13_0_fini_power,
2604 .check_fw_status = smu_v13_0_7_check_fw_status,
2605 .setup_pptable = smu_v13_0_7_setup_pptable,
2606 .check_fw_version = smu_v13_0_check_fw_version,
2607 .write_pptable = smu_cmn_write_pptable,
2608 .set_driver_table_location = smu_v13_0_set_driver_table_location,
2609 .system_features_control = smu_v13_0_system_features_control,
2610 .set_allowed_mask = smu_v13_0_set_allowed_mask,
2611 .get_enabled_mask = smu_cmn_get_enabled_mask,
2612 .dpm_set_vcn_enable = smu_v13_0_set_vcn_enable,
2613 .dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable,
2614 .init_pptable_microcode = smu_v13_0_init_pptable_microcode,
2615 .populate_umd_state_clk = smu_v13_0_7_populate_umd_state_clk,
2616 .get_dpm_ultimate_freq = smu_v13_0_7_get_dpm_ultimate_freq,
2617 .get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
2618 .read_sensor = smu_v13_0_7_read_sensor,
2619 .feature_is_enabled = smu_cmn_feature_is_enabled,
2620 .print_clk_levels = smu_v13_0_7_print_clk_levels,
2621 .force_clk_levels = smu_v13_0_7_force_clk_levels,
2622 .update_pcie_parameters = smu_v13_0_update_pcie_parameters,
2623 .get_thermal_temperature_range = smu_v13_0_7_get_thermal_temperature_range,
2624 .register_irq_handler = smu_v13_0_register_irq_handler,
2625 .enable_thermal_alert = smu_v13_0_enable_thermal_alert,
2626 .disable_thermal_alert = smu_v13_0_disable_thermal_alert,
2627 .notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,
2628 .get_gpu_metrics = smu_v13_0_7_get_gpu_metrics,
2629 .set_soft_freq_limited_range = smu_v13_0_set_soft_freq_limited_range,
2630 .set_default_od_settings = smu_v13_0_7_set_default_od_settings,
2631 .restore_user_od_settings = smu_v13_0_7_restore_user_od_settings,
2632 .od_edit_dpm_table = smu_v13_0_7_od_edit_dpm_table,
2633 .set_performance_level = smu_v13_0_set_performance_level,
2634 .gfx_off_control = smu_v13_0_gfx_off_control,
2635 .get_fan_speed_pwm = smu_v13_0_7_get_fan_speed_pwm,
2636 .get_fan_speed_rpm = smu_v13_0_7_get_fan_speed_rpm,
2637 .set_fan_speed_pwm = smu_v13_0_set_fan_speed_pwm,
2638 .set_fan_speed_rpm = smu_v13_0_set_fan_speed_rpm,
2639 .get_fan_control_mode = smu_v13_0_get_fan_control_mode,
2640 .set_fan_control_mode = smu_v13_0_set_fan_control_mode,
2641 .enable_mgpu_fan_boost = smu_v13_0_7_enable_mgpu_fan_boost,
2642 .get_power_limit = smu_v13_0_7_get_power_limit,
2643 .set_power_limit = smu_v13_0_7_set_power_limit,
2644 .set_power_source = smu_v13_0_set_power_source,
2645 .get_power_profile_mode = smu_v13_0_7_get_power_profile_mode,
2646 .set_power_profile_mode = smu_v13_0_7_set_power_profile_mode,
2647 .set_tool_table_location = smu_v13_0_set_tool_table_location,
2648 .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
2649 .set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
2650 .baco_is_support = smu_v13_0_baco_is_support,
2651 .baco_enter = smu_v13_0_baco_enter,
2652 .baco_exit = smu_v13_0_baco_exit,
2653 .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported,
2654 .mode1_reset = smu_v13_0_mode1_reset,
2655 .set_mp1_state = smu_v13_0_7_set_mp1_state,
2656 .set_df_cstate = smu_v13_0_7_set_df_cstate,
2657 .gpo_control = smu_v13_0_gpo_control,
2658 .is_asic_wbrf_supported = smu_v13_0_7_wbrf_support_check,
2659 .enable_uclk_shadow = smu_v13_0_enable_uclk_shadow,
2660 .set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges,
2661 };
2662
2663 void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
2664 {
2665 smu->ppt_funcs = &smu_v13_0_7_ppt_funcs;
2666 smu->message_map = smu_v13_0_7_message_map;
2667 smu->clock_map = smu_v13_0_7_clk_map;
2668 smu->feature_map = smu_v13_0_7_feature_mask_map;
2669 smu->table_map = smu_v13_0_7_table_map;
2670 smu->pwr_src_map = smu_v13_0_7_pwr_src_map;
2671 smu->workload_map = smu_v13_0_7_workload_map;
2672 smu->smc_driver_if_version = SMU13_0_7_DRIVER_IF_VERSION;
2673 smu_v13_0_set_smu_mailbox_registers(smu);
2674 }