/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "smu_v11_0.h"
#include "smu_v12_0.h"
#include "atom.h"
#include "vega20_ppt.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "renoir_ppt.h"

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) #type
static const char *__smu_message_names[] = {
	SMU_MESSAGE_TYPES
};

const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
{
	if (type < 0 || type >= SMU_MSG_MAX_COUNT)
		return "unknown smu message";

	return __smu_message_names[type];
}

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea) #fea
static const char *__smu_feature_names[] = {
	SMU_FEATURE_MASKS
};

const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
{
	if (feature < 0 || feature >= SMU_FEATURE_COUNT)
		return "unknown smu feature";

	return __smu_feature_names[feature];
}

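/*
 * Dump the SMU feature mask and per-feature enablement state into @buf
 * (human readable) and return the number of bytes written. A minimal
 * usage sketch -- the PAGE_SIZE buffer here is illustrative, not
 * mandated by this API:
 *
 *	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
 *	size_t len;
 *
 *	if (buf)
 *		len = smu_sys_get_pp_feature_mask(smu, buf);
 */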
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
{
	size_t size = 0;
	int ret = 0, i = 0;
	uint32_t feature_mask[2] = { 0 };
	int32_t feature_index = 0;
	uint32_t count = 0;
	uint32_t sort_feature[SMU_FEATURE_COUNT];
	uint64_t hw_feature_count = 0;

	mutex_lock(&smu->mutex);

	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
	if (ret)
		goto failed;

	size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
			feature_mask[1], feature_mask[0]);

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		feature_index = smu_feature_get_index(smu, i);
		if (feature_index < 0)
			continue;
		sort_feature[feature_index] = i;
		hw_feature_count++;
	}

	for (i = 0; i < hw_feature_count; i++) {
		size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
			       count++,
			       smu_get_feature_name(smu, sort_feature[i]),
			       i,
			       !!smu_feature_is_enabled(smu, sort_feature[i]) ?
			       "enabled" : "disabled");
	}

failed:
	mutex_unlock(&smu->mutex);

	return size;
}

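/*
 * The 64-bit feature mask is sent to the SMC as two 32-bit message
 * parameters: bits [31:0] via the *SmuFeaturesLow message and bits
 * [63:32] via the *SmuFeaturesHigh message. The cached enabled bitmap
 * is only updated once both messages have succeeded.
 */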
static int smu_feature_update_enable_state(struct smu_context *smu,
					   uint64_t feature_mask,
					   bool enabled)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_low = 0, feature_high = 0;
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;

	feature_low = (feature_mask >> 0) & 0xffffffff;
	feature_high = (feature_mask >> 32) & 0xffffffff;

	if (enabled) {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
						  feature_low, NULL);
		if (ret)
			return ret;
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
						  feature_high, NULL);
		if (ret)
			return ret;
	} else {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
						  feature_low, NULL);
		if (ret)
			return ret;
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
						  feature_high, NULL);
		if (ret)
			return ret;
	}

	mutex_lock(&feature->mutex);
	if (enabled)
		bitmap_or(feature->enabled, feature->enabled,
				(unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
	else
		bitmap_andnot(feature->enabled, feature->enabled,
				(unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	return ret;
}

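/*
 * Reconcile the currently enabled features against @new_mask: features
 * set in @new_mask but not currently enabled are switched on, features
 * currently enabled but cleared in @new_mask are switched off, and
 * everything else is left untouched.
 */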
int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
{
	int ret = 0;
	uint32_t feature_mask[2] = { 0 };
	uint64_t feature_2_enabled = 0;
	uint64_t feature_2_disabled = 0;
	uint64_t feature_enables = 0;

	mutex_lock(&smu->mutex);

	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
	if (ret)
		goto out;

	feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);

	feature_2_enabled = ~feature_enables & new_mask;
	feature_2_disabled = feature_enables & ~new_mask;

	if (feature_2_enabled) {
		ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
		if (ret)
			goto out;
	}
	if (feature_2_disabled) {
		ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
		if (ret)
			goto out;
	}

out:
	mutex_unlock(&smu->mutex);

	return ret;
}

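/*
 * Query the driver interface and/or firmware version. Either output
 * pointer may be NULL when that value is not needed; passing both as
 * NULL is rejected. A minimal sketch:
 *
 *	uint32_t if_version, fw_version;
 *	int ret = smu_get_smc_version(smu, &if_version, &fw_version);
 */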
int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
{
	int ret = 0;

	if (!if_version && !smu_version)
		return -EINVAL;

	if (if_version) {
		ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
		if (ret)
			return ret;
	}

	if (smu_version) {
		ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
		if (ret)
			return ret;
	}

	return ret;
}

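/*
 * Clamp the soft min/max frequency of @clk_type. @lock_needed lets a
 * caller that already holds smu->mutex skip taking it again; the call
 * silently becomes a no-op when DPM is disabled for that clock.
 */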
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t min, uint32_t max, bool lock_needed)
{
	int ret = 0;

	if (!smu_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	if (lock_needed)
		mutex_lock(&smu->mutex);
	ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}

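/*
 * The SetHardMinByFreq/SetHardMaxByFreq messages pack their argument as
 * (clk_id << 16) | (freq & 0xffff), the frequency presumably in MHz as
 * with the other clock helpers in this file, so only values that fit in
 * 16 bits can be requested this way.
 */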
int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t min, uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (min <= 0 && max <= 0)
		return -EINVAL;

	if (!smu_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
						  param, NULL);
		if (ret)
			return ret;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
						  param, NULL);
		if (ret)
			return ret;
	}

	return ret;
}

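/*
 * When DPM is not enabled for the requested clock, fall back to the
 * bootup value from the VBIOS. The boot value is divided by 100 to
 * yield MHz, which implies it is stored in units of 10 kHz.
 */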
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			   uint32_t *min, uint32_t *max, bool lock_needed)
{
	uint32_t clock_limit;
	int ret = 0;

	if (!min && !max)
		return -EINVAL;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			clock_limit = smu->smu_table.boot_values.uclk;
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			clock_limit = smu->smu_table.boot_values.gfxclk;
			break;
		case SMU_SOCCLK:
			clock_limit = smu->smu_table.boot_values.socclk;
			break;
		default:
			clock_limit = 0;
			break;
		}

		/* clock in MHz unit */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;
	} else {
		/*
		 * Todo: let each ASIC (ASIC_ppt funcs) control the callbacks
		 * exposed to the core driver, and then add helpers for the
		 * stuff that is common (SMU_v11_x | SMU_v12_x funcs).
		 */
		ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
	}

	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}

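/*
 * GetDpmFreqByIndex packs (clk_id << 16) | level into its parameter and
 * returns the frequency through the message argument. Level 0xff is a
 * special case that returns the number of DPM levels instead, which
 * smu_get_dpm_level_count() below relies on.
 */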
int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
			      uint16_t level, uint32_t *value)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!value)
		return -EINVAL;

	if (!smu_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0)
		return clk_id;

	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
					  param, &param);
	if (ret)
		return ret;

	/*
	 * BIT31: 0 - fine grained DPM, 1 - discrete DPM.
	 * The distinction is not supported for now, so mask the bit off.
	 */
	*value = param & 0x7fffffff;

	return ret;
}

int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t *value)
{
	return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
}

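/*
 * Report the lowest (level 0) and highest (last level) clock values of
 * @clk_type as exposed by the firmware DPM tables.
 */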
int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t *min_value, uint32_t *max_value)
{
	int ret = 0;
	uint32_t level_count = 0;

	if (!min_value && !max_value)
		return -EINVAL;

	if (min_value) {
		/* by default, use the level 0 clock value as the min value */
		ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, min_value);
		if (ret)
			return ret;
	}

	if (max_value) {
		ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
		if (ret)
			return ret;

		ret = smu_get_dpm_freq_by_index(smu, clk_type, level_count - 1, max_value);
		if (ret)
			return ret;
	}

	return ret;
}

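/*
 * Clock types without a matching DPM feature bit (anything besides
 * UCLK/MCLK, GFXCLK/SCLK and SOCCLK here) are treated as always
 * enabled.
 */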
bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	default:
		return true;
	}

	if (!smu_feature_is_enabled(smu, feature_id))
		return false;

	return true;
}

/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @smu:        smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate:       to power gate if true, ungate otherwise
 *
 * This API uses no smu->mutex lock protection due to:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
 *    which is guaranteed to be race-condition free by the caller.
 * 2. Or it is called on a user setting request of
 *    power_dpm_force_performance_level. In that case, the smu->mutex
 *    lock protection is already enforced on the parent API
 *    smu_force_performance_level of the call path.
 */
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
			   bool gate)
{
	int ret = 0;

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
		ret = smu_dpm_set_uvd_enable(smu, !gate);
		break;
	case AMD_IP_BLOCK_TYPE_VCE:
		ret = smu_dpm_set_vce_enable(smu, !gate);
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		ret = smu_dpm_set_jpeg_enable(smu, !gate);
		break;
	default:
		break;
	}

	return ret;
}

int smu_get_power_num_states(struct smu_context *smu,
			     struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported; report a single default state */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}

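/*
 * Common sensor reads that need no ASIC-specific handling. On success
 * *size is set to the payload width in bytes (4 for the 32-bit sensors,
 * 8 for the enabled-features mask); on failure it is zeroed.
 */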
int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = smu->pstate_sclk;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = smu->pstate_mclk;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
		*size = 8;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
		*(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
		*size = 4;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		*size = 0;

	return ret;
}

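/*
 * Transfer a table between driver-owned VRAM and the SMU. With @drv2smu
 * true, @table_data is copied into the shared driver table and pushed
 * to the SMU (TransferTableDram2Smu); otherwise the SMU fills the table
 * (TransferTableSmu2Dram) and the result is copied back into
 * @table_data. The HDP cache is flushed around each copy so the CPU and
 * GPU views stay coherent.
 */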
int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
		     void *table_data, bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	struct smu_table *table = &smu_table->driver_table;
	int table_id = smu_table_get_index(smu, table_index);
	uint32_t table_size;
	int ret = 0;

	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
		return -EINVAL;

	table_size = smu_table->tables[table_index].size;

	if (drv2smu) {
		memcpy(table->cpu_addr, table_data, table_size);
		/*
		 * Flush the HDP cache so that the content seen by
		 * the GPU is consistent with the CPU.
		 */
		amdgpu_asic_flush_hdp(adev, NULL);
	}

	ret = smu_send_smc_msg_with_param(smu, drv2smu ?
					  SMU_MSG_TransferTableDram2Smu :
					  SMU_MSG_TransferTableSmu2Dram,
					  table_id | ((argument & 0xFFFF) << 16),
					  NULL);
	if (ret)
		return ret;

	if (!drv2smu) {
		amdgpu_asic_flush_hdp(adev, NULL);
		memcpy(table_data, table->cpu_addr, table_size);
	}

	return ret;
}

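/*
 * SW SMU is the default path on Arcturus and newer (except under SR-IOV
 * without the one-VF mode), while on Vega20 it is opt-in via the
 * amdgpu_dpm=2 module parameter.
 */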
bool is_support_sw_smu(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_VEGA20)
		return amdgpu_dpm == 2;
	else if (adev->asic_type >= CHIP_ARCTURUS) {
		if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
			return false;
		else
			return true;
	} else
		return false;
}

bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return false;

	if (adev->asic_type == CHIP_VEGA20)
		return true;

	return false;
}

int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t powerplay_table_size;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	powerplay_table_size = smu_table->power_play_table_size;

	mutex_unlock(&smu->mutex);

	return powerplay_table_size;
}

int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EINVAL;
	if (header->usStructureSize != size) {
		pr_err("pp table size not matched!\n");
		return -EIO;
	}

	mutex_lock(&smu->mutex);
	if (!smu_table->hardcode_pptable)
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
	if (!smu_table->hardcode_pptable) {
		ret = -ENOMEM;
		goto failed;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	/*
	 * A special hw_fini action may be needed for custom pptable
	 * uploading (for Navi1x, the DPM disablement is skipped).
	 */
	smu->uploading_custom_pp_table = true;

	ret = smu_reset(smu);
	if (ret)
		pr_info("smu reset failed, ret = %d\n", ret);

	smu->uploading_custom_pp_table = false;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

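/*
 * (Re)build the allowed-feature bitmap from the ASIC-specific mask. The
 * bitmap is cleared first so that a stale mask never survives a reinit.
 */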
int smu_feature_init_dpm(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];

	if (!smu->pm_enabled)
		return ret;
	mutex_lock(&feature->mutex);
	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					   SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	mutex_lock(&feature->mutex);
	bitmap_or(feature->allowed, feature->allowed,
		  (unsigned long *)allowed_feature_mask,
		  feature->feature_num);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	if (smu->is_apu)
		return 1;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->enabled);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
			    bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return -EINVAL;

	WARN_ON(feature_id > feature->feature_num);

	return smu_feature_update_enable_state(smu,
					       1ULL << feature_id,
					       enable);
}

int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_set_supported(struct smu_context *smu,
			      enum smu_feature_mask mask,
			      bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return -EINVAL;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	if (enable)
		test_and_set_bit(feature_id, feature->supported);
	else
		test_and_clear_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}

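/*
 * Bind the ASIC-specific ppt function table. Overdrive support follows
 * the PP_OVERDRIVE_MASK module setting, GFXOFF is masked off for Vega20
 * and Arcturus, and overdrive is explicitly disabled on Arcturus.
 */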
static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
		smu->od_enabled = true;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		vega20_set_ppt_funcs(smu);
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		navi10_set_ppt_funcs(smu);
		break;
	case CHIP_ARCTURUS:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
		break;
	case CHIP_RENOIR:
		renoir_set_ppt_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	smu->is_apu = false;
	mutex_init(&smu->mutex);

	return smu_set_funcs(adev);
}

static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (!smu->pm_enabled)
		return 0;

	smu_handle_task(&adev->smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT,
			false);

	return 0;
}

int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
			    uint16_t *size, uint8_t *frev, uint8_t *crev,
			    uint8_t **addr)
{
	struct amdgpu_device *adev = smu->adev;
	uint16_t data_start;

	if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
					   size, frev, crev, &data_start))
		return -EINVAL;

	*addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;

	return 0;
}

static int smu_initialize_pptable(struct smu_context *smu)
{
	/* TODO */
	return 0;
}

static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	ret = smu_initialize_pptable(smu);
	if (ret) {
		pr_err("Failed to init smu_initialize_pptable!\n");
		return ret;
	}

	/**
	 * Create smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		pr_err("Failed to init smc tables!\n");
		return ret;
	}

	/**
	 * Create smu_power_context structure, and allocate smu_dpm_context and
	 * context size to fill the smu_power_context data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		pr_err("Failed to init smu_init_power!\n");
		return ret;
	}

	return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		pr_err("Failed to smu_fini_smc_tables!\n");
		return ret;
	}

	return 0;
}

static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	mutex_init(&smu->smu_feature.mutex);
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	mutex_init(&smu->smu_baco.mutex);
	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
	smu->smu_baco.platform_support = false;

	mutex_init(&smu->sensor_lock);
	mutex_init(&smu->metrics_lock);
	mutex_init(&smu->message_lock);

	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	ret = smu_init_microcode(smu);
	if (ret) {
		pr_err("Failed to load smu firmware!\n");
		return ret;
	}

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		pr_err("Failed to sw init smc table!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		pr_err("Failed to register smc irq handler!\n");
		return ret;
	}

	if (adev->smu.ppt_funcs->i2c_eeprom_init) {
		ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);

		if (ret)
			return ret;
	}

	return 0;
}

static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	if (adev->smu.ppt_funcs->i2c_eeprom_fini)
		smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);

	kfree(smu->irq_source);
	smu->irq_source = NULL;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		pr_err("Failed to sw fini smc table!\n");
		return ret;
	}

	ret = smu_fini_power(smu);
	if (ret) {
		pr_err("Failed to init smu_fini_power!\n");
		return ret;
	}

	return 0;
}

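/*
 * Carve out the VRAM backing for the tool (PMSTATUSLOG) table plus a
 * single shared driver table sized to the largest SMU table, since
 * tables are transferred through that one buffer one at a time.
 */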
static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);
	uint32_t max_table_size = 0;
	int ret, i;

	/* VRAM allocation for tool table */
	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
		ret = amdgpu_bo_create_kernel(adev,
					      tables[SMU_TABLE_PMSTATUSLOG].size,
					      tables[SMU_TABLE_PMSTATUSLOG].align,
					      tables[SMU_TABLE_PMSTATUSLOG].domain,
					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
		if (ret) {
			pr_err("VRAM allocation for tool table failed!\n");
			return ret;
		}
	}

	/* VRAM allocation for driver table */
	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;

		if (i == SMU_TABLE_PMSTATUSLOG)
			continue;

		if (max_table_size < tables[i].size)
			max_table_size = tables[i].size;
	}

	driver_table->size = max_table_size;
	driver_table->align = PAGE_SIZE;
	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

	ret = amdgpu_bo_create_kernel(adev,
				      driver_table->size,
				      driver_table->align,
				      driver_table->domain,
				      &driver_table->bo,
				      &driver_table->mc_address,
				      &driver_table->cpu_addr);
	if (ret) {
		pr_err("VRAM allocation for driver table failed!\n");
		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
	}

	return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);

	if (!tables)
		return 0;

	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

	amdgpu_bo_free_kernel(&driver_table->bo,
			      &driver_table->mc_address,
			      &driver_table->cpu_addr);

	return 0;
}

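/*
 * Bring the SMC tables up on the hardware. With @initialize set (first
 * init) this also parses the VBIOS bootup values and pptable, allocates
 * the VRAM table BOs and verifies the firmware interface version; on
 * resume only the hardware-side state is reprogrammed.
 */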
static int smu_smc_table_hw_init(struct smu_context *smu,
				 bool initialize)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	if (smu_is_dpm_running(smu) && adev->in_suspend) {
		pr_info("dpm has been enabled\n");
		return 0;
	}

	if (adev->asic_type != CHIP_ARCTURUS) {
		ret = smu_init_display_count(smu, 0);
		if (ret)
			return ret;
	}

	if (initialize) {
		/* get boot_values from vbios to set revision, gfxclk, etc. */
		ret = smu_get_vbios_bootup_values(smu);
		if (ret)
			return ret;

		ret = smu_setup_pptable(smu);
		if (ret)
			return ret;

		ret = smu_get_clk_info_from_vbios(smu);
		if (ret)
			return ret;

		/*
		 * Check if the format_revision in vbios is up to the pptable
		 * header version, and that the structure size is not 0.
		 */
		ret = smu_check_pptable(smu);
		if (ret)
			return ret;

		/*
		 * Allocate vram bos to store smc table contents.
		 */
		ret = smu_init_fb_allocations(smu);
		if (ret)
			return ret;

		/*
		 * Parse the pptable format and fill the PPTable_t smc_pptable in
		 * the smu_table_context structure. Then read the smc_dpm_table
		 * from vbios and fill it into smc_pptable as well.
		 */
		ret = smu_parse_pptable(smu);
		if (ret)
			return ret;

		/*
		 * Send msg GetDriverIfVersion to check if the return value is
		 * equal to the DRIVER_IF_VERSION of the smc header.
		 */
		ret = smu_check_fw_version(smu);
		if (ret)
			return ret;
	}

	ret = smu_set_driver_table_location(smu);
	if (ret)
		return ret;

	/* smu_dump_pptable(smu); */
	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Copy the pptable bo in vram to the smc with SMU MSGs such as
		 * SetDriverDramAddr and TransferTableDram2Smu.
		 */
		ret = smu_write_pptable(smu);
		if (ret)
			return ret;

		/* issue Run*Btc msg */
		ret = smu_run_btc(smu);
		if (ret)
			return ret;
		ret = smu_feature_set_allowed_mask(smu);
		if (ret)
			return ret;

		ret = smu_system_features_control(smu, true);
		if (ret)
			return ret;

		if (adev->asic_type == CHIP_NAVI10) {
			if ((adev->pdev->device == 0x731f && (adev->pdev->revision == 0xc2 ||
							      adev->pdev->revision == 0xc3 ||
							      adev->pdev->revision == 0xca ||
							      adev->pdev->revision == 0xcb)) ||
			    (adev->pdev->device == 0x66af && (adev->pdev->revision == 0xf3 ||
							      adev->pdev->revision == 0xf4 ||
							      adev->pdev->revision == 0xf5 ||
							      adev->pdev->revision == 0xf6))) {
				ret = smu_disable_umc_cdr_12gbps_workaround(smu);
				if (ret) {
					pr_err("Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");
					return ret;
				}
			}
		}

		if (smu->ppt_funcs->set_power_source) {
			/*
			 * For Navi1X, manually switch it to AC mode as PMFW
			 * may boot it with DC mode.
			 */
			if (adev->pm.ac_power)
				ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC);
			else
				ret = smu_set_power_source(smu, SMU_POWER_SOURCE_DC);
			if (ret) {
				pr_err("Failed to switch to %s mode!\n", adev->pm.ac_power ? "AC" : "DC");
				return ret;
			}
		}
	}
	if (adev->asic_type != CHIP_ARCTURUS) {
		ret = smu_notify_display_change(smu);
		if (ret)
			return ret;

		/*
		 * Set min deep sleep dce fclk with the bootup value from vbios
		 * via the SetMinDeepSleepDcefclk MSG.
		 */
		ret = smu_set_min_dcef_deep_sleep(smu);
		if (ret)
			return ret;
	}

	/*
	 * Set initialized values (from vbios) in the dpm tables context, such
	 * as gfxclk, memclk and dcefclk, and enable the DPM feature for each
	 * type of clock.
	 */
	if (initialize) {
		ret = smu_populate_smc_tables(smu);
		if (ret)
			return ret;

		ret = smu_init_max_sustainable_clocks(smu);
		if (ret)
			return ret;
	}

	if (adev->asic_type != CHIP_ARCTURUS) {
		ret = smu_override_pcie_parameters(smu);
		if (ret)
			return ret;
	}

	ret = smu_set_default_od_settings(smu, initialize);
	if (ret)
		return ret;

	if (initialize) {
		ret = smu_populate_umd_state_clk(smu);
		if (ret)
			return ret;

		ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false);
		if (ret)
			return ret;
	}

	/*
	 * Set the PMSTATUSLOG table bo address with the SetToolsDramAddr MSG
	 * for tools.
	 */
	if (!amdgpu_sriov_vf(adev))
		ret = smu_set_tool_table_location(smu);

	if (!smu_is_dpm_running(smu))
		pr_info("dpm has been disabled\n");

	return ret;
}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool is reserved for SMC use; the SetSystemVirtualDramAddr
 * and DramLogSetDramAddr messages notify the SMC of its location.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		break;
	default:
		break;
	}

	return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}

static int smu_start_smc_engine(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (adev->asic_type < CHIP_NAVI10) {
			if (smu->ppt_funcs->load_microcode) {
				ret = smu->ppt_funcs->load_microcode(smu);
				if (ret)
					return ret;
			}
		}
	}

	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
		if (ret)
			pr_err("SMC is not ready\n");
	}

	return ret;
}

static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	ret = smu_start_smc_engine(smu);
	if (ret) {
		pr_err("SMU is not ready yet!\n");
		return ret;
	}

	if (smu->is_apu) {
		smu_powergate_sdma(&adev->smu, false);
		smu_powergate_vcn(&adev->smu, false);
		smu_powergate_jpeg(&adev->smu, false);
		smu_set_gfx_cgpg(&adev->smu, true);
	}

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	ret = smu_feature_init_dpm(smu);
	if (ret)
		goto failed;

	ret = smu_smc_table_hw_init(smu, true);
	if (ret)
		goto failed;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		goto failed;

	/*
	 * The SetSystemVirtualDramAddr and DramLogSetDramAddr messages
	 * notify the SMC of the pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret)
		goto failed;

	ret = smu_start_thermal_control(smu);
	if (ret)
		goto failed;

	if (!smu->pm_enabled)
		adev->pm.dpm_enabled = false;
	else
		adev->pm.dpm_enabled = true;	/* TODO: will set dpm_enabled flag while VCN and DAL DPM is workable */

	pr_info("SMU is initialized successfully!\n");

	return 0;

failed:
	return ret;
}

static int smu_stop_dpms(struct smu_context *smu)
{
	return smu_system_features_control(smu, false);
}

static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	struct smu_table_context *table_context = &smu->smu_table;
	int ret = 0;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (smu->is_apu) {
		smu_powergate_sdma(&adev->smu, true);
		smu_powergate_vcn(&adev->smu, true);
		smu_powergate_jpeg(&adev->smu, true);
	}

	if (!smu->pm_enabled)
		return 0;

	if (!amdgpu_sriov_vf(adev)) {
		ret = smu_stop_thermal_control(smu);
		if (ret) {
			pr_warn("Fail to stop thermal control!\n");
			return ret;
		}

		/*
		 * For custom pptable uploading, skip the DPM features
		 * disable process on Navi1x ASICs.
		 *   - The gfx-related features are under control of the
		 *     RLC on those ASICs; RLC reinitialization would be
		 *     needed to reenable them, which costs much more
		 *     effort.
		 *
		 *   - The SMU firmware can handle the DPM reenablement
		 *     properly.
		 */
		if (!smu->uploading_custom_pp_table ||
		    !((adev->asic_type >= CHIP_NAVI10) &&
		      (adev->asic_type <= CHIP_NAVI12))) {
			ret = smu_stop_dpms(smu);
			if (ret) {
				pr_warn("Fail to stop Dpms!\n");
				return ret;
			}
		}
	}

	kfree(table_context->driver_pptable);
	table_context->driver_pptable = NULL;

	kfree(table_context->max_sustainable_clocks);
	table_context->max_sustainable_clocks = NULL;

	kfree(table_context->overdrive_table);
	table_context->overdrive_table = NULL;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	return 0;
}

1455 | int smu_reset(struct smu_context *smu) |
1456 | { | |
1457 | struct amdgpu_device *adev = smu->adev; | |
1458 | int ret = 0; | |
1459 | ||
1460 | ret = smu_hw_fini(adev); | |
1461 | if (ret) | |
1462 | return ret; | |
1463 | ||
1464 | ret = smu_hw_init(adev); | |
1465 | if (ret) | |
1466 | return ret; | |
1467 | ||
1468 | return ret; | |
1469 | } | |
1470 | ||
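/*
 * Disable all SMU features ahead of suspend/reset. When the device is
 * headed into BACO, the BACO feature bit has to be re-enabled
 * afterwards, which is handled explicitly below since the blanket
 * disable clears it as well.
 */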
static int smu_disable_dpm(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t smu_version;
	int ret = 0;
	bool use_baco = !smu->is_apu &&
		((adev->in_gpu_reset &&
		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
		 ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));

	ret = smu_get_smc_version(smu, NULL, &smu_version);
	if (ret) {
		pr_err("Failed to get smu version.\n");
		return ret;
	}

	/*
	 * Disable all enabled SMU features. This should be handled in
	 * SMU FW; as a backup, the driver can issue this call to SMU FW
	 * until the sequence in SMU FW is operational.
	 */
	ret = smu_system_features_control(smu, false);
	if (ret) {
		pr_err("Failed to disable smu features.\n");
		return ret;
	}

	/*
	 * Arcturus does not have the BACO bit in its disable feature mask.
	 * Enablement of the BACO bit on Arcturus should be skipped.
	 */
	if (adev->asic_type == CHIP_ARCTURUS) {
		if (use_baco && (smu_version > 0x360e00))
			return 0;
	}

	/* For baco, we need to leave the BACO feature enabled */
	if (use_baco) {
		/*
		 * Correct the way of checking whether SMU_FEATURE_BACO_BIT
		 * is supported.
		 *
		 * 'smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)' will
		 * always return false, as the 'smu_system_features_control(smu,
		 * false)' just issued above disabled all SMU features.
		 *
		 * Thus 'smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT)' is
		 * used for the check instead.
		 */
		if (smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT) >= 0) {
			ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
			if (ret) {
				pr_warn("set BACO feature enabled failed, return %d\n", ret);
				return ret;
			}
		}
	}

	return ret;
}

static int smu_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	if (!amdgpu_sriov_vf(adev)) {
		ret = smu_disable_dpm(smu);
		if (ret)
			return ret;
	}

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	if (adev->asic_type >= CHIP_NAVI10 &&
	    adev->gfx.rlc.funcs->stop)
		adev->gfx.rlc.funcs->stop(adev);
	if (smu->is_apu)
		smu_set_gfx_cgpg(&adev->smu, false);

	return 0;
}

1562 | static int smu_resume(void *handle) | |
1563 | { | |
1564 | int ret; | |
1565 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
1566 | struct smu_context *smu = &adev->smu; | |
1567 | ||
895bd048 JZ |
1568 | if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) |
1569 | return 0; | |
1570 | ||
1571 | if (!smu->pm_enabled) | |
1572 | return 0; | |
1573 | ||
fad3ecf2 HR |
1574 | pr_info("SMU is resuming...\n"); |
1575 | ||
f7e3a577 EQ |
1576 | ret = smu_start_smc_engine(smu); |
1577 | if (ret) { | |
1578 | pr_err("SMU is not ready yet!\n"); | |
fa073f13 | 1579 | goto failed; |
f7e3a577 EQ |
1580 | } |
1581 | ||
4733cc72 | 1582 | ret = smu_smc_table_hw_init(smu, false); |
fad3ecf2 HR |
1583 | if (ret) |
1584 | goto failed; | |
1585 | ||
4733cc72 | 1586 | ret = smu_start_thermal_control(smu); |
fad3ecf2 HR |
1587 | if (ret) |
1588 | goto failed; | |
137d63ab | 1589 | |
f8391101 PL |
1590 | if (smu->is_apu) |
1591 | smu_set_gfx_cgpg(&adev->smu, true); | |
1592 | ||
5441dd0e KF |
1593 | smu->disable_uclk_switch = 0; |
1594 | ||
fad3ecf2 HR |
1595 | pr_info("SMU is resumed successfully!\n"); |
1596 | ||
137d63ab | 1597 | return 0; |
3697b339 | 1598 | |
fad3ecf2 | 1599 | failed: |
fad3ecf2 | 1600 | return ret; |
137d63ab HR |
1601 | } |
1602 | ||
94ed6d0c HR |
1603 | int smu_display_configuration_change(struct smu_context *smu, |
1604 | const struct amd_pp_display_configuration *display_config) | |
1605 | { | |
1606 | int index = 0; | |
1607 | int num_of_active_display = 0; | |
1608 | ||
a254bfa2 | 1609 | if (!smu->pm_enabled || !is_support_sw_smu(smu->adev)) |
94ed6d0c HR |
1610 | return -EINVAL; |
1611 | ||
1612 | if (!display_config) | |
1613 | return -EINVAL; | |
1614 | ||
1615 | mutex_lock(&smu->mutex); | |
1616 | ||
6c45e480 EQ |
1617 | if (smu->ppt_funcs->set_deep_sleep_dcefclk) |
1618 | smu->ppt_funcs->set_deep_sleep_dcefclk(smu, | |
3697b339 | 1619 | display_config->min_dcef_deep_sleep_set_clk / 100); |
94ed6d0c HR |
1620 | |
1621 | for (index = 0; index < display_config->num_path_including_non_display; index++) { | |
1622 | if (display_config->displays[index].controller_id != 0) | |
1623 | num_of_active_display++; | |
1624 | } | |
1625 | ||
1626 | smu_set_active_display_count(smu, num_of_active_display); | |
1627 | ||
1628 | smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time, | |
1629 | display_config->cpu_cc6_disable, | |
1630 | display_config->cpu_pstate_disable, | |
1631 | display_config->nb_pstate_switch_disable); | |
1632 | ||
1633 | mutex_unlock(&smu->mutex); | |
1634 | ||
1635 | return 0; | |
1636 | } | |
1637 | ||
5e2d3881 HR |
1638 | static int smu_get_clock_info(struct smu_context *smu, |
1639 | struct smu_clock_info *clk_info, | |
1640 | enum smu_perf_level_designation designation) | |
1641 | { | |
1642 | int ret; | |
1643 | struct smu_performance_level level = {0}; | |
1644 | ||
1645 | if (!clk_info) | |
1646 | return -EINVAL; | |
1647 | ||
1648 | ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level); | |
1649 | if (ret) | |
1650 | return -EINVAL; | |
1651 | ||
1652 | clk_info->min_mem_clk = level.memory_clock; | |
1653 | clk_info->min_eng_clk = level.core_clock; | |
1654 | clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width; | |
1655 | ||
1656 | ret = smu_get_perf_level(smu, designation, &level); | |
1657 | if (ret) | |
1658 | return -EINVAL; | |
1659 | ||
1660 | clk_info->max_mem_clk = level.memory_clock; | |
1661 | clk_info->max_eng_clk = level.core_clock; | |
1662 | clk_info->max_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width; | |
1663 | ||
1664 | return 0; | |
1665 | } | |
1666 | ||
1667 | int smu_get_current_clocks(struct smu_context *smu, | |
1668 | struct amd_pp_clock_info *clocks) | |
1669 | { | |
1670 | struct amd_pp_simple_clock_info simple_clocks = {0}; | |
1671 | struct smu_clock_info hw_clocks; | |
1672 | int ret = 0; | |
1673 | ||
1674 | if (!is_support_sw_smu(smu->adev)) | |
1675 | return -EINVAL; | |
1676 | ||
1677 | mutex_lock(&smu->mutex); | |
1678 | ||
1679 | smu_get_dal_power_level(smu, &simple_clocks); | |
1680 | ||
1681 | if (smu->support_power_containment) | |
1682 | ret = smu_get_clock_info(smu, &hw_clocks, | |
1683 | PERF_LEVEL_POWER_CONTAINMENT); | |
1684 | else | |
1685 | ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY); | |
1686 | ||
1687 | if (ret) { | |
1688 | pr_err("Error in smu_get_clock_info\n"); | |
1689 | goto failed; | |
1690 | } | |
1691 | ||
1692 | clocks->min_engine_clock = hw_clocks.min_eng_clk; | |
1693 | clocks->max_engine_clock = hw_clocks.max_eng_clk; | |
1694 | clocks->min_memory_clock = hw_clocks.min_mem_clk; | |
1695 | clocks->max_memory_clock = hw_clocks.max_mem_clk; | |
1696 | clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth; | |
1697 | clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth; | |
1698 | clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk; | |
1699 | clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk; | |
1700 | ||
1701 | if (simple_clocks.level == 0) | |
1702 | clocks->max_clocks_state = PP_DAL_POWERLEVEL_7; | |
1703 | else | |
1704 | clocks->max_clocks_state = simple_clocks.level; | |
1705 | ||
1706 | if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) { | |
1707 | clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk; | |
1708 | clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk; | |
1709 | } | |
1710 | ||
1711 | failed: | |
1712 | mutex_unlock(&smu->mutex); | |
1713 | return ret; | |
1714 | } | |
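/*
 * A minimal usage sketch (illustrative only, not driver code) of a
 * display-side consumer pulling the aggregate clock info assembled
 * above.  The field names come from struct amd_pp_clock_info; the
 * helper name is hypothetical.
 */
static int example_log_current_clocks(struct smu_context *smu)
{
	struct amd_pp_clock_info info = {0};
	int ret;

	ret = smu_get_current_clocks(smu, &info);
	if (ret)
		return ret;

	pr_debug("engine clk [%u, %u], memory clk [%u, %u]\n",
		 info.min_engine_clock, info.max_engine_clock,
		 info.min_memory_clock, info.max_memory_clock);

	return 0;
}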
1715 | ||
137d63ab HR |
1716 | static int smu_set_clockgating_state(void *handle, |
1717 | enum amd_clockgating_state state) | |
1718 | { | |
1719 | return 0; | |
1720 | } | |
1721 | ||
1722 | static int smu_set_powergating_state(void *handle, | |
1723 | enum amd_powergating_state state) | |
1724 | { | |
1725 | return 0; | |
1726 | } | |
1727 | ||
49d27e91 CG |
1728 | static int smu_enable_umd_pstate(void *handle, |
1729 | enum amd_dpm_forced_level *level) | |
1730 | { | |
1731 | uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | | |
1732 | AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | | |
1733 | AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | | |
1734 | AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; | |
1735 | ||
1736 | struct smu_context *smu = (struct smu_context*)(handle); | |
1737 | struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); | |
af1ec44f PL |
1738 | |
1739 | if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)) | |
49d27e91 CG |
1740 | return -EINVAL; |
1741 | ||
1742 | if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) { | |
1743 | /* enter umd pstate, save current level, disable gfx cg */ | |
1744 | if (*level & profile_mode_mask) { | |
1745 | smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level; | |
1746 | smu_dpm_ctx->enable_umd_pstate = true; | |
49d27e91 CG |
1747 | amdgpu_device_ip_set_powergating_state(smu->adev, |
1748 | AMD_IP_BLOCK_TYPE_GFX, | |
1749 | AMD_PG_STATE_UNGATE); | |
f4fcfa42 EQ |
1750 | amdgpu_device_ip_set_clockgating_state(smu->adev, |
1751 | AMD_IP_BLOCK_TYPE_GFX, | |
1752 | AMD_CG_STATE_UNGATE); | |
49d27e91 CG |
1753 | } |
1754 | } else { | |
1755 | /* exit umd pstate, restore level, enable gfx cg */ | |
1756 | if (!(*level & profile_mode_mask)) { | |
1757 | if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) | |
1758 | *level = smu_dpm_ctx->saved_dpm_level; | |
1759 | smu_dpm_ctx->enable_umd_pstate = false; | |
1760 | amdgpu_device_ip_set_clockgating_state(smu->adev, | |
1761 | AMD_IP_BLOCK_TYPE_GFX, | |
1762 | AMD_CG_STATE_GATE); | |
1763 | amdgpu_device_ip_set_powergating_state(smu->adev, | |
1764 | AMD_IP_BLOCK_TYPE_GFX, | |
1765 | AMD_PG_STATE_GATE); | |
1766 | } | |
1767 | } | |
1768 | ||
1769 | return 0; | |
1770 | } | |
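/*
 * A minimal sketch, for illustration only, of the level test the
 * function above applies: only the PROFILE_* levels force the UMD
 * pstate (GFX clock/power gating is ungated so profiling tools see
 * stable clocks).  The helper name is hypothetical.
 */
static inline bool example_is_profile_level(enum amd_dpm_forced_level level)
{
	return !!(level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
			   AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
			   AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
			   AMD_DPM_FORCED_LEVEL_PROFILE_PEAK));
}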
1771 | ||
bc0fcffd LG |
1772 | int smu_adjust_power_state_dynamic(struct smu_context *smu, |
1773 | enum amd_dpm_forced_level level, | |
1774 | bool skip_display_settings) | |
1775 | { | |
1776 | int ret = 0; | |
1777 | int index = 0; | |
bc0fcffd LG |
1778 | long workload; |
1779 | struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); | |
1780 | ||
a254bfa2 CG |
1781 | if (!smu->pm_enabled) |
1782 | return -EINVAL; | |
780f3a9c | 1783 | |
bc0fcffd LG |
1784 | if (!skip_display_settings) { |
1785 | ret = smu_display_config_changed(smu); | |
1786 | if (ret) { | |
1787 | pr_err("Failed to change display config!"); | |
1788 | return ret; | |
1789 | } | |
1790 | } | |
1791 | ||
1792 | ret = smu_apply_clocks_adjust_rules(smu); | |
1793 | if (ret) { | |
1794 | pr_err("Failed to apply clocks adjust rules!"); | |
1795 | return ret; | |
1796 | } | |
1797 | ||
1798 | if (!skip_display_settings) { | |
19796597 | 1799 | ret = smu_notify_smc_display_config(smu); |
bc0fcffd LG |
1800 | if (ret) { |
1801 | pr_err("Failed to notify smc display config!"); | |
1802 | return ret; | |
1803 | } | |
1804 | } | |
1805 | ||
1806 | if (smu_dpm_ctx->dpm_level != level) { | |
ebf8fc31 KW |
1807 | ret = smu_asic_set_performance_level(smu, level); |
1808 | if (ret) { | |
337443d0 AD |
1809 | pr_err("Failed to set performance level!"); |
1810 | return ret; | |
bc0fcffd | 1811 | } |
780f3a9c EQ |
1812 | |
1813 | /* update the saved copy */ | |
1814 | smu_dpm_ctx->dpm_level = level; | |
bc0fcffd LG |
1815 | } |
1816 | ||
1817 | if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { | |
1818 | index = fls(smu->workload_mask); | |
1819 | index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; | |
1820 | workload = smu->workload_setting[index]; | |
1821 | ||
1822 | if (smu->power_profile_mode != workload) | |
3697b339 | 1823 | smu_set_power_profile_mode(smu, &workload, 0, false); |
bc0fcffd LG |
1824 | } |
1825 | ||
1826 | return ret; | |
1827 | } | |
1828 | ||
1829 | int smu_handle_task(struct smu_context *smu, | |
1830 | enum amd_dpm_forced_level level, | |
3697b339 EQ |
1831 | enum amd_pp_task task_id, |
1832 | bool lock_needed) | |
bc0fcffd LG |
1833 | { |
1834 | int ret = 0; | |
1835 | ||
3697b339 EQ |
1836 | if (lock_needed) |
1837 | mutex_lock(&smu->mutex); | |
1838 | ||
bc0fcffd LG |
1839 | switch (task_id) { |
1840 | case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: | |
1841 | ret = smu_pre_display_config_changed(smu); | |
1842 | if (ret) | |
3697b339 | 1843 | goto out; |
bc0fcffd LG |
1844 | ret = smu_set_cpu_power_state(smu); |
1845 | if (ret) | |
3697b339 | 1846 | goto out; |
bc0fcffd LG |
1847 | ret = smu_adjust_power_state_dynamic(smu, level, false); |
1848 | break; | |
1849 | case AMD_PP_TASK_COMPLETE_INIT: | |
1850 | case AMD_PP_TASK_READJUST_POWER_STATE: | |
1851 | ret = smu_adjust_power_state_dynamic(smu, level, true); | |
1852 | break; | |
1853 | default: | |
1854 | break; | |
1855 | } | |
1856 | ||
3697b339 EQ |
1857 | out: |
1858 | if (lock_needed) | |
1859 | mutex_unlock(&smu->mutex); | |
1860 | ||
bc0fcffd LG |
1861 | return ret; |
1862 | } | |
1863 | ||
4abc1765 EQ |
1864 | int smu_switch_power_profile(struct smu_context *smu, |
1865 | enum PP_SMC_POWER_PROFILE type, | |
1866 | bool en) | |
1867 | { | |
1868 | struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); | |
1869 | long workload; | |
1870 | uint32_t index; | |
1871 | ||
1872 | if (!smu->pm_enabled) | |
1873 | return -EINVAL; | |
1874 | ||
1875 | if (type >= PP_SMC_POWER_PROFILE_CUSTOM) | |
1876 | return -EINVAL; | |
1877 | ||
1878 | mutex_lock(&smu->mutex); | |
1879 | ||
1880 | if (!en) { | |
1881 | smu->workload_mask &= ~(1 << smu->workload_prority[type]); | |
1882 | index = fls(smu->workload_mask); | |
1883 | index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; | |
1884 | workload = smu->workload_setting[index]; | |
1885 | } else { | |
1886 | smu->workload_mask |= (1 << smu->workload_prority[type]); | |
1887 | index = fls(smu->workload_mask); | |
1888 | index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; | |
1889 | workload = smu->workload_setting[index]; | |
1890 | } | |
1891 | ||
1892 | if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) | |
3697b339 | 1893 | smu_set_power_profile_mode(smu, &workload, 0, false); |
4abc1765 EQ |
1894 | |
1895 | mutex_unlock(&smu->mutex); | |
1896 | ||
1897 | return 0; | |
1898 | } | |
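/*
 * A minimal sketch (illustrative only) of the fls()-based selection
 * used above: each workload profile owns one bit of workload_mask, and
 * fls() returns the 1-based position of the highest set bit, so the
 * highest-priority profile still requested wins.  The helper name is
 * hypothetical.
 */
static inline int example_pick_workload_index(uint32_t workload_mask)
{
	int index = fls(workload_mask);	/* 0 when no bit is set */

	return (index > 0 && index <= WORKLOAD_POLICY_MAX) ? index - 1 : 0;
}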
1899 | ||
a38470f0 KW |
1900 | enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu) |
1901 | { | |
1902 | struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); | |
8e33376b | 1903 | enum amd_dpm_forced_level level; |
a38470f0 | 1904 | |
af1ec44f | 1905 | if (!smu->is_apu && !smu_dpm_ctx->dpm_context) |
a38470f0 KW |
1906 | return -EINVAL; |
1907 | ||
1908 | mutex_lock(&(smu->mutex)); | |
8e33376b | 1909 | level = smu_dpm_ctx->dpm_level; |
a38470f0 KW |
1910 | mutex_unlock(&(smu->mutex)); |
1911 | ||
8e33376b | 1912 | return level; |
a38470f0 KW |
1913 | } |
1914 | ||
1915 | int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) | |
1916 | { | |
a38470f0 | 1917 | struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); |
780f3a9c | 1918 | int ret = 0; |
a38470f0 | 1919 | |
af1ec44f | 1920 | if (!smu->is_apu && !smu_dpm_ctx->dpm_context) |
a38470f0 KW |
1921 | return -EINVAL; |
1922 | ||
3697b339 EQ |
1923 | mutex_lock(&smu->mutex); |
1924 | ||
780f3a9c | 1925 | ret = smu_enable_umd_pstate(smu, &level); |
3697b339 EQ |
1926 | if (ret) { |
1927 | mutex_unlock(&smu->mutex); | |
6f6a7bba | 1928 | return ret; |
3697b339 | 1929 | } |
a38470f0 | 1930 | |
780f3a9c | 1931 | ret = smu_handle_task(smu, level, |
3697b339 EQ |
1932 | AMD_PP_TASK_READJUST_POWER_STATE, |
1933 | false); | |
1934 | ||
1935 | mutex_unlock(&smu->mutex); | |
a38470f0 KW |
1936 | |
1937 | return ret; | |
1938 | } | |
1939 | ||
2e13c755 | 1940 | int smu_set_display_count(struct smu_context *smu, uint32_t count) |
1941 | { | |
1942 | int ret = 0; | |
1943 | ||
1944 | mutex_lock(&smu->mutex); | |
1945 | ret = smu_init_display_count(smu, count); | |
1946 | mutex_unlock(&smu->mutex); | |
1947 | ||
1948 | return ret; | |
1949 | } | |
1950 | ||
f78c47f6 EQ |
1951 | int smu_force_clk_levels(struct smu_context *smu, |
1952 | enum smu_clk_type clk_type, | |
3697b339 EQ |
1953 | uint32_t mask, |
1954 | bool lock_needed) | |
f78c47f6 EQ |
1955 | { |
1956 | struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); | |
1957 | int ret = 0; | |
1958 | ||
1959 | if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { | |
1960 | pr_debug("force clock level is for dpm manual mode only.\n"); | |
1961 | return -EINVAL; | |
1962 | } | |
1963 | ||
3697b339 EQ |
1964 | if (lock_needed) |
1965 | mutex_lock(&smu->mutex); | |
1966 | ||
f78c47f6 EQ |
1967 | if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) |
1968 | ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask); | |
1969 | ||
3697b339 EQ |
1970 | if (lock_needed) |
1971 | mutex_unlock(&smu->mutex); | |
1972 | ||
f78c47f6 EQ |
1973 | return ret; |
1974 | } | |
1975 | ||
0e0b89c0 EQ |
1976 | int smu_set_mp1_state(struct smu_context *smu, |
1977 | enum pp_mp1_state mp1_state) | |
1978 | { | |
1979 | uint16_t msg; | |
1980 | int ret; | |
1981 | ||
1982 | /* | |
1983 | * The SMC is not fully ready. That may be | |
1984 | * expected as the IP may be masked. | |
1985 | * So, just return without error. | |
1986 | */ | |
1987 | if (!smu->pm_enabled) | |
1988 | return 0; | |
1989 | ||
3697b339 EQ |
1990 | mutex_lock(&smu->mutex); |
1991 | ||
0e0b89c0 EQ |
1992 | switch (mp1_state) { |
1993 | case PP_MP1_STATE_SHUTDOWN: | |
1994 | msg = SMU_MSG_PrepareMp1ForShutdown; | |
1995 | break; | |
1996 | case PP_MP1_STATE_UNLOAD: | |
1997 | msg = SMU_MSG_PrepareMp1ForUnload; | |
1998 | break; | |
1999 | case PP_MP1_STATE_RESET: | |
2000 | msg = SMU_MSG_PrepareMp1ForReset; | |
2001 | break; | |
2002 | case PP_MP1_STATE_NONE: | |
2003 | default: | |
3697b339 | 2004 | mutex_unlock(&smu->mutex); |
0e0b89c0 EQ |
2005 | return 0; |
2006 | } | |
2007 | ||
2008 | /* some asics may not support those messages */ | |
3697b339 EQ |
2009 | if (smu_msg_get_index(smu, msg) < 0) { |
2010 | mutex_unlock(&smu->mutex); | |
0e0b89c0 | 2011 | return 0; |
3697b339 | 2012 | } |
0e0b89c0 | 2013 | |
1c58267c | 2014 | ret = smu_send_smc_msg(smu, msg, NULL); |
0e0b89c0 EQ |
2015 | if (ret) |
2016 | pr_err("[PrepareMp1] Failed!\n"); | |
2017 | ||
3697b339 EQ |
2018 | mutex_unlock(&smu->mutex); |
2019 | ||
0e0b89c0 EQ |
2020 | return ret; |
2021 | } | |
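/*
 * A hedged usage sketch (not driver code): notifying MP1 firmware that
 * the driver is about to be unloaded so it can quiesce.  ASICs that do
 * not implement the message are silently skipped by the wrapper above.
 * The helper name is hypothetical.
 */
static void example_prepare_unload(struct amdgpu_device *adev)
{
	if (smu_set_mp1_state(&adev->smu, PP_MP1_STATE_UNLOAD))
		pr_warn("Failed to prepare MP1 for unload\n");
}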
2022 | ||
7e899409 EQ |
2023 | int smu_set_df_cstate(struct smu_context *smu, |
2024 | enum pp_df_cstate state) | |
2025 | { | |
2026 | int ret = 0; | |
2027 | ||
2028 | /* | |
2029 | * The SMC is not fully ready. That may be | |
2030 | * expected as the IP may be masked. | |
2031 | * So, just return without error. | |
2032 | */ | |
2033 | if (!smu->pm_enabled) | |
2034 | return 0; | |
2035 | ||
2036 | if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate) | |
2037 | return 0; | |
2038 | ||
3697b339 EQ |
2039 | mutex_lock(&smu->mutex); |
2040 | ||
7e899409 EQ |
2041 | ret = smu->ppt_funcs->set_df_cstate(smu, state); |
2042 | if (ret) | |
2043 | pr_err("[SetDfCstate] failed!\n"); | |
2044 | ||
3697b339 EQ |
2045 | mutex_unlock(&smu->mutex); |
2046 | ||
7e899409 EQ |
2047 | return ret; |
2048 | } | |
2049 | ||
7bbdbe40 HW |
2050 | int smu_write_watermarks_table(struct smu_context *smu) |
2051 | { | |
9fa1ed5b | 2052 | void *watermarks_table = smu->smu_table.watermarks_table; |
7bbdbe40 | 2053 | |
9fa1ed5b | 2054 | if (!watermarks_table) |
7bbdbe40 HW |
2055 | return -EINVAL; |
2056 | ||
9fa1ed5b EQ |
2057 | return smu_update_table(smu, |
2058 | SMU_TABLE_WATERMARKS, | |
2059 | 0, | |
2060 | watermarks_table, | |
7bbdbe40 | 2061 | true); |
7bbdbe40 HW |
2062 | } |
2063 | ||
2064 | int smu_set_watermarks_for_clock_ranges(struct smu_context *smu, | |
2065 | struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges) | |
2066 | { | |
9fa1ed5b | 2067 | void *table = smu->smu_table.watermarks_table; |
e78adc5a | 2068 | |
9fa1ed5b EQ |
2069 | if (!table) |
2070 | return -EINVAL; | |
7bbdbe40 | 2071 | |
3697b339 EQ |
2072 | mutex_lock(&smu->mutex); |
2073 | ||
7bbdbe40 HW |
2074 | if (!smu->disable_watermark && |
2075 | smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) && | |
2076 | smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { | |
2077 | smu_set_watermarks_table(smu, table, clock_ranges); | |
2622e2ae HW |
2078 | |
2079 | if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) { | |
2080 | smu->watermarks_bitmap |= WATERMARKS_EXIST; | |
2081 | smu->watermarks_bitmap &= ~WATERMARKS_LOADED; | |
2082 | } | |
7bbdbe40 HW |
2083 | } |
2084 | ||
3697b339 EQ |
2085 | mutex_unlock(&smu->mutex); |
2086 | ||
c7d5dfa8 | 2087 | return 0; |
7bbdbe40 HW |
2088 | } |
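/*
 * A minimal sketch (illustrative only) of the two-step watermark
 * handshake implied above: the display code first caches the clock
 * ranges (setting WATERMARKS_EXIST), then a separate upload pushes the
 * cached table to SMU firmware.  The helper name is hypothetical and
 * the real display path stages these calls independently.
 */
static int example_update_watermarks(struct smu_context *smu,
		struct dm_pp_wm_sets_with_clock_ranges_soc15 *ranges)
{
	int ret;

	/* cache the ranges into the driver-side watermarks table */
	ret = smu_set_watermarks_for_clock_ranges(smu, ranges);
	if (ret)
		return ret;

	/* push the cached table to SMU firmware */
	return smu_write_watermarks_table(smu);
}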
2089 | ||
9644bf5f AD |
2090 | int smu_set_ac_dc(struct smu_context *smu) |
2091 | { | |
2092 | int ret = 0; | |
2093 | ||
2094 | /* controlled by firmware */ | |
2095 | if (smu->dc_controlled_by_gpio) | |
2096 | return 0; | |
2097 | ||
2098 | mutex_lock(&smu->mutex); | |
2099 | if (smu->ppt_funcs->set_power_source) { | |
2100 | if (smu->adev->pm.ac_power) | |
2101 | ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC); | |
2102 | else | |
2103 | ret = smu_set_power_source(smu, SMU_POWER_SOURCE_DC); | |
2104 | if (ret) | |
2105 | pr_err("Failed to switch to %s mode!\n", | |
2106 | smu->adev->pm.ac_power ? "AC" : "DC"); | |
2107 | } | |
2108 | mutex_unlock(&smu->mutex); | |
2109 | ||
2110 | return ret; | |
2111 | } | |
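/*
 * A hedged sketch of an AC/DC notification path: the cached power
 * source is updated first, since smu_set_ac_dc() above reads
 * adev->pm.ac_power.  The handler name is made up for illustration;
 * the real trigger is the ACPI power-source event.
 */
static void example_on_power_source_change(struct amdgpu_device *adev, bool on_ac)
{
	adev->pm.ac_power = on_ac;	/* consumed by smu_set_ac_dc() */
	smu_set_ac_dc(&adev->smu);
}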
2112 | ||
137d63ab HR |
2113 | const struct amd_ip_funcs smu_ip_funcs = { |
2114 | .name = "smu", | |
2115 | .early_init = smu_early_init, | |
bee71d26 | 2116 | .late_init = smu_late_init, |
137d63ab HR |
2117 | .sw_init = smu_sw_init, |
2118 | .sw_fini = smu_sw_fini, | |
2119 | .hw_init = smu_hw_init, | |
2120 | .hw_fini = smu_hw_fini, | |
2121 | .suspend = smu_suspend, | |
2122 | .resume = smu_resume, | |
2123 | .is_idle = NULL, | |
2124 | .check_soft_reset = NULL, | |
2125 | .wait_for_idle = NULL, | |
2126 | .soft_reset = NULL, | |
2127 | .set_clockgating_state = smu_set_clockgating_state, | |
2128 | .set_powergating_state = smu_set_powergating_state, | |
49d27e91 | 2129 | .enable_umd_pstate = smu_enable_umd_pstate, |
137d63ab | 2130 | }; |
07845526 HR |
2131 | |
2132 | const struct amdgpu_ip_block_version smu_v11_0_ip_block = | |
2133 | { | |
2134 | .type = AMD_IP_BLOCK_TYPE_SMC, | |
2135 | .major = 11, | |
2136 | .minor = 0, | |
2137 | .rev = 0, | |
2138 | .funcs = &smu_ip_funcs, | |
2139 | }; | |
5dbbe6a7 AL |
2140 | |
2141 | const struct amdgpu_ip_block_version smu_v12_0_ip_block = | |
2142 | { | |
2143 | .type = AMD_IP_BLOCK_TYPE_SMC, | |
2144 | .major = 12, | |
2145 | .minor = 0, | |
2146 | .rev = 0, | |
2147 | .funcs = &smu_ip_funcs, | |
2148 | }; | |
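/*
 * A hedged sketch of how an ASIC init path registers one of the IP
 * block descriptors above.  amdgpu_device_ip_block_add() is the real
 * helper; this standalone wrapper is illustrative only.
 */
static int example_register_smu_block(struct amdgpu_device *adev)
{
	/* SOC15 parts with SMU v11 (e.g. Navi10) would add this block */
	return amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
}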
3697b339 EQ |
2149 | |
2150 | int smu_load_microcode(struct smu_context *smu) | |
2151 | { | |
2152 | int ret = 0; | |
2153 | ||
2154 | mutex_lock(&smu->mutex); | |
2155 | ||
6c45e480 EQ |
2156 | if (smu->ppt_funcs->load_microcode) |
2157 | ret = smu->ppt_funcs->load_microcode(smu); | |
3697b339 EQ |
2158 | |
2159 | mutex_unlock(&smu->mutex); | |
2160 | ||
2161 | return ret; | |
2162 | } | |
2163 | ||
2164 | int smu_check_fw_status(struct smu_context *smu) | |
2165 | { | |
2166 | int ret = 0; | |
2167 | ||
2168 | mutex_lock(&smu->mutex); | |
2169 | ||
6c45e480 EQ |
2170 | if (smu->ppt_funcs->check_fw_status) |
2171 | ret = smu->ppt_funcs->check_fw_status(smu); | |
3697b339 EQ |
2172 | |
2173 | mutex_unlock(&smu->mutex); | |
2174 | ||
2175 | return ret; | |
2176 | } | |
2177 | ||
2178 | int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled) | |
2179 | { | |
2180 | int ret = 0; | |
2181 | ||
2182 | mutex_lock(&smu->mutex); | |
2183 | ||
6c45e480 EQ |
2184 | if (smu->ppt_funcs->set_gfx_cgpg) |
2185 | ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled); | |
3697b339 EQ |
2186 | |
2187 | mutex_unlock(&smu->mutex); | |
2188 | ||
2189 | return ret; | |
2190 | } | |
2191 | ||
2192 | int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed) | |
2193 | { | |
2194 | int ret = 0; | |
2195 | ||
2196 | mutex_lock(&smu->mutex); | |
2197 | ||
6c45e480 EQ |
2198 | if (smu->ppt_funcs->set_fan_speed_rpm) |
2199 | ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed); | |
3697b339 EQ |
2200 | |
2201 | mutex_unlock(&smu->mutex); | |
2202 | ||
2203 | return ret; | |
2204 | } | |
2205 | ||
2206 | int smu_get_power_limit(struct smu_context *smu, | |
2207 | uint32_t *limit, | |
2208 | bool def, | |
2209 | bool lock_needed) | |
2210 | { | |
2211 | int ret = 0; | |
2212 | ||
2213 | if (lock_needed) | |
2214 | mutex_lock(&smu->mutex); | |
2215 | ||
2216 | if (smu->ppt_funcs->get_power_limit) | |
2217 | ret = smu->ppt_funcs->get_power_limit(smu, limit, def); | |
2218 | ||
2219 | if (lock_needed) | |
2220 | mutex_unlock(&smu->mutex); | |
2221 | ||
2222 | return ret; | |
2223 | } | |
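/*
 * A hedged usage sketch pairing the getter/setter above, in the style
 * of a hwmon power-cap handler.  The default (def = true) limit is
 * treated as the upper bound; the helper name and the clamping policy
 * are assumptions for illustration.
 */
static int example_cap_power(struct smu_context *smu, uint32_t limit)
{
	uint32_t max_limit = 0;
	int ret;

	ret = smu_get_power_limit(smu, &max_limit, true, true);
	if (ret)
		return ret;

	if (limit > max_limit)
		return -EINVAL;

	return smu_set_power_limit(smu, limit);
}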
2224 | ||
2225 | int smu_set_power_limit(struct smu_context *smu, uint32_t limit) | |
2226 | { | |
2227 | int ret = 0; | |
2228 | ||
2229 | mutex_lock(&smu->mutex); | |
2230 | ||
6c45e480 EQ |
2231 | if (smu->ppt_funcs->set_power_limit) |
2232 | ret = smu->ppt_funcs->set_power_limit(smu, limit); | |
3697b339 EQ |
2233 | |
2234 | mutex_unlock(&smu->mutex); | |
2235 | ||
2236 | return ret; | |
2237 | } | |
2238 | ||
2239 | int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) | |
2240 | { | |
2241 | int ret = 0; | |
2242 | ||
2243 | mutex_lock(&smu->mutex); | |
2244 | ||
2245 | if (smu->ppt_funcs->print_clk_levels) | |
2246 | ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf); | |
2247 | ||
2248 | mutex_unlock(&smu->mutex); | |
2249 | ||
2250 | return ret; | |
2251 | } | |
2252 | ||
2253 | int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type) | |
2254 | { | |
2255 | int ret = 0; | |
2256 | ||
2257 | mutex_lock(&smu->mutex); | |
2258 | ||
2259 | if (smu->ppt_funcs->get_od_percentage) | |
2260 | ret = smu->ppt_funcs->get_od_percentage(smu, type); | |
2261 | ||
2262 | mutex_unlock(&smu->mutex); | |
2263 | ||
2264 | return ret; | |
2265 | } | |
2266 | ||
2267 | int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value) | |
2268 | { | |
2269 | int ret = 0; | |
2270 | ||
2271 | mutex_lock(&smu->mutex); | |
2272 | ||
2273 | if (smu->ppt_funcs->set_od_percentage) | |
2274 | ret = smu->ppt_funcs->set_od_percentage(smu, type, value); | |
2275 | ||
2276 | mutex_unlock(&smu->mutex); | |
2277 | ||
2278 | return ret; | |
2279 | } | |
2280 | ||
2281 | int smu_od_edit_dpm_table(struct smu_context *smu, | |
2282 | enum PP_OD_DPM_TABLE_COMMAND type, | |
2283 | long *input, uint32_t size) | |
2284 | { | |
2285 | int ret = 0; | |
2286 | ||
2287 | mutex_lock(&smu->mutex); | |
2288 | ||
2289 | if (smu->ppt_funcs->od_edit_dpm_table) | |
2290 | ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size); | |
2291 | ||
2292 | mutex_unlock(&smu->mutex); | |
2293 | ||
2294 | return ret; | |
2295 | } | |
2296 | ||
2297 | int smu_read_sensor(struct smu_context *smu, | |
2298 | enum amd_pp_sensors sensor, | |
2299 | void *data, uint32_t *size) | |
2300 | { | |
2301 | int ret = 0; | |
2302 | ||
2303 | mutex_lock(&smu->mutex); | |
2304 | ||
2305 | if (smu->ppt_funcs->read_sensor) | |
2306 | ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size); | |
2307 | ||
2308 | mutex_unlock(&smu->mutex); | |
2309 | ||
2310 | return ret; | |
2311 | } | |
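/*
 * A hedged usage sketch: querying a single sensor through the wrapper
 * above.  AMDGPU_PP_SENSOR_GPU_TEMP is a real amd_pp_sensors value
 * (reported in millidegrees Celsius by this driver); the helper name
 * is hypothetical.
 */
static int example_read_gpu_temp(struct smu_context *smu, uint32_t *temp)
{
	uint32_t size = sizeof(*temp);

	return smu_read_sensor(smu, AMDGPU_PP_SENSOR_GPU_TEMP, temp, &size);
}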
2312 | ||
2313 | int smu_get_power_profile_mode(struct smu_context *smu, char *buf) | |
2314 | { | |
2315 | int ret = 0; | |
2316 | ||
2317 | mutex_lock(&smu->mutex); | |
2318 | ||
2319 | if (smu->ppt_funcs->get_power_profile_mode) | |
2320 | ret = smu->ppt_funcs->get_power_profile_mode(smu, buf); | |
2321 | ||
2322 | mutex_unlock(&smu->mutex); | |
2323 | ||
2324 | return ret; | |
2325 | } | |
2326 | ||
2327 | int smu_set_power_profile_mode(struct smu_context *smu, | |
2328 | long *param, | |
2329 | uint32_t param_size, | |
2330 | bool lock_needed) | |
2331 | { | |
2332 | int ret = 0; | |
2333 | ||
2334 | if (lock_needed) | |
2335 | mutex_lock(&smu->mutex); | |
2336 | ||
2337 | if (smu->ppt_funcs->set_power_profile_mode) | |
2338 | ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size); | |
2339 | ||
2340 | if (lock_needed) | |
2341 | mutex_unlock(&smu->mutex); | |
2342 | ||
2343 | return ret; | |
2344 | } | |
2345 | ||
2346 | ||
2347 | int smu_get_fan_control_mode(struct smu_context *smu) | |
2348 | { | |
2349 | int ret = 0; | |
2350 | ||
2351 | mutex_lock(&smu->mutex); | |
2352 | ||
6c45e480 EQ |
2353 | if (smu->ppt_funcs->get_fan_control_mode) |
2354 | ret = smu->ppt_funcs->get_fan_control_mode(smu); | |
3697b339 EQ |
2355 | |
2356 | mutex_unlock(&smu->mutex); | |
2357 | ||
2358 | return ret; | |
2359 | } | |
2360 | ||
2361 | int smu_set_fan_control_mode(struct smu_context *smu, int value) | |
2362 | { | |
2363 | int ret = 0; | |
2364 | ||
2365 | mutex_lock(&smu->mutex); | |
2366 | ||
6c45e480 EQ |
2367 | if (smu->ppt_funcs->set_fan_control_mode) |
2368 | ret = smu->ppt_funcs->set_fan_control_mode(smu, value); | |
3697b339 EQ |
2369 | |
2370 | mutex_unlock(&smu->mutex); | |
2371 | ||
2372 | return ret; | |
2373 | } | |
2374 | ||
2375 | int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed) | |
2376 | { | |
2377 | int ret = 0; | |
2378 | ||
2379 | mutex_lock(&smu->mutex); | |
2380 | ||
2381 | if (smu->ppt_funcs->get_fan_speed_percent) | |
2382 | ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed); | |
2383 | ||
2384 | mutex_unlock(&smu->mutex); | |
2385 | ||
2386 | return ret; | |
2387 | } | |
2388 | ||
2389 | int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed) | |
2390 | { | |
2391 | int ret = 0; | |
2392 | ||
2393 | mutex_lock(&smu->mutex); | |
2394 | ||
6c45e480 EQ |
2395 | if (smu->ppt_funcs->set_fan_speed_percent) |
2396 | ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed); | |
3697b339 EQ |
2397 | |
2398 | mutex_unlock(&smu->mutex); | |
2399 | ||
2400 | return ret; | |
2401 | } | |
2402 | ||
2403 | int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed) | |
2404 | { | |
2405 | int ret = 0; | |
2406 | ||
2407 | mutex_lock(&smu->mutex); | |
2408 | ||
2409 | if (smu->ppt_funcs->get_fan_speed_rpm) | |
2410 | ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed); | |
2411 | ||
2412 | mutex_unlock(&smu->mutex); | |
2413 | ||
2414 | return ret; | |
2415 | } | |
2416 | ||
2417 | int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk) | |
2418 | { | |
2419 | int ret = 0; | |
2420 | ||
2421 | mutex_lock(&smu->mutex); | |
2422 | ||
6c45e480 EQ |
2423 | if (smu->ppt_funcs->set_deep_sleep_dcefclk) |
2424 | ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk); | |
3697b339 EQ |
2425 | |
2426 | mutex_unlock(&smu->mutex); | |
2427 | ||
2428 | return ret; | |
2429 | } | |
2430 | ||
2431 | int smu_set_active_display_count(struct smu_context *smu, uint32_t count) | |
2432 | { | |
2433 | int ret = 0; | |
2434 | ||
6c45e480 EQ |
2435 | if (smu->ppt_funcs->set_active_display_count) |
2436 | ret = smu->ppt_funcs->set_active_display_count(smu, count); | |
3697b339 | 2437 | |
3697b339 EQ |
2438 | return ret; |
2439 | } | |
2440 | ||
2441 | int smu_get_clock_by_type(struct smu_context *smu, | |
2442 | enum amd_pp_clock_type type, | |
2443 | struct amd_pp_clocks *clocks) | |
2444 | { | |
2445 | int ret = 0; | |
2446 | ||
2447 | mutex_lock(&smu->mutex); | |
2448 | ||
6c45e480 EQ |
2449 | if (smu->ppt_funcs->get_clock_by_type) |
2450 | ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks); | |
3697b339 EQ |
2451 | |
2452 | mutex_unlock(&smu->mutex); | |
2453 | ||
2454 | return ret; | |
2455 | } | |
2456 | ||
2457 | int smu_get_max_high_clocks(struct smu_context *smu, | |
2458 | struct amd_pp_simple_clock_info *clocks) | |
2459 | { | |
2460 | int ret = 0; | |
2461 | ||
2462 | mutex_lock(&smu->mutex); | |
2463 | ||
6c45e480 EQ |
2464 | if (smu->ppt_funcs->get_max_high_clocks) |
2465 | ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks); | |
3697b339 EQ |
2466 | |
2467 | mutex_unlock(&smu->mutex); | |
2468 | ||
2469 | return ret; | |
2470 | } | |
2471 | ||
2472 | int smu_get_clock_by_type_with_latency(struct smu_context *smu, | |
2473 | enum smu_clk_type clk_type, | |
2474 | struct pp_clock_levels_with_latency *clocks) | |
2475 | { | |
2476 | int ret = 0; | |
2477 | ||
2478 | mutex_lock(&smu->mutex); | |
2479 | ||
2480 | if (smu->ppt_funcs->get_clock_by_type_with_latency) | |
2481 | ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks); | |
2482 | ||
2483 | mutex_unlock(&smu->mutex); | |
2484 | ||
2485 | return ret; | |
2486 | } | |
2487 | ||
2488 | int smu_get_clock_by_type_with_voltage(struct smu_context *smu, | |
2489 | enum amd_pp_clock_type type, | |
2490 | struct pp_clock_levels_with_voltage *clocks) | |
2491 | { | |
2492 | int ret = 0; | |
2493 | ||
2494 | mutex_lock(&smu->mutex); | |
2495 | ||
2496 | if (smu->ppt_funcs->get_clock_by_type_with_voltage) | |
2497 | ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks); | |
2498 | ||
2499 | mutex_unlock(&smu->mutex); | |
2500 | ||
2501 | return ret; | |
2502 | } | |
2503 | ||
2504 | ||
2505 | int smu_display_clock_voltage_request(struct smu_context *smu, | |
2506 | struct pp_display_clock_request *clock_req) | |
2507 | { | |
2508 | int ret = 0; | |
2509 | ||
2510 | mutex_lock(&smu->mutex); | |
2511 | ||
6c45e480 EQ |
2512 | if (smu->ppt_funcs->display_clock_voltage_request) |
2513 | ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req); | |
3697b339 EQ |
2514 | |
2515 | mutex_unlock(&smu->mutex); | |
2516 | ||
2517 | return ret; | |
2518 | } | |
2519 | ||
2520 | ||
2521 | int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch) | |
2522 | { | |
2523 | int ret = -EINVAL; | |
2524 | ||
2525 | mutex_lock(&smu->mutex); | |
2526 | ||
2527 | if (smu->ppt_funcs->display_disable_memory_clock_switch) | |
2528 | ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch); | |
2529 | ||
2530 | mutex_unlock(&smu->mutex); | |
2531 | ||
2532 | return ret; | |
2533 | } | |
2534 | ||
2535 | int smu_notify_smu_enable_pwe(struct smu_context *smu) | |
2536 | { | |
2537 | int ret = 0; | |
2538 | ||
2539 | mutex_lock(&smu->mutex); | |
2540 | ||
6c45e480 EQ |
2541 | if (smu->ppt_funcs->notify_smu_enable_pwe) |
2542 | ret = smu->ppt_funcs->notify_smu_enable_pwe(smu); | |
3697b339 EQ |
2543 | |
2544 | mutex_unlock(&smu->mutex); | |
2545 | ||
2546 | return ret; | |
2547 | } | |
2548 | ||
2549 | int smu_set_xgmi_pstate(struct smu_context *smu, | |
2550 | uint32_t pstate) | |
2551 | { | |
2552 | int ret = 0; | |
2553 | ||
2554 | mutex_lock(&smu->mutex); | |
2555 | ||
6c45e480 EQ |
2556 | if (smu->ppt_funcs->set_xgmi_pstate) |
2557 | ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate); | |
3697b339 EQ |
2558 | |
2559 | mutex_unlock(&smu->mutex); | |
2560 | ||
2561 | return ret; | |
2562 | } | |
2563 | ||
2564 | int smu_set_azalia_d3_pme(struct smu_context *smu) | |
2565 | { | |
2566 | int ret = 0; | |
2567 | ||
2568 | mutex_lock(&smu->mutex); | |
2569 | ||
6c45e480 EQ |
2570 | if (smu->ppt_funcs->set_azalia_d3_pme) |
2571 | ret = smu->ppt_funcs->set_azalia_d3_pme(smu); | |
3697b339 EQ |
2572 | |
2573 | mutex_unlock(&smu->mutex); | |
2574 | ||
2575 | return ret; | |
2576 | } | |
2577 | ||
2578 | bool smu_baco_is_support(struct smu_context *smu) | |
2579 | { | |
2580 | bool ret = false; | |
2581 | ||
2582 | mutex_lock(&smu->mutex); | |
2583 | ||
e78adc5a | 2584 | if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support) |
6c45e480 | 2585 | ret = smu->ppt_funcs->baco_is_support(smu); |
3697b339 EQ |
2586 | |
2587 | mutex_unlock(&smu->mutex); | |
2588 | ||
2589 | return ret; | |
2590 | } | |
2591 | ||
2592 | int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state) | |
2593 | { | |
6c45e480 | 2594 | if (!smu->ppt_funcs->baco_get_state) |
3697b339 EQ |
2595 | return -EINVAL; |
2596 | ||
2597 | mutex_lock(&smu->mutex); | |
6c45e480 | 2598 | *state = smu->ppt_funcs->baco_get_state(smu); |
3697b339 EQ |
2599 | mutex_unlock(&smu->mutex); |
2600 | ||
2601 | return 0; | |
2602 | } | |
2603 | ||
11520f27 | 2604 | int smu_baco_enter(struct smu_context *smu) |
3697b339 EQ |
2605 | { |
2606 | int ret = 0; | |
2607 | ||
2608 | mutex_lock(&smu->mutex); | |
2609 | ||
11520f27 AD |
2610 | if (smu->ppt_funcs->baco_enter) |
2611 | ret = smu->ppt_funcs->baco_enter(smu); | |
2612 | ||
2613 | mutex_unlock(&smu->mutex); | |
2614 | ||
2615 | return ret; | |
2616 | } | |
2617 | ||
2618 | int smu_baco_exit(struct smu_context *smu) | |
2619 | { | |
2620 | int ret = 0; | |
2621 | ||
2622 | mutex_lock(&smu->mutex); | |
2623 | ||
2624 | if (smu->ppt_funcs->baco_exit) | |
2625 | ret = smu->ppt_funcs->baco_exit(smu); | |
3697b339 EQ |
2626 | |
2627 | mutex_unlock(&smu->mutex); | |
2628 | ||
2629 | return ret; | |
2630 | } | |
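/*
 * A minimal sketch (illustrative only, error handling trimmed) of a
 * BACO-based reset sequence: check support, then enter and exit BACO
 * back to back.  It mirrors, but is not, the driver's actual reset
 * path.
 */
static int example_baco_reset(struct smu_context *smu)
{
	int ret;

	if (!smu_baco_is_support(smu))
		return -EOPNOTSUPP;

	ret = smu_baco_enter(smu);
	if (ret)
		return ret;

	return smu_baco_exit(smu);
}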
2631 | ||
2632 | int smu_mode2_reset(struct smu_context *smu) | |
2633 | { | |
2634 | int ret = 0; | |
2635 | ||
2636 | mutex_lock(&smu->mutex); | |
2637 | ||
6c45e480 EQ |
2638 | if (smu->ppt_funcs->mode2_reset) |
2639 | ret = smu->ppt_funcs->mode2_reset(smu); | |
3697b339 EQ |
2640 | |
2641 | mutex_unlock(&smu->mutex); | |
2642 | ||
2643 | return ret; | |
2644 | } | |
2645 | ||
2646 | int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu, | |
2647 | struct pp_smu_nv_clock_table *max_clocks) | |
2648 | { | |
2649 | int ret = 0; | |
2650 | ||
2651 | mutex_lock(&smu->mutex); | |
2652 | ||
6c45e480 EQ |
2653 | if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc) |
2654 | ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks); | |
3697b339 EQ |
2655 | |
2656 | mutex_unlock(&smu->mutex); | |
2657 | ||
2658 | return ret; | |
2659 | } | |
2660 | ||
2661 | int smu_get_uclk_dpm_states(struct smu_context *smu, | |
2662 | unsigned int *clock_values_in_khz, | |
2663 | unsigned int *num_states) | |
2664 | { | |
2665 | int ret = 0; | |
2666 | ||
2667 | mutex_lock(&smu->mutex); | |
2668 | ||
2669 | if (smu->ppt_funcs->get_uclk_dpm_states) | |
2670 | ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states); | |
2671 | ||
2672 | mutex_unlock(&smu->mutex); | |
2673 | ||
2674 | return ret; | |
2675 | } | |
2676 | ||
2677 | enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu) | |
2678 | { | |
2679 | enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT; | |
2680 | ||
2681 | mutex_lock(&smu->mutex); | |
2682 | ||
2683 | if (smu->ppt_funcs->get_current_power_state) | |
2684 | pm_state = smu->ppt_funcs->get_current_power_state(smu); | |
2685 | ||
2686 | mutex_unlock(&smu->mutex); | |
2687 | ||
2688 | return pm_state; | |
2689 | } | |
2690 | ||
2691 | int smu_get_dpm_clock_table(struct smu_context *smu, | |
2692 | struct dpm_clocks *clock_table) | |
2693 | { | |
2694 | int ret = 0; | |
2695 | ||
2696 | mutex_lock(&smu->mutex); | |
2697 | ||
2698 | if (smu->ppt_funcs->get_dpm_clock_table) | |
2699 | ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table); | |
2700 | ||
2701 | mutex_unlock(&smu->mutex); | |
2702 | ||
2703 | return ret; | |
2704 | } | |
73abde4d MC |
2705 | |
2706 | uint32_t smu_get_pptable_power_limit(struct smu_context *smu) | |
2707 | { | |
2708 | uint32_t ret = 0; | |
2709 | ||
2710 | if (smu->ppt_funcs->get_pptable_power_limit) | |
2711 | ret = smu->ppt_funcs->get_pptable_power_limit(smu); | |
2712 | ||
2713 | return ret; | |
2714 | } |