/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "smu_v11_0.h"
#include "smu_v12_0.h"
#include "atom.h"
#include "vega20_ppt.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "renoir_ppt.h"
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type)	#type
static const char* __smu_message_names[] = {
	SMU_MESSAGE_TYPES
};

const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
{
	if (type < 0 || type >= SMU_MSG_MAX_COUNT)
		return "unknown smu message";
	return __smu_message_names[type];
}
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea)	#fea
static const char* __smu_feature_names[] = {
	SMU_FEATURE_MASKS
};

const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
{
	if (feature < 0 || feature >= SMU_FEATURE_COUNT)
		return "unknown smu feature";
	return __smu_feature_names[feature];
}
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
{
	size_t size = 0;
	int ret = 0, i = 0;
	uint32_t feature_mask[2] = { 0 };
	int32_t feature_index = 0;
	uint32_t count = 0;
	uint32_t sort_feature[SMU_FEATURE_COUNT];
	uint64_t hw_feature_count = 0;

	mutex_lock(&smu->mutex);

	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
	if (ret)
		goto failed;

	size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
		       feature_mask[1], feature_mask[0]);
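
	/*
	 * The SMU_FEATURE_* enum order need not match the ASIC's hardware
	 * feature-bit order, so build a lookup table sorted by hardware
	 * index below and print the features in firmware order.
	 */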
	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		feature_index = smu_feature_get_index(smu, i);
		if (feature_index < 0)
			continue;
		sort_feature[feature_index] = i;
		hw_feature_count++;
	}

	for (i = 0; i < hw_feature_count; i++) {
		size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
				count++,
				smu_get_feature_name(smu, sort_feature[i]),
				sort_feature[i],
				!!smu_feature_is_enabled(smu, sort_feature[i]) ?
				"enabled" : "disabled");
	}

failed:
	mutex_unlock(&smu->mutex);

	return size;
}

static int smu_feature_update_enable_state(struct smu_context *smu,
					   uint64_t feature_mask,
					   bool enabled)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_low = 0, feature_high = 0;
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;

	feature_low = (feature_mask >> 0) & 0xffffffff;
	feature_high = (feature_mask >> 32) & 0xffffffff;
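
	/*
	 * The SMC message interface only carries a 32-bit parameter, so the
	 * 64-bit feature mask is split into the two halves above and sent
	 * through the separate ...SmuFeaturesLow/High message pairs below.
	 */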
	if (enabled) {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
						  feature_low, NULL);
		if (ret)
			return ret;
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
						  feature_high, NULL);
		if (ret)
			return ret;
	} else {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
						  feature_low, NULL);
		if (ret)
			return ret;
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
						  feature_high, NULL);
		if (ret)
			return ret;
	}

	mutex_lock(&feature->mutex);
	if (enabled)
		bitmap_or(feature->enabled, feature->enabled,
			  (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
	else
		bitmap_andnot(feature->enabled, feature->enabled,
			      (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
{
	int ret = 0;
	uint32_t feature_mask[2] = { 0 };
	uint64_t feature_2_enabled = 0;
	uint64_t feature_2_disabled = 0;
	uint64_t feature_enables = 0;

	mutex_lock(&smu->mutex);

	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
	if (ret)
		goto out;

	feature_enables = ((uint64_t)feature_mask[1] << 32 |
			   (uint64_t)feature_mask[0]);
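
	/*
	 * Compute the delta against the currently enabled features: bits set
	 * in new_mask but not currently enabled must be turned on, while bits
	 * currently enabled but absent from new_mask must be turned off.
	 */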
	feature_2_enabled = ~feature_enables & new_mask;
	feature_2_disabled = feature_enables & ~new_mask;

	if (feature_2_enabled) {
		ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
		if (ret)
			goto out;
	}
	if (feature_2_disabled) {
		ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
		if (ret)
			goto out;
	}

out:
	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
{
	int ret = 0;

	if (!if_version && !smu_version)
		return -EINVAL;

	if (if_version) {
		ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
		if (ret)
			return ret;
	}

	if (smu_version) {
		ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
		if (ret)
			return ret;
	}

	return ret;
}

int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t min, uint32_t max, bool lock_needed)
{
	int ret = 0;

	if (!smu_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	if (lock_needed)
		mutex_lock(&smu->mutex);
	ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t min, uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (min <= 0 && max <= 0)
		return -EINVAL;

	if (!smu_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0)
		return clk_id;
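
	/*
	 * The message parameter packs the clock id into the upper 16 bits and
	 * the frequency (in MHz) into the lower 16 bits.
	 */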
	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
						  param, NULL);
		if (ret)
			return ret;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
						  param, NULL);
		if (ret)
			return ret;
	}

	return ret;
}

int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			   uint32_t *min, uint32_t *max, bool lock_needed)
{
	uint32_t clock_limit;
	int ret = 0;

	if (!min && !max)
		return -EINVAL;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			clock_limit = smu->smu_table.boot_values.uclk;
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			clock_limit = smu->smu_table.boot_values.gfxclk;
			break;
		case SMU_SOCCLK:
			clock_limit = smu->smu_table.boot_values.socclk;
			break;
		default:
			clock_limit = 0;
			break;
		}

		/* clock in MHz units */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;
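		/*
		 * The boot_values clocks appear to be stored in 10 kHz units,
		 * hence the divide-by-100 above to report MHz.
		 */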
	} else {
		/*
		 * Todo: Have each ASIC (ASIC_ppt funcs) control the callbacks
		 * exposed to the core driver, and add helpers for the
		 * functionality common to SMU_v11_x and SMU_v12_x.
		 */
		ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
	}

	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
			      uint16_t level, uint32_t *value)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!value)
		return -EINVAL;

	if (!smu_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0)
		return clk_id;

	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
					  param, &param);
	if (ret)
		return ret;

	/*
	 * BIT31: 0 - fine-grained DPM, 1 - discrete DPM.
	 * Not supported yet, so mask the flag off.
	 */
	*value = param & 0x7fffffff;

	return ret;
}
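
/*
 * Requesting level index 0xff makes the firmware return the number of
 * DPM levels for the clock rather than a level's frequency;
 * smu_get_dpm_level_count() below relies on that behavior.
 */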
int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t *value)
{
	return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
}

int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t *min_value, uint32_t *max_value)
{
	int ret = 0;
	uint32_t level_count = 0;

	if (!min_value && !max_value)
		return -EINVAL;

	if (min_value) {
		/* by default, use the level 0 clock value as the min */
		ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, min_value);
		if (ret)
			return ret;
	}

	if (max_value) {
		ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
		if (ret)
			return ret;

		ret = smu_get_dpm_freq_by_index(smu, clk_type, level_count - 1, max_value);
		if (ret)
			return ret;
	}

	return ret;
}

bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	default:
		return true;
	}

	if (!smu_feature_is_enabled(smu, feature_id)) {
		return false;
	}

	return true;
}
/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @smu:        smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate:       to power gate if true, ungate otherwise
 *
 * This API uses no smu->mutex lock protection due to:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce);
 *    the caller guarantees those calls are race free.
 * 2. Or it is called on a user request to change
 *    power_dpm_force_performance_level; in that case smu->mutex is already
 *    held by the parent API smu_force_performance_level in the call path.
 */
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
			   bool gate)
{
	int ret = 0;

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
		ret = smu_dpm_set_uvd_enable(smu, !gate);
		break;
	case AMD_IP_BLOCK_TYPE_VCE:
		ret = smu_dpm_set_vce_enable(smu, !gate);
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		ret = smu_dpm_set_jpeg_enable(smu, !gate);
		break;
	default:
		break;
	}

	return ret;
}
int smu_get_power_num_states(struct smu_context *smu,
			     struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported; report one default state */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}
int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = smu->pstate_sclk;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = smu->pstate_mclk;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
		*size = 8;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
		*(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
		*size = 4;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		*size = 0;

	return ret;
}
int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
		     void *table_data, bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	struct smu_table *table = &smu_table->driver_table;
	int table_id = smu_table_get_index(smu, table_index);
	uint32_t table_size;
	int ret = 0;

	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
		return -EINVAL;

	table_size = smu_table->tables[table_index].size;

	if (drv2smu) {
		memcpy(table->cpu_addr, table_data, table_size);
		/*
		 * Flush the HDP cache to guard that the content the GPU sees
		 * is consistent with the CPU.
		 */
		amdgpu_asic_flush_hdp(adev, NULL);
	}
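
	/*
	 * The transfer message parameter packs the table id into the low
	 * 16 bits and the caller-supplied argument (e.g. a table instance
	 * index) into the high 16 bits.
	 */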
	ret = smu_send_smc_msg_with_param(smu, drv2smu ?
					  SMU_MSG_TransferTableDram2Smu :
					  SMU_MSG_TransferTableSmu2Dram,
					  table_id | ((argument & 0xFFFF) << 16),
					  NULL);
	if (ret)
		return ret;

	if (!drv2smu) {
		amdgpu_asic_flush_hdp(adev, NULL);
		memcpy(table_data, table->cpu_addr, table_size);
	}

	return ret;
}
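
/*
 * Note: on Vega20 the SW SMU path is opt-in through the amdgpu_dpm=2
 * module parameter, while Arcturus and newer use it by default, except
 * under SR-IOV in multi-VF configurations.
 */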
bool is_support_sw_smu(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_VEGA20)
		return (amdgpu_dpm == 2) ? true : false;
	else if (adev->asic_type >= CHIP_ARCTURUS) {
		if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
			return false;
		else
			return true;
	}

	return false;
}
bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return false;

	if (adev->asic_type == CHIP_VEGA20)
		return true;

	return false;
}
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t powerplay_table_size;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	powerplay_table_size = smu_table->power_play_table_size;

	mutex_unlock(&smu->mutex);

	return powerplay_table_size;
}
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EINVAL;
	if (header->usStructureSize != size) {
		pr_err("pp table size not matched!\n");
		return -EIO;
	}

	mutex_lock(&smu->mutex);
	if (!smu_table->hardcode_pptable)
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
	if (!smu_table->hardcode_pptable) {
		ret = -ENOMEM;
		goto failed;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	/*
	 * A special hw_fini action (for Navi1x, the DPM disablement is
	 * skipped) may be needed for custom pptable uploading.
	 */
	smu->uploading_custom_pp_table = true;

	ret = smu_reset(smu);
	if (ret)
		pr_info("smu reset failed, ret = %d\n", ret);

	smu->uploading_custom_pp_table = false;

failed:
	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_feature_init_dpm(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];

	if (!smu->pm_enabled)
		return ret;

	mutex_lock(&feature->mutex);
	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					   SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	mutex_lock(&feature->mutex);
	bitmap_or(feature->allowed, feature->allowed,
		  (unsigned long *)allowed_feature_mask,
		  feature->feature_num);
	mutex_unlock(&feature->mutex);

	return ret;
}
int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->enabled);
	mutex_unlock(&feature->mutex);

	return ret;
}
int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
			    bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return -EINVAL;

	WARN_ON(feature_id > feature->feature_num);

	return smu_feature_update_enable_state(smu,
					       1ULL << feature_id,
					       enable);
}
int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}
int smu_feature_set_supported(struct smu_context *smu,
			      enum smu_feature_mask mask,
			      bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return -EINVAL;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	if (enable)
		test_and_set_bit(feature_id, feature->supported);
	else
		test_and_clear_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return 0;
}
static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
		smu->od_enabled = true;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		vega20_set_ppt_funcs(smu);
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		navi10_set_ppt_funcs(smu);
		break;
	case CHIP_ARCTURUS:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
		break;
	case CHIP_RENOIR:
		renoir_set_ppt_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	smu->is_apu = false;
	mutex_init(&smu->mutex);

	return smu_set_funcs(adev);
}
static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (!smu->pm_enabled)
		return 0;

	smu_handle_task(&adev->smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT,
			false);

	return 0;
}
int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
			    uint16_t *size, uint8_t *frev, uint8_t *crev,
			    uint8_t **addr)
{
	struct amdgpu_device *adev = smu->adev;
	uint16_t data_start;

	if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
					   size, frev, crev, &data_start))
		return -EINVAL;

	*addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;

	return 0;
}
static int smu_initialize_pptable(struct smu_context *smu)
{
	/* TODO */
	return 0;
}
static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	ret = smu_initialize_pptable(smu);
	if (ret) {
		pr_err("Failed to init smu_initialize_pptable!\n");
		return ret;
	}

	/*
	 * Create the smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, and TABLE_SMU_METRICS.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		pr_err("Failed to init smc tables!\n");
		return ret;
	}

	/*
	 * Create the smu_power_context structure, and allocate the
	 * smu_dpm_context and context size to fill the smu_power_context
	 * data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		pr_err("Failed to init smu power!\n");
		return ret;
	}

	return 0;
}
static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		pr_err("Failed to smu_fini_smc_tables!\n");
		return ret;
	}

	return 0;
}
static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	mutex_init(&smu->smu_feature.mutex);
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	mutex_init(&smu->smu_baco.mutex);
	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
	smu->smu_baco.platform_support = false;

	mutex_init(&smu->sensor_lock);
	mutex_init(&smu->metrics_lock);
	mutex_init(&smu->message_lock);

	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
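
	/*
	 * Workload profiles are tracked through a priority-ordered bitmask:
	 * each profile is assigned a fixed priority below, and
	 * fls(workload_mask) later selects the highest-priority profile
	 * that is currently requested.
	 */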
	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	ret = smu_init_microcode(smu);
	if (ret) {
		pr_err("Failed to load smu firmware!\n");
		return ret;
	}

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		pr_err("Failed to sw init smc table!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		pr_err("Failed to register smc irq handler!\n");
		return ret;
	}

	if (adev->smu.ppt_funcs->i2c_eeprom_init) {
		ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
		if (ret)
			return ret;
	}

	return 0;
}
static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	if (adev->smu.ppt_funcs->i2c_eeprom_fini)
		smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);

	kfree(smu->irq_source);
	smu->irq_source = NULL;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		pr_err("Failed to sw fini smc table!\n");
		return ret;
	}

	ret = smu_fini_power(smu);
	if (ret) {
		pr_err("Failed to fini smu power!\n");
		return ret;
	}

	return 0;
}
static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);
	uint32_t max_table_size = 0;
	int ret, i;

	/* VRAM allocation for tool table */
	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
		ret = amdgpu_bo_create_kernel(adev,
					      tables[SMU_TABLE_PMSTATUSLOG].size,
					      tables[SMU_TABLE_PMSTATUSLOG].align,
					      tables[SMU_TABLE_PMSTATUSLOG].domain,
					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
		if (ret) {
			pr_err("VRAM allocation for tool table failed!\n");
			return ret;
		}
	}
	/* VRAM allocation for driver table */
	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;

		if (i == SMU_TABLE_PMSTATUSLOG)
			continue;

		if (max_table_size < tables[i].size)
			max_table_size = tables[i].size;
	}
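
	/*
	 * All SMC tables share a single driver-table BO, so it is sized to
	 * the largest table; only the tool (PMSTATUSLOG) table keeps the
	 * dedicated allocation made above.
	 */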
	driver_table->size = max_table_size;
	driver_table->align = PAGE_SIZE;
	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

	ret = amdgpu_bo_create_kernel(adev,
				      driver_table->size,
				      driver_table->align,
				      driver_table->domain,
				      &driver_table->bo,
				      &driver_table->mc_address,
				      &driver_table->cpu_addr);
	if (ret) {
		pr_err("VRAM allocation for driver table failed!\n");
		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
	}

	return ret;
}
static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);

	if (!tables)
		return 0;

	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

	amdgpu_bo_free_kernel(&driver_table->bo,
			      &driver_table->mc_address,
			      &driver_table->cpu_addr);

	return 0;
}
static int smu_smc_table_hw_init(struct smu_context *smu,
				 bool initialize)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	if (smu_is_dpm_running(smu) && adev->in_suspend) {
		pr_info("dpm has been enabled\n");
		return 0;
	}

	if (adev->asic_type != CHIP_ARCTURUS) {
		ret = smu_init_display_count(smu, 0);
		if (ret)
			return ret;
	}

	if (initialize) {
		/* get boot_values from vbios to set revision, gfxclk, etc. */
		ret = smu_get_vbios_bootup_values(smu);
		if (ret)
			return ret;

		ret = smu_setup_pptable(smu);
		if (ret)
			return ret;

		ret = smu_get_clk_info_from_vbios(smu);
		if (ret)
			return ret;

		/*
		 * Check that the format_revision in vbios matches the pptable
		 * header version, and that the structure size is not 0.
		 */
		ret = smu_check_pptable(smu);
		if (ret)
			return ret;

		/*
		 * Allocate vram bos to store smc table contents.
		 */
		ret = smu_init_fb_allocations(smu);
		if (ret)
			return ret;

		/*
		 * Parse the pptable format and fill the PPTable_t smc_pptable
		 * in the smu_table_context structure; then read the
		 * smc_dpm_table from vbios and fill it into smc_pptable too.
		 */
		ret = smu_parse_pptable(smu);
		if (ret)
			return ret;

		/*
		 * Send the GetDriverIfVersion msg to check that the return
		 * value matches the DRIVER_IF_VERSION of the smc header.
		 */
		ret = smu_check_fw_version(smu);
		if (ret)
			return ret;
	}

	ret = smu_set_driver_table_location(smu);
	if (ret)
		return ret;

	/* smu_dump_pptable(smu); */
	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Copy the pptable bo in vram to the smc with SMU MSGs such as
		 * SetDriverDramAddr and TransferTableDram2Smu.
		 */
		ret = smu_write_pptable(smu);
		if (ret)
			return ret;

		/* issue Run*Btc msg */
		ret = smu_run_btc(smu);
		if (ret)
			return ret;

		ret = smu_feature_set_allowed_mask(smu);
		if (ret)
			return ret;

		ret = smu_system_features_control(smu, true);
		if (ret)
			return ret;
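
		/*
		 * Certain Navi10 SKUs, identified by the PCI device and
		 * revision IDs below, need the UMC CDR feature disabled as
		 * a workaround on 12Gbps memory.
		 */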
		if (adev->asic_type == CHIP_NAVI10) {
			if ((adev->pdev->device == 0x731f &&
			     (adev->pdev->revision == 0xc2 ||
			      adev->pdev->revision == 0xc3 ||
			      adev->pdev->revision == 0xca ||
			      adev->pdev->revision == 0xcb)) ||
			    (adev->pdev->device == 0x66af &&
			     (adev->pdev->revision == 0xf3 ||
			      adev->pdev->revision == 0xf4 ||
			      adev->pdev->revision == 0xf5 ||
			      adev->pdev->revision == 0xf6))) {
				ret = smu_disable_umc_cdr_12gbps_workaround(smu);
				if (ret) {
					pr_err("Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");
					return ret;
				}
			}
		}
	}
	if (smu->ppt_funcs->set_power_source) {
		/*
		 * For Navi1X, manually switch it to AC mode as the PMFW
		 * may boot it in DC mode.
		 */
		if (adev->pm.ac_power)
			ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC);
		else
			ret = smu_set_power_source(smu, SMU_POWER_SOURCE_DC);
		if (ret) {
			pr_err("Failed to switch to %s mode!\n",
			       adev->pm.ac_power ? "AC" : "DC");
			return ret;
		}
	}

	if (adev->asic_type != CHIP_ARCTURUS) {
		ret = smu_notify_display_change(smu);
		if (ret)
			return ret;

		/*
		 * Set the min deep sleep dce fclk with the bootup value from
		 * vbios via the SetMinDeepSleepDcefclk MSG.
		 */
		ret = smu_set_min_dcef_deep_sleep(smu);
		if (ret)
			return ret;
	}
	if (initialize) {
		/*
		 * Set initialized values (from vbios) into the dpm tables
		 * context, such as gfxclk, memclk and dcefclk, and enable
		 * the DPM feature for each dpm table once hw initialized.
		 */
		ret = smu_populate_smc_tables(smu);
		if (ret)
			return ret;

		ret = smu_init_max_sustainable_clocks(smu);
		if (ret)
			return ret;
	}

	if (adev->asic_type != CHIP_ARCTURUS) {
		ret = smu_override_pcie_parameters(smu);
		if (ret)
			return ret;
	}

	ret = smu_set_default_od_settings(smu, initialize);
	if (ret)
		return ret;

	if (initialize) {
		ret = smu_populate_umd_state_clk(smu);
		if (ret)
			return ret;

		ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false);
		if (ret)
			return ret;
	}

	/*
	 * Set the PMSTATUSLOG table bo address with the SetToolsDramAddr MSG
	 * for tools.
	 */
	if (!amdgpu_sriov_vf(adev)) {
		ret = smu_set_tool_table_location(smu);
		if (ret)
			return ret;
	}

	if (!smu_is_dpm_running(smu))
		pr_info("dpm has been disabled\n");

	return ret;
}
/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool is used for SMC internal usage; its location changes
 * can be communicated through the SetSystemVirtualDramAddr and
 * DramLogSetDramAddr messages.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		break;
	default:
		break;
	}

	return ret;
}
static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}
static int smu_start_smc_engine(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (adev->asic_type < CHIP_NAVI10) {
			if (smu->ppt_funcs->load_microcode) {
				ret = smu->ppt_funcs->load_microcode(smu);
				if (ret)
					return ret;
			}
		}
	}

	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
		if (ret)
			pr_err("SMC is not ready\n");
	}

	return ret;
}
static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	ret = smu_start_smc_engine(smu);
	if (ret) {
		pr_err("SMU is not ready yet!\n");
		return ret;
	}

	if (smu->is_apu) {
		smu_powergate_sdma(&adev->smu, false);
		smu_powergate_vcn(&adev->smu, false);
		smu_powergate_jpeg(&adev->smu, false);
		smu_set_gfx_cgpg(&adev->smu, true);
	}

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	ret = smu_feature_init_dpm(smu);
	if (ret)
		goto failed;

	ret = smu_smc_table_hw_init(smu, true);
	if (ret)
		goto failed;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		goto failed;

	/*
	 * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages
	 * to notify the SMC of the pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret)
		goto failed;

	ret = smu_start_thermal_control(smu);
	if (ret)
		goto failed;

	if (!smu->pm_enabled)
		adev->pm.dpm_enabled = false;
	else
		adev->pm.dpm_enabled = true;	/* TODO: will set dpm_enabled flag when VCN and DAL DPM are workable */

	pr_info("SMU is initialized successfully!\n");

	return 0;

failed:
	return ret;
}
static int smu_stop_dpms(struct smu_context *smu)
{
	return smu_system_features_control(smu, false);
}
static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	struct smu_table_context *table_context = &smu->smu_table;
	int ret = 0;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (smu->is_apu) {
		smu_powergate_sdma(&adev->smu, true);
		smu_powergate_vcn(&adev->smu, true);
		smu_powergate_jpeg(&adev->smu, true);
	}

	if (!smu->pm_enabled)
		return 0;

	if (!amdgpu_sriov_vf(adev)) {
		ret = smu_stop_thermal_control(smu);
		if (ret) {
			pr_warn("Fail to stop thermal control!\n");
			return ret;
		}

		/*
		 * For custom pptable uploading, skip the DPM features
		 * disable process on Navi1x ASICs.
		 *   - The gfx-related features are under control of the RLC
		 *     on those ASICs; an RLC reinitialization would be needed
		 *     to reenable them, which costs much more effort.
		 *
		 *   - SMU firmware can handle the DPM reenablement
		 *     properly.
		 */
		if (!smu->uploading_custom_pp_table ||
		    !((adev->asic_type >= CHIP_NAVI10) &&
		      (adev->asic_type <= CHIP_NAVI12))) {
			ret = smu_stop_dpms(smu);
			if (ret) {
				pr_warn("Fail to stop Dpms!\n");
				return ret;
			}
		}
	}

	kfree(table_context->driver_pptable);
	table_context->driver_pptable = NULL;

	kfree(table_context->max_sustainable_clocks);
	table_context->max_sustainable_clocks = NULL;

	kfree(table_context->overdrive_table);
	table_context->overdrive_table = NULL;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	return 0;
}
int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_hw_fini(adev);
	if (ret)
		return ret;

	ret = smu_hw_init(adev);
	if (ret)
		return ret;

	return ret;
}
static int smu_disable_dpm(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t smu_version;
	int ret = 0;
	bool use_baco = !smu->is_apu &&
		((adev->in_gpu_reset &&
		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
		 ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));

	ret = smu_get_smc_version(smu, NULL, &smu_version);
	if (ret) {
		pr_err("Failed to get smu version.\n");
		return ret;
	}

	/*
	 * Disable all enabled SMU features. This should ideally be handled
	 * in SMU FW; until that sequence is operational, the driver issues
	 * the call as a backup.
	 */
	ret = smu_system_features_control(smu, false);
	if (ret) {
		pr_err("Failed to disable smu features.\n");
		return ret;
	}

	/*
	 * Arcturus does not have a BACO bit in the disable-feature mask.
	 * Enablement of the BACO bit on Arcturus should be skipped.
	 */
	if (adev->asic_type == CHIP_ARCTURUS) {
		if (use_baco && (smu_version > 0x360e00))
			return 0;
	}

	/* For baco, we need to leave the BACO feature enabled */
	if (use_baco) {
		/*
		 * We need a reliable way to check whether SMU_FEATURE_BACO_BIT
		 * is supported: 'smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)'
		 * would always return false here, as the
		 * 'smu_system_features_control(smu, false)' call just issued
		 * above disabled all SMU features.
		 *
		 * Thus 'smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT)' is
		 * used for the check instead.
		 */
		if (smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT) >= 0) {
			ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
			if (ret)
				pr_warn("set BACO feature enabled failed, return %d\n", ret);
		}
	}

	return ret;
}
)
1535 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
1536 struct smu_context
*smu
= &adev
->smu
;
1539 if (amdgpu_sriov_vf(adev
)&& !amdgpu_sriov_is_pp_one_vf(adev
))
1542 if (!smu
->pm_enabled
)
1545 if(!amdgpu_sriov_vf(adev
)) {
1546 ret
= smu_disable_dpm(smu
);
1551 smu
->watermarks_bitmap
&= ~(WATERMARKS_LOADED
);
1553 if (adev
->asic_type
>= CHIP_NAVI10
&&
1554 adev
->gfx
.rlc
.funcs
->stop
)
1555 adev
->gfx
.rlc
.funcs
->stop(adev
);
1557 smu_set_gfx_cgpg(&adev
->smu
, false);
static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	pr_info("SMU is resuming...\n");

	ret = smu_start_smc_engine(smu);
	if (ret) {
		pr_err("SMU is not ready yet!\n");
		goto failed;
	}

	ret = smu_smc_table_hw_init(smu, false);
	if (ret)
		goto failed;

	ret = smu_start_thermal_control(smu);
	if (ret)
		goto failed;

	if (smu->is_apu)
		smu_set_gfx_cgpg(&adev->smu, true);

	smu->disable_uclk_switch = 0;

	pr_info("SMU is resumed successfully!\n");

	return 0;

failed:
	return ret;
}
int smu_display_configuration_change(struct smu_context *smu,
				     const struct amd_pp_display_configuration *display_config)
{
	int index = 0;
	int num_of_active_display = 0;

	if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
		return -EINVAL;

	if (!display_config)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_deep_sleep_dcefclk)
		smu->ppt_funcs->set_deep_sleep_dcefclk(smu,
				display_config->min_dcef_deep_sleep_set_clk / 100);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			num_of_active_display++;
	}

	smu_set_active_display_count(smu, num_of_active_display);

	smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
			   display_config->cpu_cc6_disable,
			   display_config->cpu_pstate_disable,
			   display_config->nb_pstate_switch_disable);

	mutex_unlock(&smu->mutex);

	return 0;
}
static int smu_get_clock_info(struct smu_context *smu,
			      struct smu_clock_info *clk_info,
			      enum smu_perf_level_designation designation)
{
	int ret;
	struct smu_performance_level level = {0};

	if (!clk_info)
		return -EINVAL;

	ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	ret = smu_get_perf_level(smu, designation, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	return 0;
}
int smu_get_current_clocks(struct smu_context *smu,
			   struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = {0};
	struct smu_clock_info hw_clocks;
	int ret = 0;

	if (!is_support_sw_smu(smu->adev))
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_get_dal_power_level(smu, &simple_clocks);

	if (smu->support_power_containment)
		ret = smu_get_clock_info(smu, &hw_clocks,
					 PERF_LEVEL_POWER_CONTAINMENT);
	else
		ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);

	if (ret) {
		pr_err("Error in smu_get_clock_info\n");
		goto failed;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}

failed:
	mutex_unlock(&smu->mutex);

	return ret;
}
static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}
static int smu_enable_umd_pstate(void *handle,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context*)(handle);
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context))
		return -EINVAL;

	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_dpm_ctx->enable_umd_pstate = true;
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_UNGATE);
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			smu_dpm_ctx->enable_umd_pstate = false;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_GATE);
		}
	}

	return 0;
}
*smu
,
1773 enum amd_dpm_forced_level level
,
1774 bool skip_display_settings
)
1779 struct smu_dpm_context
*smu_dpm_ctx
= &(smu
->smu_dpm
);
1781 if (!smu
->pm_enabled
)
1784 if (!skip_display_settings
) {
1785 ret
= smu_display_config_changed(smu
);
1787 pr_err("Failed to change display config!");
1792 ret
= smu_apply_clocks_adjust_rules(smu
);
1794 pr_err("Failed to apply clocks adjust rules!");
1798 if (!skip_display_settings
) {
1799 ret
= smu_notify_smc_display_config(smu
);
1801 pr_err("Failed to notify smc display config!");
1806 if (smu_dpm_ctx
->dpm_level
!= level
) {
1807 ret
= smu_asic_set_performance_level(smu
, level
);
1809 pr_err("Failed to set performance level!");
1813 /* update the saved copy */
1814 smu_dpm_ctx
->dpm_level
= level
;
1817 if (smu_dpm_ctx
->dpm_level
!= AMD_DPM_FORCED_LEVEL_MANUAL
) {
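		/*
		 * fls() returns the most significant set bit, i.e. the
		 * highest-priority workload currently requested; fall back
		 * to index 0 (the bootup default profile) when the mask is
		 * empty.
		 */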
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];

		if (smu->power_profile_mode != workload)
			smu_set_power_profile_mode(smu, &workload, 0, false);
	}

	return ret;
}
int smu_handle_task(struct smu_context *smu,
		    enum amd_dpm_forced_level level,
		    enum amd_pp_task task_id,
		    bool lock_needed)
{
	int ret = 0;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = smu_pre_display_config_changed(smu);
		if (ret)
			goto out;
		ret = smu_set_cpu_power_state(smu);
		if (ret)
			goto out;
		ret = smu_adjust_power_state_dynamic(smu, level, false);
		break;
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = smu_adjust_power_state_dynamic(smu, level, true);
		break;
	default:
		break;
	}

out:
	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}
int smu_switch_power_profile(struct smu_context *smu,
			     enum PP_SMC_POWER_PROFILE type,
			     bool en)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	long workload;
	uint32_t index;

	if (!smu->pm_enabled)
		return -EINVAL;

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (!en) {
		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	} else {
		smu->workload_mask |= (1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		smu_set_power_profile_mode(smu, &workload, 0, false);

	mutex_unlock(&smu->mutex);

	return 0;
}
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	enum amd_dpm_forced_level level;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	mutex_lock(&(smu->mutex));
	level = smu_dpm_ctx->dpm_level;
	mutex_unlock(&(smu->mutex));

	return level;
}
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	ret = smu_enable_umd_pstate(smu, &level);
	if (ret) {
		mutex_unlock(&smu->mutex);
		return ret;
	}

	ret = smu_handle_task(smu, level,
			      AMD_PP_TASK_READJUST_POWER_STATE,
			      false);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	mutex_lock(&smu->mutex);
	ret = smu_init_display_count(smu, count);
	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_force_clk_levels(struct smu_context *smu,
			 enum smu_clk_type clk_type,
			 uint32_t mask,
			 bool lock_needed)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_debug("force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

	if (lock_needed)
		mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);

	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_mp1_state(struct smu_context *smu,
		      enum pp_mp1_state mp1_state)
{
	uint16_t msg;
	int ret;

	/*
	 * The SMC is not fully ready. That may be
	 * expected as the IP may be masked.
	 * So, just return without error.
	 */
	if (!smu->pm_enabled)
		return 0;

	mutex_lock(&smu->mutex);

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		mutex_unlock(&smu->mutex);
		return 0;
	}

	/* some asics may not support those messages */
	if (smu_msg_get_index(smu, msg) < 0) {
		mutex_unlock(&smu->mutex);
		return 0;
	}

	ret = smu_send_smc_msg(smu, msg, NULL);
	if (ret)
		pr_err("[PrepareMp1] Failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_df_cstate(struct smu_context *smu,
		      enum pp_df_cstate state)
{
	int ret = 0;

	/*
	 * The SMC is not fully ready. That may be
	 * expected as the IP may be masked.
	 * So, just return without error.
	 */
	if (!smu->pm_enabled)
		return 0;

	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
		return 0;

	mutex_lock(&smu->mutex);

	ret = smu->ppt_funcs->set_df_cstate(smu, state);
	if (ret)
		pr_err("[SetDfCstate] failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_write_watermarks_table(struct smu_context *smu)
{
	void *watermarks_table = smu->smu_table.watermarks_table;

	if (!watermarks_table)
		return -EINVAL;

	return smu_update_table(smu,
				SMU_TABLE_WATERMARKS,
				0,
				watermarks_table,
				true);
}
*smu
,
2065 struct dm_pp_wm_sets_with_clock_ranges_soc15
*clock_ranges
)
2067 void *table
= smu
->smu_table
.watermarks_table
;
2072 mutex_lock(&smu
->mutex
);
2074 if (!smu
->disable_watermark
&&
2075 smu_feature_is_enabled(smu
, SMU_FEATURE_DPM_DCEFCLK_BIT
) &&
2076 smu_feature_is_enabled(smu
, SMU_FEATURE_DPM_SOCCLK_BIT
)) {
2077 smu_set_watermarks_table(smu
, table
, clock_ranges
);
2079 if (!(smu
->watermarks_bitmap
& WATERMARKS_EXIST
)) {
2080 smu
->watermarks_bitmap
|= WATERMARKS_EXIST
;
2081 smu
->watermarks_bitmap
&= ~WATERMARKS_LOADED
;
2085 mutex_unlock(&smu
->mutex
);
int smu_set_ac_dc(struct smu_context *smu)
{
	int ret = 0;

	/* controlled by firmware */
	if (smu->dc_controlled_by_gpio)
		return 0;

	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->set_power_source) {
		if (smu->adev->pm.ac_power)
			ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC);
		else
			ret = smu_set_power_source(smu, SMU_POWER_SOURCE_DC);
		if (ret)
			pr_err("Failed to switch to %s mode!\n",
			       smu->adev->pm.ac_power ? "AC" : "DC");
	}
	mutex_unlock(&smu->mutex);

	return ret;
}
const struct amd_ip_funcs smu_ip_funcs = {
	.name = "smu",
	.early_init = smu_early_init,
	.late_init = smu_late_init,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
	.enable_umd_pstate = smu_enable_umd_pstate,
};
const struct amdgpu_ip_block_version smu_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};
const struct amdgpu_ip_block_version smu_v12_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};
int smu_load_microcode(struct smu_context *smu)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->load_microcode)
		ret = smu->ppt_funcs->load_microcode(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_check_fw_status(struct smu_context *smu)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->check_fw_status)
		ret = smu->ppt_funcs->check_fw_status(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_gfx_cgpg)
		ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_speed_rpm)
		ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_power_limit(struct smu_context *smu,
			uint32_t *limit,
			bool def,
			bool lock_needed)
{
	int ret = 0;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_power_limit)
		ret = smu->ppt_funcs->get_power_limit(smu, limit, def);

	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_power_limit)
		ret = smu->ppt_funcs->set_power_limit(smu, limit);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->print_clk_levels)
		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_od_percentage)
		ret = smu->ppt_funcs->get_od_percentage(smu, type);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_od_percentage)
		ret = smu->ppt_funcs->set_od_percentage(smu, type, value);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_od_edit_dpm_table(struct smu_context *smu,
			  enum PP_OD_DPM_TABLE_COMMAND type,
			  long *input, uint32_t size)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->od_edit_dpm_table)
		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_read_sensor(struct smu_context *smu,
		    enum amd_pp_sensors sensor,
		    void *data, uint32_t *size)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->read_sensor)
		ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_power_profile_mode)
		ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_power_profile_mode(struct smu_context *smu,
			       long *param,
			       uint32_t param_size,
			       bool lock_needed)
{
	int ret = 0;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_power_profile_mode)
		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);

	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_fan_control_mode(struct smu_context *smu)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_control_mode)
		ret = smu->ppt_funcs->get_fan_control_mode(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_fan_control_mode(struct smu_context *smu, int value)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_control_mode)
		ret = smu->ppt_funcs->set_fan_control_mode(smu, value);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_speed_percent)
		ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_speed_percent)
		ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_speed_rpm)
		ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_deep_sleep_dcefclk)
		ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	if (smu->ppt_funcs->set_active_display_count)
		ret = smu->ppt_funcs->set_active_display_count(smu, count);

	return ret;
}
int smu_get_clock_by_type(struct smu_context *smu,
			  enum amd_pp_clock_type type,
			  struct amd_pp_clocks *clocks)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type)
		ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_max_high_clocks(struct smu_context *smu,
			    struct amd_pp_simple_clock_info *clocks)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_max_high_clocks)
		ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_clock_by_type_with_latency(struct smu_context *smu,
				       enum smu_clk_type clk_type,
				       struct pp_clock_levels_with_latency *clocks)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type_with_latency)
		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
				       enum amd_pp_clock_type type,
				       struct pp_clock_levels_with_voltage *clocks)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type_with_voltage)
		ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_display_clock_voltage_request(struct smu_context *smu,
				      struct pp_display_clock_request *clock_req)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->display_clock_voltage_request)
		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
{
	int ret = -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->display_disable_memory_clock_switch)
		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_notify_smu_enable_pwe(struct smu_context *smu)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->notify_smu_enable_pwe)
		ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_xgmi_pstate(struct smu_context *smu,
			uint32_t pstate)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_xgmi_pstate)
		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_azalia_d3_pme(struct smu_context *smu)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_azalia_d3_pme)
		ret = smu->ppt_funcs->set_azalia_d3_pme(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

bool smu_baco_is_support(struct smu_context *smu)
{
	bool ret = false;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
		ret = smu->ppt_funcs->baco_is_support(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
{
	if (smu->ppt_funcs->baco_get_state) {
		mutex_lock(&smu->mutex);
		*state = smu->ppt_funcs->baco_get_state(smu);
		mutex_unlock(&smu->mutex);
	}

	return 0;
}
int smu_baco_enter(struct smu_context *smu)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->baco_enter)
		ret = smu->ppt_funcs->baco_enter(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_baco_exit(struct smu_context *smu)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->baco_exit)
		ret = smu->ppt_funcs->baco_exit(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_mode2_reset(struct smu_context *smu)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->mode2_reset)
		ret = smu->ppt_funcs->mode2_reset(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
					 struct pp_smu_nv_clock_table *max_clocks)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_uclk_dpm_states(struct smu_context *smu,
			    unsigned int *clock_values_in_khz,
			    unsigned int *num_states)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_uclk_dpm_states)
		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);

	mutex_unlock(&smu->mutex);

	return ret;
}

enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_current_power_state)
		pm_state = smu->ppt_funcs->get_current_power_state(smu);

	mutex_unlock(&smu->mutex);

	return pm_state;
}

int smu_get_dpm_clock_table(struct smu_context *smu,
			    struct dpm_clocks *clock_table)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_dpm_clock_table)
		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);

	mutex_unlock(&smu->mutex);

	return ret;
}

uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
{
	uint32_t ret = 0;

	if (smu->ppt_funcs->get_pptable_power_limit)
		ret = smu->ppt_funcs->get_pptable_power_limit(smu);

	return ret;
}