]> git.ipfire.org Git - thirdparty/kernel/stable.git/blame - drivers/gpu/drm/amd/pm/amdgpu_pm.c
Merge tag 'loongarch-kvm-6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhu...
[thirdparty/kernel/stable.git] / drivers / gpu / drm / amd / pm / amdgpu_pm.c
CommitLineData
d38ceaf9 1/*
9ce6aae1
AD
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
d38ceaf9
AD
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Rafał Miłecki <zajec5@gmail.com>
23 * Alex Deucher <alexdeucher@gmail.com>
24 */
fdf2f6c5 25
d38ceaf9
AD
26#include "amdgpu.h"
27#include "amdgpu_drv.h"
28#include "amdgpu_pm.h"
29#include "amdgpu_dpm.h"
30#include "atom.h"
fdf2f6c5 31#include <linux/pci.h>
d38ceaf9
AD
32#include <linux/hwmon.h>
33#include <linux/hwmon-sysfs.h>
ddf74e79 34#include <linux/nospec.h>
b9a9294b 35#include <linux/pm_runtime.h>
517cb957 36#include <asm/processor.h>
1b5708ff 37
3e38b634
EQ
/* Limits for the overdrive (OD) sysfs attribute tree built from these types */
#define MAX_NUM_OF_FEATURES_PER_SUBSET 8
#define MAX_NUM_OF_SUBSETS 8

/* One sysfs attribute within an OD kobject, linked into od_kobj::attribute */
struct od_attribute {
	struct kobj_attribute attribute;
	struct list_head entry;
};

/* A kobject node in the OD hierarchy; priv points at the owning amdgpu_device */
struct od_kobj {
	struct kobject kobj;
	struct list_head entry;
	struct list_head attribute;
	void *priv;
};

/* Visibility/show/store callbacks backing a single OD feature attribute */
struct od_feature_ops {
	umode_t (*is_visible)(struct amdgpu_device *adev);
	ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf);
	ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count);
};

/* A named OD feature (leaf attribute) */
struct od_feature_item {
	const char *name;
	struct od_feature_ops ops;
};

/* A named group of up to MAX_NUM_OF_FEATURES_PER_SUBSET OD features */
struct od_feature_container {
	char *name;
	struct od_feature_ops ops;
	struct od_feature_item sub_feature[MAX_NUM_OF_FEATURES_PER_SUBSET];
};

/* The full static description of the OD attribute tree */
struct od_feature_set {
	struct od_feature_container containers[MAX_NUM_OF_SUBSETS];
};
75
2adc1156
EQ
/* Maps each hwmon temperature channel to the label exported via sysfs */
static const struct hwmon_temp_label {
	enum PP_HWMON_TEMP channel;
	const char *label;
} temp_label[] = {
	{PP_TEMP_EDGE, "edge"},
	{PP_TEMP_JUNCTION, "junction"},
	{PP_TEMP_MEM, "mem"},
};
84
3867e370
DP
/*
 * Human-readable names for the power profile modes, indexed by the
 * PP_SMC_POWER_PROFILE_* enum values. Exported (non-static) for use by
 * other amdgpu power-management code.
 */
const char * const amdgpu_pp_profile_name[] = {
	"BOOTUP_DEFAULT",
	"3D_FULL_SCREEN",
	"POWER_SAVING",
	"VIDEO",
	"VR",
	"COMPUTE",
	"CUSTOM",
	"WINDOW_3D",
	"CAPPED",
	"UNCAPPED",
};
97
ca8d40ca
AD
98/**
99 * DOC: power_dpm_state
100 *
dc85db25
AD
101 * The power_dpm_state file is a legacy interface and is only provided for
102 * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
103 * certain power related parameters. The file power_dpm_state is used for this.
ca8d40ca 104 * It accepts the following arguments:
dc85db25 105 *
ca8d40ca 106 * - battery
dc85db25 107 *
ca8d40ca 108 * - balanced
dc85db25 109 *
ca8d40ca
AD
110 * - performance
111 *
112 * battery
113 *
114 * On older GPUs, the vbios provided a special power state for battery
115 * operation. Selecting battery switched to this state. This is no
116 * longer provided on newer GPUs so the option does nothing in that case.
117 *
118 * balanced
119 *
120 * On older GPUs, the vbios provided a special power state for balanced
121 * operation. Selecting balanced switched to this state. This is no
122 * longer provided on newer GPUs so the option does nothing in that case.
123 *
124 * performance
125 *
126 * On older GPUs, the vbios provided a special power state for performance
127 * operation. Selecting performance switched to this state. This is no
128 * longer provided on newer GPUs so the option does nothing in that case.
129 *
130 */
131
4e01847c
KW
/*
 * Show handler for the power_dpm_state sysfs file: prints the current
 * power state as "battery", "balanced" or "performance".
 */
static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type pm;
	int ret;

	/* reject access while the GPU is resetting or (non-runtime) suspended */
	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		/* get_sync bumps the usage count even on failure; drop it */
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	amdgpu_dpm_get_current_power_state(adev, &pm);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return sysfs_emit(buf, "%s\n",
			  (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			  (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}
161
4e01847c
KW
/*
 * Store handler for the power_dpm_state sysfs file: accepts "battery",
 * "balanced" or "performance" and forwards the request to the DPM layer.
 * Returns count on success, negative errno on failure.
 */
static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type state;
	int ret;

	/* reject access while the GPU is resetting or (non-runtime) suspended */
	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	/* prefix match, so a trailing newline from `echo` is tolerated */
	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		/* get_sync bumps the usage count even on failure; drop it */
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	amdgpu_dpm_set_power_state(adev, state);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
199
8567f681
AD
200
201/**
202 * DOC: power_dpm_force_performance_level
203 *
204 * The amdgpu driver provides a sysfs API for adjusting certain power
205 * related parameters. The file power_dpm_force_performance_level is
206 * used for this. It accepts the following arguments:
dc85db25 207 *
8567f681 208 * - auto
dc85db25 209 *
8567f681 210 * - low
dc85db25 211 *
8567f681 212 * - high
dc85db25 213 *
8567f681 214 * - manual
dc85db25 215 *
8567f681 216 * - profile_standard
dc85db25 217 *
8567f681 218 * - profile_min_sclk
dc85db25 219 *
8567f681 220 * - profile_min_mclk
dc85db25 221 *
8567f681
AD
222 * - profile_peak
223 *
224 * auto
225 *
226 * When auto is selected, the driver will attempt to dynamically select
227 * the optimal power profile for current conditions in the driver.
228 *
229 * low
230 *
231 * When low is selected, the clocks are forced to the lowest power state.
232 *
233 * high
234 *
235 * When high is selected, the clocks are forced to the highest power state.
236 *
237 * manual
238 *
239 * When manual is selected, the user can manually adjust which power states
240 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
241 * and pp_dpm_pcie files and adjust the power state transition heuristics
242 * via the pp_power_profile_mode sysfs file.
243 *
244 * profile_standard
245 * profile_min_sclk
246 * profile_min_mclk
247 * profile_peak
248 *
249 * When the profiling modes are selected, clock and power gating are
250 * disabled and the clocks are set for different profiling cases. This
251 * mode is recommended for profiling specific work loads where you do
252 * not want clock or power gating for clock fluctuation to interfere
253 * with your results. profile_standard sets the clocks to a fixed clock
254 * level which varies from asic to asic. profile_min_sclk forces the sclk
255 * to the lowest level. profile_min_mclk forces the mclk to the lowest level.
256 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
257 *
258 */
259
4e01847c
KW
/*
 * Show handler for power_dpm_force_performance_level: prints the currently
 * forced DPM level ("auto", "low", "high", "manual", the profile_* modes,
 * "perf_determinism", or "unknown").
 */
static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
							    struct device_attribute *attr,
							    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_dpm_forced_level level = 0xff;
	int ret;

	/* reject access while the GPU is resetting or (non-runtime) suspended */
	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		/* get_sync bumps the usage count even on failure; drop it */
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	level = amdgpu_dpm_get_performance_level(adev);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return sysfs_emit(buf, "%s\n",
			  (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
			  (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
			  (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
			  (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
			  (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
			  "unknown");
}
297
4e01847c
KW
/*
 * Store handler for power_dpm_force_performance_level: parses the requested
 * level keyword and asks the DPM layer to force it. On success any
 * user-context stable-pstate setting is cleared (under the pstate lock).
 * Returns count on success, negative errno on failure.
 */
static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
							    struct device_attribute *attr,
							    const char *buf,
							    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_dpm_forced_level level;
	int ret = 0;

	/* reject access while the GPU is resetting or (non-runtime) suspended */
	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	/* prefix match, so a trailing newline from `echo` is tolerated */
	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_AUTO;
	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_MANUAL;
	} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
	} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
	} else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
	} else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
	} else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;
	} else {
		return -EINVAL;
	}

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		/* get_sync bumps the usage count even on failure; drop it */
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	/* serialize against stable-pstate requests from user contexts */
	mutex_lock(&adev->pm.stable_pstate_ctx_lock);
	if (amdgpu_dpm_force_performance_level(adev, level)) {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
		return -EINVAL;
	}
	/* override whatever a user ctx may have set */
	adev->pm.stable_pstate_ctx = NULL;
	mutex_unlock(&adev->pm.stable_pstate_ctx_lock);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
359
f3898ea1
EH
/*
 * Show handler for pp_num_states: prints the number of power states followed
 * by one "index name" line per state (boot/battery/balanced/performance/
 * default). On query failure the state table is zeroed, so only the
 * "states: 0" header is emitted.
 */
static ssize_t amdgpu_get_pp_num_states(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct pp_states_info data;
	uint32_t i;
	int buf_len, ret;

	/* reject access while the GPU is resetting or (non-runtime) suspended */
	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		/* get_sync bumps the usage count even on failure; drop it */
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (amdgpu_dpm_get_pp_num_states(adev, &data))
		memset(&data, 0, sizeof(data));

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
	for (i = 0; i < data.nums; i++)
		buf_len += sysfs_emit_at(buf, buf_len, "%d %s\n", i,
				(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
				(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
				(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
				(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");

	return buf_len;
}
397
/*
 * Show handler for pp_cur_state: prints the index of the current power state
 * within the pp_states table, or -EINVAL (as a number) when the current state
 * is not found in the table.
 */
static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct pp_states_info data = {0};
	enum amd_pm_state_type pm = 0;
	int i = 0, ret = 0;

	/* reject access while the GPU is resetting or (non-runtime) suspended */
	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		/* get_sync bumps the usage count even on failure; drop it */
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	amdgpu_dpm_get_current_power_state(adev, &pm);

	ret = amdgpu_dpm_get_pp_num_states(adev, &data);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return ret;

	/* translate the state enum into its index in the state table */
	for (i = 0; i < data.nums; i++) {
		if (pm == data.states[i])
			break;
	}

	if (i == data.nums)
		i = -EINVAL;

	return sysfs_emit(buf, "%d\n", i);
}
439
440static ssize_t amdgpu_get_pp_force_state(struct device *dev,
441 struct device_attribute *attr,
442 char *buf)
443{
444 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 445 struct amdgpu_device *adev = drm_to_adev(ddev);
f3898ea1 446
53b3f8f4 447 if (amdgpu_in_reset(adev))
48b270bb 448 return -EPERM;
d2ae842d
AD
449 if (adev->in_suspend && !adev->in_runpm)
450 return -EPERM;
48b270bb 451
d698a2c4 452 if (adev->pm.pp_force_state_enabled)
cd4d7464
RZ
453 return amdgpu_get_pp_cur_state(dev, attr, buf);
454 else
a9ca9bb3 455 return sysfs_emit(buf, "\n");
f3898ea1
EH
456}
457
/*
 * Store handler for pp_force_state: accepts a state-table index and asks the
 * DPM layer to enable that user-selected state. Writing an empty value (a
 * single character, i.e. just the newline) disables forcing. The index is
 * validated against the state table size and sanitized with
 * array_index_nospec() before use. Returns count on success.
 */
static ssize_t amdgpu_set_pp_force_state(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type state = 0;
	struct pp_states_info data;
	unsigned long idx;
	int ret;

	/* reject access while the GPU is resetting or (non-runtime) suspended */
	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	/* any write first disables forcing; it is re-enabled below on success */
	adev->pm.pp_force_state_enabled = false;

	if (strlen(buf) == 1)
		return count;

	ret = kstrtoul(buf, 0, &idx);
	if (ret || idx >= ARRAY_SIZE(data.states))
		return -EINVAL;

	/* prevent speculative out-of-bounds use of the user-supplied index */
	idx = array_index_nospec(idx, ARRAY_SIZE(data.states));

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		/* get_sync bumps the usage count even on failure; drop it */
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	ret = amdgpu_dpm_get_pp_num_states(adev, &data);
	if (ret)
		goto err_out;

	state = data.states[idx];

	/* only set user selected power states */
	if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
	    state != POWER_STATE_TYPE_DEFAULT) {
		ret = amdgpu_dpm_dispatch_task(adev,
				AMD_PP_TASK_ENABLE_USER_STATE, &state);
		if (ret)
			goto err_out;

		adev->pm.pp_force_state_enabled = true;
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;

err_out:
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
	return ret;
}
519
d54bb40f
AD
520/**
521 * DOC: pp_table
522 *
523 * The amdgpu driver provides a sysfs API for uploading new powerplay
524 * tables. The file pp_table is used for this. Reading the file
525 * will dump the current power play table. Writing to the file
526 * will attempt to upload a new powerplay table and re-initialize
527 * powerplay using that new table.
528 *
529 */
530
f3898ea1
EH
/*
 * Show handler for pp_table: copies the current powerplay table (as returned
 * by the DPM layer) into the sysfs buffer, truncated to PAGE_SIZE - 1 bytes.
 * Returns the number of bytes copied, or a non-positive DPM size/error code.
 */
static ssize_t amdgpu_get_pp_table(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	char *table = NULL;
	int size, ret;

	/* reject access while the GPU is resetting or (non-runtime) suspended */
	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		/* get_sync bumps the usage count even on failure; drop it */
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	/* table points at DPM-owned memory; only the size is returned here */
	size = amdgpu_dpm_get_pp_table(adev, &table);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (size <= 0)
		return size;

	/* sysfs show buffers are one page; clamp to what fits */
	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	memcpy(buf, table, size);

	return size;
}
566
/*
 * Store handler for pp_table: uploads a new powerplay table to the DPM layer.
 * Returns count on success, negative errno on failure.
 */
static ssize_t amdgpu_set_pp_table(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf,
				   size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret = 0;

	/* reject access while the GPU is resetting or (non-runtime) suspended */
	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		/* get_sync bumps the usage count even on failure; drop it */
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	ret = amdgpu_dpm_set_pp_table(adev, buf, count);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return ret;

	return count;
}
597
4e418c34
AD
598/**
599 * DOC: pp_od_clk_voltage
600 *
601 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
602 * in each power level within a power state. The pp_od_clk_voltage is used for
603 * this.
604 *
ccda42a4
AD
605 * Note that the actual memory controller clock rate are exposed, not
606 * the effective memory clock of the DRAMs. To translate it, use the
607 * following formula:
608 *
609 * Clock conversion (Mhz):
610 *
611 * HBM: effective_memory_clock = memory_controller_clock * 1
612 *
613 * G5: effective_memory_clock = memory_controller_clock * 1
614 *
615 * G6: effective_memory_clock = memory_controller_clock * 2
616 *
617 * DRAM data rate (MT/s):
618 *
619 * HBM: effective_memory_clock * 2 = data_rate
620 *
621 * G5: effective_memory_clock * 4 = data_rate
622 *
623 * G6: effective_memory_clock * 8 = data_rate
624 *
625 * Bandwidth (MB/s):
626 *
627 * data_rate * vram_bit_width / 8 = memory_bandwidth
628 *
629 * Some examples:
630 *
631 * G5 on RX460:
632 *
633 * memory_controller_clock = 1750 Mhz
634 *
635 * effective_memory_clock = 1750 Mhz * 1 = 1750 Mhz
636 *
637 * data rate = 1750 * 4 = 7000 MT/s
638 *
639 * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s
640 *
641 * G6 on RX5700:
642 *
643 * memory_controller_clock = 875 Mhz
644 *
645 * effective_memory_clock = 875 Mhz * 2 = 1750 Mhz
646 *
647 * data rate = 1750 * 8 = 14000 MT/s
648 *
649 * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s
650 *
d5bf2653
EQ
651 * < For Vega10 and previous ASICs >
652 *
4e418c34 653 * Reading the file will display:
dc85db25 654 *
4e418c34 655 * - a list of engine clock levels and voltages labeled OD_SCLK
dc85db25 656 *
4e418c34 657 * - a list of memory clock levels and voltages labeled OD_MCLK
dc85db25 658 *
4e418c34
AD
659 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
660 *
661 * To manually adjust these settings, first select manual using
662 * power_dpm_force_performance_level. Enter a new value for each
663 * level by writing a string that contains "s/m level clock voltage" to
664 * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
665 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
666 * 810 mV. When you have edited all of the states as needed, write
667 * "c" (commit) to the file to commit your changes. If you want to reset to the
668 * default power levels, write "r" (reset) to the file to reset them.
669 *
d5bf2653 670 *
bd09331a 671 * < For Vega20 and newer ASICs >
d5bf2653
EQ
672 *
673 * Reading the file will display:
674 *
675 * - minimum and maximum engine clock labeled OD_SCLK
676 *
37a58f69
EQ
677 * - minimum(not available for Vega20 and Navi1x) and maximum memory
678 * clock labeled OD_MCLK
d5bf2653 679 *
b1f82cb2 680 * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
8f4f5f0b
EQ
681 * They can be used to calibrate the sclk voltage curve. This is
682 * available for Vega20 and NV1X.
683 *
a2b6df4f 684 * - voltage offset(in mV) applied on target voltage calculation.
e835bc26
EQ
685 * This is available for Sienna Cichlid, Navy Flounder, Dimgrey
686 * Cavefish and some later SMU13 ASICs. For these ASICs, the target
687 * voltage calculation can be illustrated by "voltage = voltage
688 * calculated from v/f curve + overdrive vddgfx offset"
a2b6df4f 689 *
e835bc26
EQ
690 * - a list of valid ranges for sclk, mclk, voltage curve points
691 * or voltage offset labeled OD_RANGE
d5bf2653 692 *
0487bbb4
AD
693 * < For APUs >
694 *
695 * Reading the file will display:
696 *
697 * - minimum and maximum engine clock labeled OD_SCLK
698 *
699 * - a list of valid ranges for sclk labeled OD_RANGE
700 *
3dc8077f
AD
701 * < For VanGogh >
702 *
703 * Reading the file will display:
704 *
705 * - minimum and maximum engine clock labeled OD_SCLK
706 * - minimum and maximum core clocks labeled OD_CCLK
707 *
708 * - a list of valid ranges for sclk and cclk labeled OD_RANGE
709 *
d5bf2653
EQ
710 * To manually adjust these settings:
711 *
712 * - First select manual using power_dpm_force_performance_level
713 *
714 * - For clock frequency setting, enter a new value by writing a
715 * string that contains "s/m index clock" to the file. The index
716 * should be 0 if to set minimum clock. And 1 if to set maximum
717 * clock. E.g., "s 0 500" will update minimum sclk to be 500 MHz.
3dc8077f
AD
718 * "m 1 800" will update maximum mclk to be 800Mhz. For core
719 * clocks on VanGogh, the string contains "p core index clock".
720 * E.g., "p 2 0 800" would set the minimum core clock on core
721 * 2 to 800Mhz.
d5bf2653 722 *
e835bc26
EQ
723 * For sclk voltage curve supported by Vega20 and NV1X, enter the new
724 * values by writing a string that contains "vc point clock voltage"
725 * to the file. The points are indexed by 0, 1 and 2. E.g., "vc 0 300
726 * 600" will update point1 with clock set as 300Mhz and voltage as 600mV.
727 * "vc 2 1000 1000" will update point3 with clock set as 1000Mhz and
728 * voltage 1000mV.
729 *
730 * For voltage offset supported by Sienna Cichlid, Navy Flounder, Dimgrey
731 * Cavefish and some later SMU13 ASICs, enter the new value by writing a
732 * string that contains "vo offset". E.g., "vo -10" will update the extra
733 * voltage offset applied to the whole v/f curve line as -10mv.
a2b6df4f 734 *
d5bf2653
EQ
735 * - When you have edited all of the states as needed, write "c" (commit)
736 * to the file to commit your changes
737 *
738 * - If you want to reset to the default power levels, write "r" (reset)
739 * to the file to reset them
740 *
4e418c34
AD
741 */
742
e3933f26
RZ
743static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
744 struct device_attribute *attr,
745 const char *buf,
746 size_t count)
747{
748 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 749 struct amdgpu_device *adev = drm_to_adev(ddev);
e3933f26
RZ
750 int ret;
751 uint32_t parameter_size = 0;
752 long parameter[64];
753 char buf_cpy[128];
754 char *tmp_str;
755 char *sub_str;
756 const char delimiter[3] = {' ', '\n', '\0'};
757 uint32_t type;
758
53b3f8f4 759 if (amdgpu_in_reset(adev))
48b270bb 760 return -EPERM;
d2ae842d
AD
761 if (adev->in_suspend && !adev->in_runpm)
762 return -EPERM;
48b270bb 763
08e9ebc7 764 if (count > 127 || count == 0)
e3933f26
RZ
765 return -EINVAL;
766
767 if (*buf == 's')
768 type = PP_OD_EDIT_SCLK_VDDC_TABLE;
0d90d0dd
HR
769 else if (*buf == 'p')
770 type = PP_OD_EDIT_CCLK_VDDC_TABLE;
e3933f26
RZ
771 else if (*buf == 'm')
772 type = PP_OD_EDIT_MCLK_VDDC_TABLE;
e1b3bcaa 773 else if (*buf == 'r')
e3933f26
RZ
774 type = PP_OD_RESTORE_DEFAULT_TABLE;
775 else if (*buf == 'c')
776 type = PP_OD_COMMIT_DPM_TABLE;
d5bf2653
EQ
777 else if (!strncmp(buf, "vc", 2))
778 type = PP_OD_EDIT_VDDC_CURVE;
a2b6df4f
EQ
779 else if (!strncmp(buf, "vo", 2))
780 type = PP_OD_EDIT_VDDGFX_OFFSET;
e3933f26
RZ
781 else
782 return -EINVAL;
783
08e9ebc7
BN
784 memcpy(buf_cpy, buf, count);
785 buf_cpy[count] = 0;
e3933f26
RZ
786
787 tmp_str = buf_cpy;
788
a2b6df4f
EQ
789 if ((type == PP_OD_EDIT_VDDC_CURVE) ||
790 (type == PP_OD_EDIT_VDDGFX_OFFSET))
d5bf2653 791 tmp_str++;
e3933f26
RZ
792 while (isspace(*++tmp_str));
793
ce7c1d04 794 while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
aec1d870
MC
795 if (strlen(sub_str) == 0)
796 continue;
e3933f26
RZ
797 ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
798 if (ret)
799 return -EINVAL;
800 parameter_size++;
801
08e9ebc7
BN
802 if (!tmp_str)
803 break;
804
e3933f26
RZ
805 while (isspace(*tmp_str))
806 tmp_str++;
807 }
808
b9a9294b 809 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
810 if (ret < 0) {
811 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 812 return ret;
66429300 813 }
b9a9294b 814
79c65f3f
EQ
815 if (amdgpu_dpm_set_fine_grain_clk_vol(adev,
816 type,
817 parameter,
818 parameter_size))
819 goto err_out;
12a6727d 820
79c65f3f
EQ
821 if (amdgpu_dpm_odn_edit_dpm_table(adev, type,
822 parameter, parameter_size))
823 goto err_out;
e388cc47 824
8f4828d0 825 if (type == PP_OD_COMMIT_DPM_TABLE) {
79c65f3f
EQ
826 if (amdgpu_dpm_dispatch_task(adev,
827 AMD_PP_TASK_READJUST_POWER_STATE,
828 NULL))
829 goto err_out;
e3933f26 830 }
8f4828d0 831
b9a9294b
AD
832 pm_runtime_mark_last_busy(ddev->dev);
833 pm_runtime_put_autosuspend(ddev->dev);
e3933f26 834
f1403342 835 return count;
79c65f3f
EQ
836
837err_out:
838 pm_runtime_mark_last_busy(ddev->dev);
839 pm_runtime_put_autosuspend(ddev->dev);
840 return -EINVAL;
e3933f26
RZ
841}
842
/*
 * Show handler for pp_od_clk_voltage: prints the overdrive tables
 * (OD_SCLK, OD_MCLK, OD_VDDC_CURVE, OD_RANGE, OD_VDDGFX_OFFSET, OD_CCLK).
 * Prefers the newer emit_clock_levels interface; when that reports -ENOENT
 * it falls back to the legacy print_clock_levels interface. Emits a single
 * newline when nothing was printed.
 */
static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int size = 0;
	int ret;
	enum pp_clock_type od_clocks[6] = {
		OD_SCLK,
		OD_MCLK,
		OD_VDDC_CURVE,
		OD_RANGE,
		OD_VDDGFX_OFFSET,
		OD_CCLK,
	};
	uint clk_index;

	/* reject access while the GPU is resetting or (non-runtime) suspended */
	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		/* get_sync bumps the usage count even on failure; drop it */
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	/* newer interface: appends each table to buf, accumulating size */
	for (clk_index = 0 ; clk_index < 6 ; clk_index++) {
		ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
		if (ret)
			break;
	}
	/* -ENOENT means emit_clock_levels is unsupported; use legacy path */
	if (ret == -ENOENT) {
		size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
		size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size);
	}

	if (size == 0)
		size = sysfs_emit(buf, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
894
7ca881a8 895/**
98eb03bb 896 * DOC: pp_features
7ca881a8
EQ
897 *
898 * The amdgpu driver provides a sysfs API for adjusting what powerplay
98eb03bb 899 * features to be enabled. The file pp_features is used for this. And
7ca881a8
EQ
900 * this is only available for Vega10 and later dGPUs.
901 *
902 * Reading back the file will show you the followings:
903 * - Current ppfeature masks
904 * - List of the all supported powerplay features with their naming,
905 * bitmasks and enablement status('Y'/'N' means "enabled"/"disabled").
906 *
907 * To manually enable or disable a specific feature, just set or clear
908 * the corresponding bit from original ppfeature masks and input the
909 * new ppfeature masks.
910 */
4e01847c
KW
/*
 * Store handler for pp_features: parses a 64-bit powerplay feature mask and
 * applies it via the DPM layer. Returns count on success, negative errno on
 * failure.
 */
static ssize_t amdgpu_set_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t featuremask;
	int ret;

	/* reject access while the GPU is resetting or (non-runtime) suspended */
	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = kstrtou64(buf, 0, &featuremask);
	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		/* get_sync bumps the usage count even on failure; drop it */
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}
946
4e01847c
KW
947static ssize_t amdgpu_get_pp_features(struct device *dev,
948 struct device_attribute *attr,
949 char *buf)
7ca881a8
EQ
950{
951 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 952 struct amdgpu_device *adev = drm_to_adev(ddev);
b9a9294b
AD
953 ssize_t size;
954 int ret;
7ca881a8 955
53b3f8f4 956 if (amdgpu_in_reset(adev))
48b270bb 957 return -EPERM;
d2ae842d
AD
958 if (adev->in_suspend && !adev->in_runpm)
959 return -EPERM;
48b270bb 960
b9a9294b 961 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
962 if (ret < 0) {
963 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 964 return ret;
66429300 965 }
b9a9294b 966
79c65f3f
EQ
967 size = amdgpu_dpm_get_ppfeature_status(adev, buf);
968 if (size <= 0)
09b6744c 969 size = sysfs_emit(buf, "\n");
b9a9294b
AD
970
971 pm_runtime_mark_last_busy(ddev->dev);
972 pm_runtime_put_autosuspend(ddev->dev);
7ca881a8 973
b9a9294b 974 return size;
7ca881a8
EQ
975}
976
271dc908 977/**
a667b75c 978 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
271dc908
AD
979 *
980 * The amdgpu driver provides a sysfs API for adjusting what power levels
981 * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
d7e28e2d
EQ
982 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
983 * this.
d7337ca2 984 *
d7e28e2d
EQ
985 * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
986 * Vega10 and later ASICs.
828e37ef 987 * pp_dpm_fclk interface is only available for Vega20 and later ASICs.
271dc908
AD
988 *
989 * Reading back the files will show you the available power levels within
615585d0
LL
990 * the power state and the clock information for those levels. If deep sleep is
991 * applied to a clock, the level will be denoted by a special level 'S:'
bb619539
HC
992 * E.g., ::
993 *
994 * S: 19Mhz *
995 * 0: 615Mhz
996 * 1: 800Mhz
997 * 2: 888Mhz
998 * 3: 1000Mhz
615585d0 999 *
271dc908
AD
1000 *
1001 * To manually adjust these states, first select manual using
48edde39 1002 * power_dpm_force_performance_level.
a667b75c 1003 * Secondly, enter a new value for each level by inputting a string that
48edde39 1004 * contains " echo xx xx xx > pp_dpm_sclk/mclk/pcie"
a667b75c
AD
1005 * E.g.,
1006 *
1007 * .. code-block:: bash
1008 *
1009 * echo "4 5 6" > pp_dpm_sclk
1010 *
1011 * will enable sclk levels 4, 5, and 6.
d7e28e2d
EQ
1012 *
1013 * NOTE: change to the dcefclk max dpm level is not supported now
271dc908
AD
1014 */
1015
2ea092e5
DP
1016static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
1017 enum pp_clock_type type,
f3898ea1
EH
1018 char *buf)
1019{
1020 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1021 struct amdgpu_device *adev = drm_to_adev(ddev);
c8cb19c7
DP
1022 int size = 0;
1023 int ret = 0;
f3898ea1 1024
53b3f8f4 1025 if (amdgpu_in_reset(adev))
48b270bb 1026 return -EPERM;
d2ae842d
AD
1027 if (adev->in_suspend && !adev->in_runpm)
1028 return -EPERM;
48b270bb 1029
b9a9294b 1030 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
1031 if (ret < 0) {
1032 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 1033 return ret;
66429300 1034 }
b9a9294b 1035
c8cb19c7
DP
1036 ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
1037 if (ret == -ENOENT)
1038 size = amdgpu_dpm_print_clock_levels(adev, type, buf);
1039
1040 if (size == 0)
09b6744c 1041 size = sysfs_emit(buf, "\n");
b9a9294b
AD
1042
1043 pm_runtime_mark_last_busy(ddev->dev);
1044 pm_runtime_put_autosuspend(ddev->dev);
1045
1046 return size;
f3898ea1
EH
1047}
1048
4b4bd048
KC
1049/*
1050 * Worst case: 32 bits individually specified, in octal at 12 characters
1051 * per line (+1 for \n).
1052 */
1053#define AMDGPU_MASK_BUF_MAX (32 * 13)
1054
1055static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
f3898ea1 1056{
f3898ea1 1057 int ret;
c915ef89 1058 unsigned long level;
48edde39 1059 char *sub_str = NULL;
1060 char *tmp;
4b4bd048 1061 char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
48edde39 1062 const char delimiter[3] = {' ', '\n', '\0'};
4b4bd048 1063 size_t bytes;
f3898ea1 1064
4b4bd048
KC
1065 *mask = 0;
1066
1067 bytes = min(count, sizeof(buf_cpy) - 1);
1068 memcpy(buf_cpy, buf, bytes);
1069 buf_cpy[bytes] = '\0';
48edde39 1070 tmp = buf_cpy;
ce7c1d04 1071 while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
48edde39 1072 if (strlen(sub_str)) {
c915ef89
DC
1073 ret = kstrtoul(sub_str, 0, &level);
1074 if (ret || level > 31)
4b4bd048
KC
1075 return -EINVAL;
1076 *mask |= 1 << level;
48edde39 1077 } else
1078 break;
f3898ea1 1079 }
4b4bd048
KC
1080
1081 return 0;
1082}
1083
2ea092e5
DP
1084static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
1085 enum pp_clock_type type,
4b4bd048
KC
1086 const char *buf,
1087 size_t count)
1088{
1089 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1090 struct amdgpu_device *adev = drm_to_adev(ddev);
4b4bd048
KC
1091 int ret;
1092 uint32_t mask = 0;
1093
53b3f8f4 1094 if (amdgpu_in_reset(adev))
48b270bb 1095 return -EPERM;
d2ae842d
AD
1096 if (adev->in_suspend && !adev->in_runpm)
1097 return -EPERM;
48b270bb 1098
4b4bd048
KC
1099 ret = amdgpu_read_mask(buf, count, &mask);
1100 if (ret)
1101 return ret;
1102
b9a9294b 1103 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
1104 if (ret < 0) {
1105 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 1106 return ret;
66429300 1107 }
b9a9294b 1108
79c65f3f 1109 ret = amdgpu_dpm_force_clock_level(adev, type, mask);
241dbbb1 1110
b9a9294b
AD
1111 pm_runtime_mark_last_busy(ddev->dev);
1112 pm_runtime_put_autosuspend(ddev->dev);
1113
241dbbb1
EQ
1114 if (ret)
1115 return -EINVAL;
cd4d7464 1116
f3898ea1
EH
1117 return count;
1118}
1119
2ea092e5 1120static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
f3898ea1
EH
1121 struct device_attribute *attr,
1122 char *buf)
1123{
2ea092e5
DP
1124 return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf);
1125}
b9a9294b 1126
2ea092e5
DP
1127static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
1128 struct device_attribute *attr,
1129 const char *buf,
1130 size_t count)
1131{
1132 return amdgpu_set_pp_dpm_clock(dev, PP_SCLK, buf, count);
1133}
b9a9294b 1134
2ea092e5
DP
1135static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
1136 struct device_attribute *attr,
1137 char *buf)
1138{
1139 return amdgpu_get_pp_dpm_clock(dev, PP_MCLK, buf);
f3898ea1
EH
1140}
1141
1142static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
1143 struct device_attribute *attr,
1144 const char *buf,
1145 size_t count)
1146{
2ea092e5 1147 return amdgpu_set_pp_dpm_clock(dev, PP_MCLK, buf, count);
f3898ea1
EH
1148}
1149
d7337ca2
EQ
1150static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
1151 struct device_attribute *attr,
1152 char *buf)
1153{
2ea092e5 1154 return amdgpu_get_pp_dpm_clock(dev, PP_SOCCLK, buf);
d7337ca2
EQ
1155}
1156
1157static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
1158 struct device_attribute *attr,
1159 const char *buf,
1160 size_t count)
1161{
2ea092e5 1162 return amdgpu_set_pp_dpm_clock(dev, PP_SOCCLK, buf, count);
d7337ca2
EQ
1163}
1164
828e37ef
EQ
1165static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
1166 struct device_attribute *attr,
1167 char *buf)
1168{
2ea092e5 1169 return amdgpu_get_pp_dpm_clock(dev, PP_FCLK, buf);
828e37ef
EQ
1170}
1171
1172static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
1173 struct device_attribute *attr,
1174 const char *buf,
1175 size_t count)
1176{
2ea092e5 1177 return amdgpu_set_pp_dpm_clock(dev, PP_FCLK, buf, count);
828e37ef
EQ
1178}
1179
9577b0ec
XD
1180static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
1181 struct device_attribute *attr,
1182 char *buf)
1183{
2ea092e5 1184 return amdgpu_get_pp_dpm_clock(dev, PP_VCLK, buf);
9577b0ec
XD
1185}
1186
1187static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
1188 struct device_attribute *attr,
1189 const char *buf,
1190 size_t count)
1191{
2ea092e5 1192 return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count);
9577b0ec
XD
1193}
1194
d7001e72
TL
1195static ssize_t amdgpu_get_pp_dpm_vclk1(struct device *dev,
1196 struct device_attribute *attr,
1197 char *buf)
1198{
1199 return amdgpu_get_pp_dpm_clock(dev, PP_VCLK1, buf);
1200}
1201
1202static ssize_t amdgpu_set_pp_dpm_vclk1(struct device *dev,
1203 struct device_attribute *attr,
1204 const char *buf,
1205 size_t count)
1206{
1207 return amdgpu_set_pp_dpm_clock(dev, PP_VCLK1, buf, count);
1208}
1209
9577b0ec
XD
1210static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
1211 struct device_attribute *attr,
1212 char *buf)
1213{
2ea092e5 1214 return amdgpu_get_pp_dpm_clock(dev, PP_DCLK, buf);
9577b0ec
XD
1215}
1216
1217static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
1218 struct device_attribute *attr,
1219 const char *buf,
1220 size_t count)
1221{
2ea092e5 1222 return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count);
9577b0ec
XD
1223}
1224
d7001e72
TL
1225static ssize_t amdgpu_get_pp_dpm_dclk1(struct device *dev,
1226 struct device_attribute *attr,
1227 char *buf)
1228{
1229 return amdgpu_get_pp_dpm_clock(dev, PP_DCLK1, buf);
1230}
1231
1232static ssize_t amdgpu_set_pp_dpm_dclk1(struct device *dev,
1233 struct device_attribute *attr,
1234 const char *buf,
1235 size_t count)
1236{
1237 return amdgpu_set_pp_dpm_clock(dev, PP_DCLK1, buf, count);
1238}
1239
d7e28e2d
EQ
1240static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
1241 struct device_attribute *attr,
1242 char *buf)
1243{
2ea092e5 1244 return amdgpu_get_pp_dpm_clock(dev, PP_DCEFCLK, buf);
d7e28e2d
EQ
1245}
1246
1247static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
1248 struct device_attribute *attr,
1249 const char *buf,
1250 size_t count)
1251{
2ea092e5 1252 return amdgpu_set_pp_dpm_clock(dev, PP_DCEFCLK, buf, count);
d7e28e2d
EQ
1253}
1254
f3898ea1
EH
1255static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
1256 struct device_attribute *attr,
1257 char *buf)
1258{
2ea092e5 1259 return amdgpu_get_pp_dpm_clock(dev, PP_PCIE, buf);
f3898ea1
EH
1260}
1261
1262static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
1263 struct device_attribute *attr,
1264 const char *buf,
1265 size_t count)
1266{
2ea092e5 1267 return amdgpu_set_pp_dpm_clock(dev, PP_PCIE, buf, count);
f3898ea1
EH
1268}
1269
428bafa8
EH
1270static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
1271 struct device_attribute *attr,
1272 char *buf)
1273{
1274 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1275 struct amdgpu_device *adev = drm_to_adev(ddev);
428bafa8 1276 uint32_t value = 0;
b9a9294b 1277 int ret;
428bafa8 1278
53b3f8f4 1279 if (amdgpu_in_reset(adev))
48b270bb 1280 return -EPERM;
d2ae842d
AD
1281 if (adev->in_suspend && !adev->in_runpm)
1282 return -EPERM;
48b270bb 1283
b9a9294b 1284 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
1285 if (ret < 0) {
1286 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 1287 return ret;
66429300 1288 }
b9a9294b 1289
79c65f3f 1290 value = amdgpu_dpm_get_sclk_od(adev);
428bafa8 1291
b9a9294b
AD
1292 pm_runtime_mark_last_busy(ddev->dev);
1293 pm_runtime_put_autosuspend(ddev->dev);
1294
a9ca9bb3 1295 return sysfs_emit(buf, "%d\n", value);
428bafa8
EH
1296}
1297
1298static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
1299 struct device_attribute *attr,
1300 const char *buf,
1301 size_t count)
1302{
1303 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1304 struct amdgpu_device *adev = drm_to_adev(ddev);
428bafa8
EH
1305 int ret;
1306 long int value;
1307
53b3f8f4 1308 if (amdgpu_in_reset(adev))
48b270bb 1309 return -EPERM;
d2ae842d
AD
1310 if (adev->in_suspend && !adev->in_runpm)
1311 return -EPERM;
48b270bb 1312
428bafa8
EH
1313 ret = kstrtol(buf, 0, &value);
1314
b9a9294b
AD
1315 if (ret)
1316 return -EINVAL;
1317
1318 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
1319 if (ret < 0) {
1320 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 1321 return ret;
66429300 1322 }
428bafa8 1323
79c65f3f 1324 amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
428bafa8 1325
b9a9294b
AD
1326 pm_runtime_mark_last_busy(ddev->dev);
1327 pm_runtime_put_autosuspend(ddev->dev);
1328
428bafa8
EH
1329 return count;
1330}
1331
f2bdc05f
EH
1332static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
1333 struct device_attribute *attr,
1334 char *buf)
1335{
1336 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1337 struct amdgpu_device *adev = drm_to_adev(ddev);
f2bdc05f 1338 uint32_t value = 0;
b9a9294b 1339 int ret;
f2bdc05f 1340
53b3f8f4 1341 if (amdgpu_in_reset(adev))
48b270bb 1342 return -EPERM;
d2ae842d
AD
1343 if (adev->in_suspend && !adev->in_runpm)
1344 return -EPERM;
48b270bb 1345
b9a9294b 1346 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
1347 if (ret < 0) {
1348 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 1349 return ret;
66429300 1350 }
b9a9294b 1351
79c65f3f 1352 value = amdgpu_dpm_get_mclk_od(adev);
f2bdc05f 1353
b9a9294b
AD
1354 pm_runtime_mark_last_busy(ddev->dev);
1355 pm_runtime_put_autosuspend(ddev->dev);
1356
a9ca9bb3 1357 return sysfs_emit(buf, "%d\n", value);
f2bdc05f
EH
1358}
1359
1360static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
1361 struct device_attribute *attr,
1362 const char *buf,
1363 size_t count)
1364{
1365 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1366 struct amdgpu_device *adev = drm_to_adev(ddev);
f2bdc05f
EH
1367 int ret;
1368 long int value;
1369
53b3f8f4 1370 if (amdgpu_in_reset(adev))
48b270bb 1371 return -EPERM;
d2ae842d
AD
1372 if (adev->in_suspend && !adev->in_runpm)
1373 return -EPERM;
48b270bb 1374
f2bdc05f
EH
1375 ret = kstrtol(buf, 0, &value);
1376
b9a9294b
AD
1377 if (ret)
1378 return -EINVAL;
1379
1380 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
1381 if (ret < 0) {
1382 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 1383 return ret;
66429300 1384 }
f2bdc05f 1385
79c65f3f 1386 amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
f2bdc05f 1387
b9a9294b
AD
1388 pm_runtime_mark_last_busy(ddev->dev);
1389 pm_runtime_put_autosuspend(ddev->dev);
1390
f2bdc05f
EH
1391 return count;
1392}
1393
6b2576f5
AD
1394/**
1395 * DOC: pp_power_profile_mode
1396 *
1397 * The amdgpu driver provides a sysfs API for adjusting the heuristics
1398 * related to switching between power levels in a power state. The file
1399 * pp_power_profile_mode is used for this.
1400 *
1401 * Reading this file outputs a list of all of the predefined power profiles
1402 * and the relevant heuristics settings for that profile.
1403 *
1404 * To select a profile or create a custom profile, first select manual using
1405 * power_dpm_force_performance_level. Writing the number of a predefined
1406 * profile to pp_power_profile_mode will enable those heuristics. To
1407 * create a custom set of heuristics, write a string of numbers to the file
1408 * starting with the number of the custom profile along with a setting
1409 * for each heuristic parameter. Due to differences across asic families
1410 * the heuristic parameters vary from family to family.
1411 *
1412 */
1413
37c5c4db
RZ
1414static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
1415 struct device_attribute *attr,
1416 char *buf)
1417{
1418 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1419 struct amdgpu_device *adev = drm_to_adev(ddev);
b9a9294b
AD
1420 ssize_t size;
1421 int ret;
37c5c4db 1422
53b3f8f4 1423 if (amdgpu_in_reset(adev))
48b270bb 1424 return -EPERM;
d2ae842d
AD
1425 if (adev->in_suspend && !adev->in_runpm)
1426 return -EPERM;
48b270bb 1427
b9a9294b 1428 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
1429 if (ret < 0) {
1430 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 1431 return ret;
66429300 1432 }
b9a9294b 1433
79c65f3f
EQ
1434 size = amdgpu_dpm_get_power_profile_mode(adev, buf);
1435 if (size <= 0)
09b6744c 1436 size = sysfs_emit(buf, "\n");
b9a9294b
AD
1437
1438 pm_runtime_mark_last_busy(ddev->dev);
1439 pm_runtime_put_autosuspend(ddev->dev);
37c5c4db 1440
b9a9294b 1441 return size;
37c5c4db
RZ
1442}
1443
1444
1445static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
1446 struct device_attribute *attr,
1447 const char *buf,
1448 size_t count)
1449{
7c8e0835 1450 int ret;
37c5c4db 1451 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1452 struct amdgpu_device *adev = drm_to_adev(ddev);
37c5c4db
RZ
1453 uint32_t parameter_size = 0;
1454 long parameter[64];
1455 char *sub_str, buf_cpy[128];
1456 char *tmp_str;
1457 uint32_t i = 0;
1458 char tmp[2];
1459 long int profile_mode = 0;
1460 const char delimiter[3] = {' ', '\n', '\0'};
1461
53b3f8f4 1462 if (amdgpu_in_reset(adev))
48b270bb 1463 return -EPERM;
d2ae842d
AD
1464 if (adev->in_suspend && !adev->in_runpm)
1465 return -EPERM;
48b270bb 1466
37c5c4db
RZ
1467 tmp[0] = *(buf);
1468 tmp[1] = '\0';
1469 ret = kstrtol(tmp, 0, &profile_mode);
1470 if (ret)
b9a9294b 1471 return -EINVAL;
37c5c4db
RZ
1472
1473 if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
1474 if (count < 2 || count > 127)
1475 return -EINVAL;
1476 while (isspace(*++buf))
1477 i++;
1478 memcpy(buf_cpy, buf, count-i);
1479 tmp_str = buf_cpy;
ce7c1d04 1480 while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
c2efbc3f
EQ
1481 if (strlen(sub_str) == 0)
1482 continue;
37c5c4db 1483 ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
b9a9294b
AD
1484 if (ret)
1485 return -EINVAL;
37c5c4db
RZ
1486 parameter_size++;
1487 while (isspace(*tmp_str))
1488 tmp_str++;
1489 }
1490 }
1491 parameter[parameter_size] = profile_mode;
b9a9294b
AD
1492
1493 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
1494 if (ret < 0) {
1495 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 1496 return ret;
66429300 1497 }
b9a9294b 1498
79c65f3f 1499 ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
b9a9294b
AD
1500
1501 pm_runtime_mark_last_busy(ddev->dev);
1502 pm_runtime_put_autosuspend(ddev->dev);
1503
37c5c4db
RZ
1504 if (!ret)
1505 return count;
b9a9294b 1506
37c5c4db
RZ
1507 return -EINVAL;
1508}
1509
a5600853
AD
1510static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev,
1511 enum amd_pp_sensors sensor,
1512 void *query)
d78c227f
ML
1513{
1514 int r, size = sizeof(uint32_t);
1515
1516 if (amdgpu_in_reset(adev))
1517 return -EPERM;
1518 if (adev->in_suspend && !adev->in_runpm)
1519 return -EPERM;
1520
1521 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1522 if (r < 0) {
1523 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1524 return r;
1525 }
1526
1527 /* get the sensor value */
1528 r = amdgpu_dpm_read_sensor(adev, sensor, query, &size);
1529
1530 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1531 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1532
1533 return r;
1534}
1535
b374d82d 1536/**
f503fe69 1537 * DOC: gpu_busy_percent
b374d82d
TSD
1538 *
1539 * The amdgpu driver provides a sysfs API for reading how busy the GPU
1540 * is as a percentage. The file gpu_busy_percent is used for this.
1541 * The SMU firmware computes a percentage of load based on the
1542 * aggregate activity level in the IP cores.
1543 */
4e01847c
KW
1544static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
1545 struct device_attribute *attr,
1546 char *buf)
b374d82d
TSD
1547{
1548 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1549 struct amdgpu_device *adev = drm_to_adev(ddev);
d78c227f
ML
1550 unsigned int value;
1551 int r;
b9a9294b 1552
d78c227f 1553 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value);
b374d82d
TSD
1554 if (r)
1555 return r;
1556
a9ca9bb3 1557 return sysfs_emit(buf, "%d\n", value);
b374d82d
TSD
1558}
1559
f120386d
EQ
1560/**
1561 * DOC: mem_busy_percent
1562 *
1563 * The amdgpu driver provides a sysfs API for reading how busy the VRAM
1564 * is as a percentage. The file mem_busy_percent is used for this.
1565 * The SMU firmware computes a percentage of load based on the
1566 * aggregate activity level in the IP cores.
1567 */
4e01847c
KW
1568static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
1569 struct device_attribute *attr,
1570 char *buf)
f120386d
EQ
1571{
1572 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1573 struct amdgpu_device *adev = drm_to_adev(ddev);
d78c227f
ML
1574 unsigned int value;
1575 int r;
b9a9294b 1576
d78c227f 1577 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value);
f120386d
EQ
1578 if (r)
1579 return r;
1580
a9ca9bb3 1581 return sysfs_emit(buf, "%d\n", value);
f120386d
EQ
1582}
1583
b45e18ac
KR
1584/**
1585 * DOC: pcie_bw
1586 *
1587 * The amdgpu driver provides a sysfs API for estimating how much data
1588 * has been received and sent by the GPU in the last second through PCIe.
1589 * The file pcie_bw is used for this.
1590 * The Perf counters count the number of received and sent messages and return
1591 * those values, as well as the maximum payload size of a PCIe packet (mps).
1592 * Note that it is not possible to easily and quickly obtain the size of each
1593 * packet transmitted, so we output the max payload size (mps) to allow for
1594 * quick estimation of the PCIe bandwidth usage
1595 */
1596static ssize_t amdgpu_get_pcie_bw(struct device *dev,
1597 struct device_attribute *attr,
1598 char *buf)
1599{
1600 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1601 struct amdgpu_device *adev = drm_to_adev(ddev);
d08d692e 1602 uint64_t count0 = 0, count1 = 0;
b9a9294b 1603 int ret;
b45e18ac 1604
53b3f8f4 1605 if (amdgpu_in_reset(adev))
48b270bb 1606 return -EPERM;
d2ae842d
AD
1607 if (adev->in_suspend && !adev->in_runpm)
1608 return -EPERM;
48b270bb 1609
d08d692e
AD
1610 if (adev->flags & AMD_IS_APU)
1611 return -ENODATA;
1612
1613 if (!adev->asic_funcs->get_pcie_usage)
1614 return -ENODATA;
1615
b9a9294b 1616 ret = pm_runtime_get_sync(ddev->dev);
66429300
AD
1617 if (ret < 0) {
1618 pm_runtime_put_autosuspend(ddev->dev);
b9a9294b 1619 return ret;
66429300 1620 }
b9a9294b 1621
b45e18ac 1622 amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
b9a9294b
AD
1623
1624 pm_runtime_mark_last_busy(ddev->dev);
1625 pm_runtime_put_autosuspend(ddev->dev);
1626
a9ca9bb3
TT
1627 return sysfs_emit(buf, "%llu %llu %i\n",
1628 count0, count1, pcie_get_mps(adev->pdev));
b45e18ac
KR
1629}
1630
fb2dbfd2
KR
1631/**
1632 * DOC: unique_id
1633 *
1634 * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU
1635 * The file unique_id is used for this.
1636 * This will provide a Unique ID that will persist from machine to machine
1637 *
1638 * NOTE: This will only work for GFX9 and newer. This file will be absent
1639 * on unsupported ASICs (GFX8 and older)
1640 */
1641static ssize_t amdgpu_get_unique_id(struct device *dev,
1642 struct device_attribute *attr,
1643 char *buf)
1644{
1645 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1646 struct amdgpu_device *adev = drm_to_adev(ddev);
fb2dbfd2 1647
53b3f8f4 1648 if (amdgpu_in_reset(adev))
48b270bb 1649 return -EPERM;
d2ae842d
AD
1650 if (adev->in_suspend && !adev->in_runpm)
1651 return -EPERM;
48b270bb 1652
fb2dbfd2 1653 if (adev->unique_id)
a9ca9bb3 1654 return sysfs_emit(buf, "%016llx\n", adev->unique_id);
fb2dbfd2
KR
1655
1656 return 0;
1657}
1658
b265bdbd
EQ
1659/**
1660 * DOC: thermal_throttling_logging
1661 *
1662 * Thermal throttling pulls down the clock frequency and thus the performance.
1663 * It's an useful mechanism to protect the chip from overheating. Since it
1664 * impacts performance, the user controls whether it is enabled and if so,
1665 * the log frequency.
1666 *
1667 * Reading back the file shows you the status(enabled or disabled) and
1668 * the interval(in seconds) between each thermal logging.
1669 *
1670 * Writing an integer to the file, sets a new logging interval, in seconds.
1671 * The value should be between 1 and 3600. If the value is less than 1,
1672 * thermal logging is disabled. Values greater than 3600 are ignored.
1673 */
1674static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
1675 struct device_attribute *attr,
1676 char *buf)
1677{
1678 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1679 struct amdgpu_device *adev = drm_to_adev(ddev);
b265bdbd 1680
a9ca9bb3
TT
1681 return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
1682 adev_to_drm(adev)->unique,
1683 atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
1684 adev->throttling_logging_rs.interval / HZ + 1);
b265bdbd
EQ
1685}
1686
1687static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
1688 struct device_attribute *attr,
1689 const char *buf,
1690 size_t count)
1691{
1692 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1693 struct amdgpu_device *adev = drm_to_adev(ddev);
b265bdbd
EQ
1694 long throttling_logging_interval;
1695 unsigned long flags;
1696 int ret = 0;
1697
1698 ret = kstrtol(buf, 0, &throttling_logging_interval);
1699 if (ret)
1700 return ret;
1701
1702 if (throttling_logging_interval > 3600)
1703 return -EINVAL;
1704
1705 if (throttling_logging_interval > 0) {
1706 raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
1707 /*
1708 * Reset the ratelimit timer internals.
1709 * This can effectively restart the timer.
1710 */
1711 adev->throttling_logging_rs.interval =
1712 (throttling_logging_interval - 1) * HZ;
1713 adev->throttling_logging_rs.begin = 0;
1714 adev->throttling_logging_rs.printed = 0;
1715 adev->throttling_logging_rs.missed = 0;
1716 raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
1717
1718 atomic_set(&adev->throttling_logging_enabled, 1);
1719 } else {
1720 atomic_set(&adev->throttling_logging_enabled, 0);
1721 }
1722
1723 return count;
1724}
1725
c3ed0e72
KL
1726/**
1727 * DOC: apu_thermal_cap
1728 *
1729 * The amdgpu driver provides a sysfs API for retrieving/updating thermal
1730 * limit temperature in millidegrees Celsius
1731 *
1732 * Reading back the file shows you core limit value
1733 *
1734 * Writing an integer to the file, sets a new thermal limit. The value
1735 * should be between 0 and 100. If the value is less than 0 or greater
1736 * than 100, then the write request will be ignored.
1737 */
1738static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
1739 struct device_attribute *attr,
1740 char *buf)
1741{
1742 int ret, size;
1743 u32 limit;
1744 struct drm_device *ddev = dev_get_drvdata(dev);
1745 struct amdgpu_device *adev = drm_to_adev(ddev);
1746
1747 ret = pm_runtime_get_sync(ddev->dev);
1748 if (ret < 0) {
1749 pm_runtime_put_autosuspend(ddev->dev);
1750 return ret;
1751 }
1752
1753 ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
1754 if (!ret)
1755 size = sysfs_emit(buf, "%u\n", limit);
1756 else
1757 size = sysfs_emit(buf, "failed to get thermal limit\n");
1758
1759 pm_runtime_mark_last_busy(ddev->dev);
1760 pm_runtime_put_autosuspend(ddev->dev);
1761
1762 return size;
1763}
1764
1765static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
1766 struct device_attribute *attr,
1767 const char *buf,
1768 size_t count)
1769{
1770 int ret;
1771 u32 value;
1772 struct drm_device *ddev = dev_get_drvdata(dev);
1773 struct amdgpu_device *adev = drm_to_adev(ddev);
1774
1775 ret = kstrtou32(buf, 10, &value);
1776 if (ret)
1777 return ret;
1778
4d2c09d6 1779 if (value > 100) {
c3ed0e72
KL
1780 dev_err(dev, "Invalid argument !\n");
1781 return -EINVAL;
1782 }
1783
1784 ret = pm_runtime_get_sync(ddev->dev);
1785 if (ret < 0) {
1786 pm_runtime_put_autosuspend(ddev->dev);
1787 return ret;
1788 }
1789
1790 ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
1791 if (ret) {
1792 dev_err(dev, "failed to update thermal limit\n");
1793 return ret;
1794 }
1795
1796 pm_runtime_mark_last_busy(ddev->dev);
1797 pm_runtime_put_autosuspend(ddev->dev);
1798
1799 return count;
1800}
1801
25c933b1
EQ
1802/**
1803 * DOC: gpu_metrics
1804 *
1805 * The amdgpu driver provides a sysfs API for retrieving current gpu
1806 * metrics data. The file gpu_metrics is used for this. Reading the
1807 * file will dump all the current gpu metrics data.
1808 *
1809 * These data include temperature, frequency, engines utilization,
1810 * power consume, throttler status, fan speed and cpu core statistics(
1811 * available for APU only). That's it will give a snapshot of all sensors
1812 * at the same time.
1813 */
1814static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
1815 struct device_attribute *attr,
1816 char *buf)
1817{
1818 struct drm_device *ddev = dev_get_drvdata(dev);
1348969a 1819 struct amdgpu_device *adev = drm_to_adev(ddev);
25c933b1
EQ
1820 void *gpu_metrics;
1821 ssize_t size = 0;
1822 int ret;
1823
53b3f8f4 1824 if (amdgpu_in_reset(adev))
25c933b1 1825 return -EPERM;
d2ae842d
AD
1826 if (adev->in_suspend && !adev->in_runpm)
1827 return -EPERM;
25c933b1
EQ
1828
1829 ret = pm_runtime_get_sync(ddev->dev);
1830 if (ret < 0) {
1831 pm_runtime_put_autosuspend(ddev->dev);
1832 return ret;
1833 }
1834
79c65f3f 1835 size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
25c933b1
EQ
1836 if (size <= 0)
1837 goto out;
1838
1839 if (size >= PAGE_SIZE)
1840 size = PAGE_SIZE - 1;
1841
1842 memcpy(buf, gpu_metrics, size);
1843
1844out:
1845 pm_runtime_mark_last_busy(ddev->dev);
1846 pm_runtime_put_autosuspend(ddev->dev);
1847
1848 return size;
1849}
1850
494c1432 1851static int amdgpu_show_powershift_percent(struct device *dev,
d78c227f 1852 char *buf, enum amd_pp_sensors sensor)
a7673a1c
S
1853{
1854 struct drm_device *ddev = dev_get_drvdata(dev);
1855 struct amdgpu_device *adev = drm_to_adev(ddev);
494c1432
S
1856 uint32_t ss_power;
1857 int r = 0, i;
1858
d78c227f 1859 r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power);
494c1432
S
1860 if (r == -EOPNOTSUPP) {
1861 /* sensor not available on dGPU, try to read from APU */
1862 adev = NULL;
1863 mutex_lock(&mgpu_info.mutex);
1864 for (i = 0; i < mgpu_info.num_gpu; i++) {
1865 if (mgpu_info.gpu_ins[i].adev->flags & AMD_IS_APU) {
1866 adev = mgpu_info.gpu_ins[i].adev;
1867 break;
1868 }
1869 }
1870 mutex_unlock(&mgpu_info.mutex);
1871 if (adev)
d78c227f 1872 r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power);
a7673a1c
S
1873 }
1874
d78c227f
ML
1875 if (r)
1876 return r;
a7673a1c 1877
d78c227f 1878 return sysfs_emit(buf, "%u%%\n", ss_power);
494c1432 1879}
d78c227f 1880
494c1432
S
1881/**
1882 * DOC: smartshift_apu_power
1883 *
1884 * The amdgpu driver provides a sysfs API for reporting APU power
1885 * shift in percentage if platform supports smartshift. Value 0 means that
1886 * there is no powershift and values between [1-100] means that the power
1887 * is shifted to APU, the percentage of boost is with respect to APU power
1888 * limit on the platform.
1889 */
a7673a1c 1890
494c1432
S
1891static ssize_t amdgpu_get_smartshift_apu_power(struct device *dev, struct device_attribute *attr,
1892 char *buf)
1893{
d78c227f 1894 return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_APU_SHARE);
494c1432 1895}
a7673a1c 1896
494c1432
S
1897/**
1898 * DOC: smartshift_dgpu_power
1899 *
1900 * The amdgpu driver provides a sysfs API for reporting dGPU power
1901 * shift in percentage if platform supports smartshift. Value 0 means that
1902 * there is no powershift and values between [1-100] means that the power is
1903 * shifted to dGPU, the percentage of boost is with respect to dGPU power
1904 * limit on the platform.
1905 */
1906
1907static ssize_t amdgpu_get_smartshift_dgpu_power(struct device *dev, struct device_attribute *attr,
1908 char *buf)
1909{
d78c227f 1910 return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_DGPU_SHARE);
a7673a1c
S
1911}
1912
30d95a37
S
1913/**
1914 * DOC: smartshift_bias
1915 *
1916 * The amdgpu driver provides a sysfs API for reporting the
1917 * smartshift(SS2.0) bias level. The value ranges from -100 to 100
1918 * and the default is 0. -100 sets maximum preference to APU
 * and 100 sets max preference to dGPU.
1920 */
1921
1922static ssize_t amdgpu_get_smartshift_bias(struct device *dev,
1923 struct device_attribute *attr,
1924 char *buf)
1925{
1926 int r = 0;
1927
1928 r = sysfs_emit(buf, "%d\n", amdgpu_smartshift_bias);
1929
1930 return r;
1931}
1932
1933static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
1934 struct device_attribute *attr,
1935 const char *buf, size_t count)
1936{
1937 struct drm_device *ddev = dev_get_drvdata(dev);
1938 struct amdgpu_device *adev = drm_to_adev(ddev);
1939 int r = 0;
1940 int bias = 0;
1941
1942 if (amdgpu_in_reset(adev))
1943 return -EPERM;
1944 if (adev->in_suspend && !adev->in_runpm)
1945 return -EPERM;
1946
1947 r = pm_runtime_get_sync(ddev->dev);
1948 if (r < 0) {
1949 pm_runtime_put_autosuspend(ddev->dev);
1950 return r;
1951 }
1952
1953 r = kstrtoint(buf, 10, &bias);
1954 if (r)
1955 goto out;
1956
1957 if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
1958 bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
1959 else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
1960 bias = AMDGPU_SMARTSHIFT_MIN_BIAS;
1961
1962 amdgpu_smartshift_bias = bias;
1963 r = count;
1964
bd4b9bb7 1965 /* TODO: update bias level with SMU message */
30d95a37
S
1966
1967out:
1968 pm_runtime_mark_last_busy(ddev->dev);
1969 pm_runtime_put_autosuspend(ddev->dev);
1970 return r;
1971}
1972
a7673a1c
S
1973static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1974 uint32_t mask, enum amdgpu_device_attr_states *states)
1975{
494c1432 1976 if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
a7673a1c
S
1977 *states = ATTR_STATE_UNSUPPORTED;
1978
1979 return 0;
1980}
1981
30d95a37
S
1982static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1983 uint32_t mask, enum amdgpu_device_attr_states *states)
1984{
d78c227f 1985 uint32_t ss_power;
30d95a37
S
1986
1987 if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
1988 *states = ATTR_STATE_UNSUPPORTED;
d78c227f
ML
1989 else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
1990 (void *)&ss_power))
30d95a37 1991 *states = ATTR_STATE_UNSUPPORTED;
d78c227f
ML
1992 else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
1993 (void *)&ss_power))
30d95a37
S
1994 *states = ATTR_STATE_UNSUPPORTED;
1995
1996 return 0;
1997}
1998
21e43386
LM
1999/* Following items will be read out to indicate current plpd policy:
2000 * - -1: none
2001 * - 0: disallow
2002 * - 1: default
2003 * - 2: optimized
2004 */
2005static ssize_t amdgpu_get_xgmi_plpd_policy(struct device *dev,
2006 struct device_attribute *attr,
2007 char *buf)
2008{
2009 struct drm_device *ddev = dev_get_drvdata(dev);
2010 struct amdgpu_device *adev = drm_to_adev(ddev);
2011 char *mode_desc = "none";
2012 int mode;
2013
2014 if (amdgpu_in_reset(adev))
2015 return -EPERM;
2016 if (adev->in_suspend && !adev->in_runpm)
2017 return -EPERM;
2018
2019 mode = amdgpu_dpm_get_xgmi_plpd_mode(adev, &mode_desc);
2020
2021 return sysfs_emit(buf, "%d: %s\n", mode, mode_desc);
2022}
2023
2024/* Following argument value is expected from user to change plpd policy
2025 * - arg 0: disallow plpd
2026 * - arg 1: default policy
2027 * - arg 2: optimized policy
2028 */
2029static ssize_t amdgpu_set_xgmi_plpd_policy(struct device *dev,
2030 struct device_attribute *attr,
2031 const char *buf, size_t count)
2032{
2033 struct drm_device *ddev = dev_get_drvdata(dev);
2034 struct amdgpu_device *adev = drm_to_adev(ddev);
2035 int mode, ret;
2036
2037 if (amdgpu_in_reset(adev))
2038 return -EPERM;
2039 if (adev->in_suspend && !adev->in_runpm)
2040 return -EPERM;
2041
2042 ret = kstrtos32(buf, 0, &mode);
2043 if (ret)
2044 return -EINVAL;
2045
2046 ret = pm_runtime_get_sync(ddev->dev);
2047 if (ret < 0) {
2048 pm_runtime_put_autosuspend(ddev->dev);
2049 return ret;
2050 }
2051
2052 ret = amdgpu_dpm_set_xgmi_plpd_mode(adev, mode);
2053
2054 pm_runtime_mark_last_busy(ddev->dev);
2055 pm_runtime_put_autosuspend(ddev->dev);
2056
2057 if (ret)
2058 return ret;
2059
2060 return count;
2061}
2062
4e01847c
KW
/* Device-level power-management sysfs attributes.  ATTR_FLAG_ONEVF entries
 * remain writable/visible in SR-IOV one-VF mode; entries with an
 * .attr_update hook decide their own visibility at creation time, all
 * others go through default_attr_update().
 */
static struct amdgpu_device_attr amdgpu_device_attrs[] = {
	AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(apu_thermal_cap, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power, ATTR_FLAG_BASIC,
			      .attr_update = ss_power_attr_update),
	AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power, ATTR_FLAG_BASIC,
			      .attr_update = ss_power_attr_update),
	AMDGPU_DEVICE_ATTR_RW(smartshift_bias, ATTR_FLAG_BASIC,
			      .attr_update = ss_bias_attr_update),
	AMDGPU_DEVICE_ATTR_RW(xgmi_plpd_policy, ATTR_FLAG_BASIC),
};
2100
/* Default visibility/permission policy for entries of amdgpu_device_attrs[]
 * without a dedicated .attr_update hook.  Decides per attribute, based on
 * the GC/MP1 IP versions and the SR-IOV mode, whether the file is created
 * at all (*states) and whether its store callback is kept.
 */
static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
			       uint32_t mask, enum amdgpu_device_attr_states *states)
{
	struct device_attribute *dev_attr = &attr->dev_attr;
	uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
	const char *attr_name = dev_attr->attr.name;

	/* Attributes whose flags are filtered out by the caller's mask are
	 * never created.
	 */
	if (!(attr->flags & mask)) {
		*states = ATTR_STATE_UNSUPPORTED;
		return 0;
	}

#define DEVICE_ATTR_IS(_name)	(!strcmp(attr_name, #_name))

	/* Per-attribute support checks, keyed on the attribute name. */
	if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
		if (gc_ver < IP_VERSION(9, 0, 0))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
		if (gc_ver < IP_VERSION(9, 0, 0) ||
		    !amdgpu_device_has_display_hardware(adev))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
		if (mp1_ver < IP_VERSION(10, 0, 0))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
		*states = ATTR_STATE_UNSUPPORTED;
		if (amdgpu_dpm_is_overdrive_supported(adev))
			*states = ATTR_STATE_SUPPORTED;
	} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
		if (adev->flags & AMD_IS_APU || gc_ver == IP_VERSION(9, 0, 1))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pcie_bw)) {
		/* PCIe Perf counters won't work on APU nodes */
		if (adev->flags & AMD_IS_APU)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(unique_id)) {
		/* unique_id is only populated on these GC IP versions */
		switch (gc_ver) {
		case IP_VERSION(9, 0, 1):
		case IP_VERSION(9, 4, 0):
		case IP_VERSION(9, 4, 1):
		case IP_VERSION(9, 4, 2):
		case IP_VERSION(9, 4, 3):
		case IP_VERSION(10, 3, 0):
		case IP_VERSION(11, 0, 0):
		case IP_VERSION(11, 0, 1):
		case IP_VERSION(11, 0, 2):
		case IP_VERSION(11, 0, 3):
			*states = ATTR_STATE_SUPPORTED;
			break;
		default:
			*states = ATTR_STATE_UNSUPPORTED;
		}
	} else if (DEVICE_ATTR_IS(pp_features)) {
		if ((adev->flags & AMD_IS_APU &&
		     gc_ver != IP_VERSION(9, 4, 3)) ||
		    gc_ver < IP_VERSION(9, 0, 0))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(gpu_metrics)) {
		if (gc_ver < IP_VERSION(9, 1, 0))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
		      gc_ver == IP_VERSION(10, 3, 0) ||
		      gc_ver == IP_VERSION(10, 1, 2) ||
		      gc_ver == IP_VERSION(11, 0, 0) ||
		      gc_ver == IP_VERSION(11, 0, 2) ||
		      gc_ver == IP_VERSION(11, 0, 3) ||
		      gc_ver == IP_VERSION(9, 4, 3)))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
		/* the second VCN clock file needs at least two VCN instances */
		if (!((gc_ver == IP_VERSION(10, 3, 1) ||
		       gc_ver == IP_VERSION(10, 3, 0) ||
		       gc_ver == IP_VERSION(11, 0, 2) ||
		       gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
		      gc_ver == IP_VERSION(10, 3, 0) ||
		      gc_ver == IP_VERSION(10, 1, 2) ||
		      gc_ver == IP_VERSION(11, 0, 0) ||
		      gc_ver == IP_VERSION(11, 0, 2) ||
		      gc_ver == IP_VERSION(11, 0, 3) ||
		      gc_ver == IP_VERSION(9, 4, 3)))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
		/* see pp_dpm_vclk1: requires a second VCN instance */
		if (!((gc_ver == IP_VERSION(10, 3, 1) ||
		       gc_ver == IP_VERSION(10, 3, 0) ||
		       gc_ver == IP_VERSION(11, 0, 2) ||
		       gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
		if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
			*states = ATTR_STATE_UNSUPPORTED;
		else if ((gc_ver == IP_VERSION(10, 3, 0) ||
			  gc_ver == IP_VERSION(11, 0, 3)) && amdgpu_sriov_vf(adev))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(xgmi_plpd_policy)) {
		if (amdgpu_dpm_get_xgmi_plpd_mode(adev, NULL) == XGMI_PLPD_NONE)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_mclk_od)) {
		if (amdgpu_dpm_get_mclk_od(adev) == -EOPNOTSUPP)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_sclk_od)) {
		if (amdgpu_dpm_get_sclk_od(adev) == -EOPNOTSUPP)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(apu_thermal_cap)) {
		u32 limit;

		if (amdgpu_dpm_get_apu_thermal_limit(adev, &limit) ==
		    -EOPNOTSUPP)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
		if (gc_ver == IP_VERSION(9, 4, 2) ||
		    gc_ver == IP_VERSION(9, 4, 3))
			*states = ATTR_STATE_UNSUPPORTED;
	}

	/* Per-ASIC tweaks: drop write permission where the hardware cannot
	 * honor the setting.
	 */
	switch (gc_ver) {
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		/* the Mi series card does not support standalone mclk/socclk/fclk level setting */
		if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
		    DEVICE_ATTR_IS(pp_dpm_socclk) ||
		    DEVICE_ATTR_IS(pp_dpm_fclk)) {
			dev_attr->attr.mode &= ~S_IWUGO;
			dev_attr->store = NULL;
		}
		break;
	case IP_VERSION(10, 3, 0):
		if (DEVICE_ATTR_IS(power_dpm_force_performance_level) &&
		    amdgpu_sriov_vf(adev)) {
			dev_attr->attr.mode &= ~0222;
			dev_attr->store = NULL;
		}
		break;
	default:
		break;
	}

	if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
		/* SMU MP1 does not support dcefclk level setting */
		if (gc_ver >= IP_VERSION(10, 0, 0)) {
			dev_attr->attr.mode &= ~S_IWUGO;
			dev_attr->store = NULL;
		}
	}

	/* setting should not be allowed from VF if not in one VF mode */
	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
		dev_attr->attr.mode &= ~S_IWUGO;
		dev_attr->store = NULL;
	}

#undef DEVICE_ATTR_IS

	return 0;
}
2259
2260
2261static int amdgpu_device_attr_create(struct amdgpu_device *adev,
2262 struct amdgpu_device_attr *attr,
ba02fd6b 2263 uint32_t mask, struct list_head *attr_list)
4e01847c
KW
2264{
2265 int ret = 0;
ba02fd6b
KW
2266 enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
2267 struct amdgpu_device_attr_entry *attr_entry;
25e6373a
YW
2268 struct device_attribute *dev_attr;
2269 const char *name;
ba02fd6b 2270
4e01847c 2271 int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
ba02fd6b 2272 uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
4e01847c 2273
25e6373a
YW
2274 if (!attr)
2275 return -EINVAL;
2276
2277 dev_attr = &attr->dev_attr;
2278 name = dev_attr->attr.name;
4e01847c 2279
8a81028b 2280 attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
4e01847c 2281
ba02fd6b 2282 ret = attr_update(adev, attr, mask, &attr_states);
4e01847c
KW
2283 if (ret) {
2284 dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
2285 name, ret);
2286 return ret;
2287 }
2288
ba02fd6b 2289 if (attr_states == ATTR_STATE_UNSUPPORTED)
4e01847c
KW
2290 return 0;
2291
2292 ret = device_create_file(adev->dev, dev_attr);
2293 if (ret) {
2294 dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
2295 name, ret);
2296 }
2297
ba02fd6b
KW
2298 attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
2299 if (!attr_entry)
2300 return -ENOMEM;
2301
2302 attr_entry->attr = attr;
2303 INIT_LIST_HEAD(&attr_entry->entry);
2304
2305 list_add_tail(&attr_entry->entry, attr_list);
4e01847c
KW
2306
2307 return ret;
2308}
2309
/* Remove the sysfs file previously created for @attr. */
static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
{
	struct device_attribute *dev_attr = &attr->dev_attr;

	device_remove_file(adev->dev, dev_attr);
}
2316
ba02fd6b
KW
2317static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2318 struct list_head *attr_list);
2319
4e01847c
KW
2320static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
2321 struct amdgpu_device_attr *attrs,
2322 uint32_t counts,
ba02fd6b
KW
2323 uint32_t mask,
2324 struct list_head *attr_list)
4e01847c
KW
2325{
2326 int ret = 0;
2327 uint32_t i = 0;
2328
2329 for (i = 0; i < counts; i++) {
ba02fd6b 2330 ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
4e01847c
KW
2331 if (ret)
2332 goto failed;
2333 }
2334
2335 return 0;
2336
2337failed:
ba02fd6b 2338 amdgpu_device_attr_remove_groups(adev, attr_list);
4e01847c
KW
2339
2340 return ret;
2341}
2342
2343static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
ba02fd6b 2344 struct list_head *attr_list)
4e01847c 2345{
ba02fd6b 2346 struct amdgpu_device_attr_entry *entry, *entry_tmp;
4e01847c 2347
ba02fd6b
KW
2348 if (list_empty(attr_list))
2349 return ;
2350
2351 list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
2352 amdgpu_device_attr_remove(adev, entry->attr);
2353 list_del(&entry->entry);
2354 kfree(entry);
2355 }
4e01847c 2356}
e3933f26 2357
d38ceaf9
AD
2358static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
2359 struct device_attribute *attr,
2360 char *buf)
2361{
2362 struct amdgpu_device *adev = dev_get_drvdata(dev);
a34d1166 2363 int channel = to_sensor_dev_attr(attr)->index;
d78c227f 2364 int r, temp = 0;
48b270bb 2365
a34d1166
EQ
2366 if (channel >= PP_TEMP_MAX)
2367 return -EINVAL;
2368
2369 switch (channel) {
2370 case PP_TEMP_JUNCTION:
2371 /* get current junction temperature */
d78c227f
ML
2372 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
2373 (void *)&temp);
a34d1166
EQ
2374 break;
2375 case PP_TEMP_EDGE:
2376 /* get current edge temperature */
d78c227f
ML
2377 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
2378 (void *)&temp);
a34d1166
EQ
2379 break;
2380 case PP_TEMP_MEM:
2381 /* get current memory temperature */
d78c227f
ML
2382 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
2383 (void *)&temp);
b9a9294b
AD
2384 break;
2385 default:
2386 r = -EINVAL;
a34d1166
EQ
2387 break;
2388 }
d38ceaf9 2389
b9a9294b
AD
2390 if (r)
2391 return r;
2392
a9ca9bb3 2393 return sysfs_emit(buf, "%d\n", temp);
d38ceaf9
AD
2394}
2395
2396static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
2397 struct device_attribute *attr,
2398 char *buf)
2399{
2400 struct amdgpu_device *adev = dev_get_drvdata(dev);
2401 int hyst = to_sensor_dev_attr(attr)->index;
2402 int temp;
2403
2404 if (hyst)
2405 temp = adev->pm.dpm.thermal.min_temp;
2406 else
2407 temp = adev->pm.dpm.thermal.max_temp;
2408
a9ca9bb3 2409 return sysfs_emit(buf, "%d\n", temp);
d38ceaf9
AD
2410}
2411
437ccd17
EQ
2412static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
2413 struct device_attribute *attr,
2414 char *buf)
2415{
2416 struct amdgpu_device *adev = dev_get_drvdata(dev);
2417 int hyst = to_sensor_dev_attr(attr)->index;
2418 int temp;
2419
2420 if (hyst)
2421 temp = adev->pm.dpm.thermal.min_hotspot_temp;
2422 else
2423 temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
2424
a9ca9bb3 2425 return sysfs_emit(buf, "%d\n", temp);
437ccd17
EQ
2426}
2427
2428static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
2429 struct device_attribute *attr,
2430 char *buf)
2431{
2432 struct amdgpu_device *adev = dev_get_drvdata(dev);
2433 int hyst = to_sensor_dev_attr(attr)->index;
2434 int temp;
2435
2436 if (hyst)
2437 temp = adev->pm.dpm.thermal.min_mem_temp;
2438 else
2439 temp = adev->pm.dpm.thermal.max_mem_crit_temp;
2440
a9ca9bb3 2441 return sysfs_emit(buf, "%d\n", temp);
437ccd17
EQ
2442}
2443
2adc1156
EQ
2444static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
2445 struct device_attribute *attr,
2446 char *buf)
2447{
2448 int channel = to_sensor_dev_attr(attr)->index;
2449
2450 if (channel >= PP_TEMP_MAX)
2451 return -EINVAL;
2452
a9ca9bb3 2453 return sysfs_emit(buf, "%s\n", temp_label[channel].label);
2adc1156
EQ
2454}
2455
901cb599
EQ
2456static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
2457 struct device_attribute *attr,
2458 char *buf)
2459{
2460 struct amdgpu_device *adev = dev_get_drvdata(dev);
2461 int channel = to_sensor_dev_attr(attr)->index;
2462 int temp = 0;
2463
2464 if (channel >= PP_TEMP_MAX)
2465 return -EINVAL;
2466
2467 switch (channel) {
2468 case PP_TEMP_JUNCTION:
2469 temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
2470 break;
2471 case PP_TEMP_EDGE:
2472 temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
2473 break;
2474 case PP_TEMP_MEM:
2475 temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
2476 break;
2477 }
2478
a9ca9bb3 2479 return sysfs_emit(buf, "%d\n", temp);
901cb599
EQ
2480}
2481
d38ceaf9
AD
2482static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
2483 struct device_attribute *attr,
2484 char *buf)
2485{
2486 struct amdgpu_device *adev = dev_get_drvdata(dev);
2487 u32 pwm_mode = 0;
b9a9294b
AD
2488 int ret;
2489
53b3f8f4 2490 if (amdgpu_in_reset(adev))
48b270bb 2491 return -EPERM;
d2ae842d
AD
2492 if (adev->in_suspend && !adev->in_runpm)
2493 return -EPERM;
48b270bb 2494
4a580877 2495 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
66429300 2496 if (ret < 0) {
4a580877 2497 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2498 return ret;
66429300 2499 }
c9ffa427 2500
79c65f3f 2501 ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
f46587bc 2502
4a580877
LT
2503 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2504 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2505
79c65f3f
EQ
2506 if (ret)
2507 return -EINVAL;
2508
fdf8eea5 2509 return sysfs_emit(buf, "%u\n", pwm_mode);
d38ceaf9
AD
2510}
2511
/* sysfs store handler for pwm1_enable: forwards the parsed value to the DPM
 * fan control mode setter.  Returns @count on success, negative errno
 * otherwise.
 */
static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err, ret;
	int value;

	/* no access while resetting or suspended outside runtime PM */
	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	/* parse before waking the device */
	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return ret;
	}

	ret = amdgpu_dpm_set_fan_control_mode(adev, value);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	/* any DPM-layer failure is reported as -EINVAL */
	if (ret)
		return -EINVAL;

	return count;
}
2546
/* pwm1 uses a fixed 0..255 scale; the minimum is always 0 */
static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sysfs_emit(buf, "%i\n", 0);
}
2553
/* pwm1 uses a fixed 0..255 scale; the maximum is always 255 */
static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sysfs_emit(buf, "%i\n", 255);
}
2560
/* sysfs store handler for pwm1: sets the fan PWM duty (0-255).  The fan
 * must already be in manual control mode, otherwise -EINVAL is returned.
 */
static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;
	u32 pwm_mode;

	/* no access while resetting or suspended outside runtime PM */
	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	/* parse before waking the device */
	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (err < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return err;
	}

	err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
	if (err)
		goto out;

	/* manual speed only applies in manual fan control mode */
	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
		pr_info("manual fan speed control should be enabled first\n");
		err = -EINVAL;
		goto out;
	}

	err = amdgpu_dpm_set_fan_speed_pwm(adev, value);

out:
	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (err)
		return err;

	return count;
}
2606
2607static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
2608 struct device_attribute *attr,
2609 char *buf)
2610{
2611 struct amdgpu_device *adev = dev_get_drvdata(dev);
2612 int err;
cd4d7464 2613 u32 speed = 0;
d38ceaf9 2614
53b3f8f4 2615 if (amdgpu_in_reset(adev))
48b270bb 2616 return -EPERM;
d2ae842d
AD
2617 if (adev->in_suspend && !adev->in_runpm)
2618 return -EPERM;
48b270bb 2619
4a580877 2620 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
66429300 2621 if (err < 0) {
4a580877 2622 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2623 return err;
66429300 2624 }
5ec36e2d 2625
79c65f3f 2626 err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
b9a9294b 2627
4a580877
LT
2628 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2629 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b
AD
2630
2631 if (err)
2632 return err;
d38ceaf9 2633
fdf8eea5 2634 return sysfs_emit(buf, "%i\n", speed);
d38ceaf9
AD
2635}
2636
81c1514b
GI
2637static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
2638 struct device_attribute *attr,
2639 char *buf)
2640{
2641 struct amdgpu_device *adev = dev_get_drvdata(dev);
2642 int err;
cd4d7464 2643 u32 speed = 0;
81c1514b 2644
53b3f8f4 2645 if (amdgpu_in_reset(adev))
48b270bb 2646 return -EPERM;
d2ae842d
AD
2647 if (adev->in_suspend && !adev->in_runpm)
2648 return -EPERM;
48b270bb 2649
4a580877 2650 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
66429300 2651 if (err < 0) {
4a580877 2652 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2653 return err;
66429300 2654 }
5ec36e2d 2655
79c65f3f 2656 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
b9a9294b 2657
4a580877
LT
2658 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2659 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b
AD
2660
2661 if (err)
2662 return err;
81c1514b 2663
fdf8eea5 2664 return sysfs_emit(buf, "%i\n", speed);
81c1514b
GI
2665}
2666
c2870527
RZ
2667static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
2668 struct device_attribute *attr,
2669 char *buf)
2670{
2671 struct amdgpu_device *adev = dev_get_drvdata(dev);
2672 u32 min_rpm = 0;
c2870527
RZ
2673 int r;
2674
d78c227f
ML
2675 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
2676 (void *)&min_rpm);
b9a9294b 2677
c2870527
RZ
2678 if (r)
2679 return r;
2680
a9ca9bb3 2681 return sysfs_emit(buf, "%d\n", min_rpm);
c2870527
RZ
2682}
2683
2684static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
2685 struct device_attribute *attr,
2686 char *buf)
2687{
2688 struct amdgpu_device *adev = dev_get_drvdata(dev);
2689 u32 max_rpm = 0;
c2870527
RZ
2690 int r;
2691
d78c227f
ML
2692 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
2693 (void *)&max_rpm);
b9a9294b 2694
c2870527
RZ
2695 if (r)
2696 return r;
2697
a9ca9bb3 2698 return sysfs_emit(buf, "%d\n", max_rpm);
c2870527
RZ
2699}
2700
/* sysfs show handler for fan1_target: reports the current fan speed in RPM. */
static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 rpm = 0;

	/* no access while resetting or suspended outside runtime PM */
	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (err < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return err;
	}

	err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (err)
		return err;

	return sysfs_emit(buf, "%i\n", rpm);
}
2730
/* sysfs store handler for fan1_target: sets the fan speed in RPM.  Requires
 * manual fan control mode; otherwise -ENODATA is returned.
 */
static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;
	u32 pwm_mode;

	/* no access while resetting or suspended outside runtime PM */
	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	/* parse before waking the device */
	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (err < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return err;
	}

	err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
	if (err)
		goto out;

	/* RPM targets are only honored in manual fan control mode */
	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
		err = -ENODATA;
		goto out;
	}

	err = amdgpu_dpm_set_fan_speed_rpm(adev, value);

out:
	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (err)
		return err;

	return count;
}
2775
/* sysfs show handler for fan1_enable: 0 = automatic control, 1 = manual. */
static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;
	int ret;

	/* no access while resetting or suspended outside runtime PM */
	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return ret;
	}

	ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	/* any DPM-layer failure is reported as -EINVAL */
	if (ret)
		return -EINVAL;

	return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
}
2805
2806static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
2807 struct device_attribute *attr,
2808 const char *buf,
2809 size_t count)
2810{
2811 struct amdgpu_device *adev = dev_get_drvdata(dev);
2812 int err;
2813 int value;
2814 u32 pwm_mode;
2815
53b3f8f4 2816 if (amdgpu_in_reset(adev))
48b270bb 2817 return -EPERM;
d2ae842d
AD
2818 if (adev->in_suspend && !adev->in_runpm)
2819 return -EPERM;
48b270bb 2820
c2870527
RZ
2821 err = kstrtoint(buf, 10, &value);
2822 if (err)
2823 return err;
2824
2825 if (value == 0)
2826 pwm_mode = AMD_FAN_CTRL_AUTO;
2827 else if (value == 1)
2828 pwm_mode = AMD_FAN_CTRL_MANUAL;
2829 else
2830 return -EINVAL;
2831
4a580877 2832 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
66429300 2833 if (err < 0) {
4a580877 2834 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2835 return err;
66429300 2836 }
b9a9294b 2837
79c65f3f 2838 err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
c2870527 2839
4a580877
LT
2840 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2841 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
b9a9294b 2842
79c65f3f
EQ
2843 if (err)
2844 return -EINVAL;
2845
c2870527
RZ
2846 return count;
2847}
2848
2bd376bf
AD
2849static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
2850 struct device_attribute *attr,
2851 char *buf)
2852{
2853 struct amdgpu_device *adev = dev_get_drvdata(dev);
2bd376bf 2854 u32 vddgfx;
d78c227f 2855 int r;
2bd376bf 2856
2bd376bf 2857 /* get the voltage */
d78c227f
ML
2858 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX,
2859 (void *)&vddgfx);
2bd376bf
AD
2860 if (r)
2861 return r;
2862
a9ca9bb3 2863 return sysfs_emit(buf, "%d\n", vddgfx);
2bd376bf
AD
2864}
2865
/* hwmon label for the vddgfx voltage channel */
static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	return sysfs_emit(buf, "vddgfx\n");
}
2872
2873static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
2874 struct device_attribute *attr,
2875 char *buf)
2876{
2877 struct amdgpu_device *adev = dev_get_drvdata(dev);
2bd376bf 2878 u32 vddnb;
d78c227f 2879 int r;
48b270bb 2880
2bd376bf 2881 /* only APUs have vddnb */
ccf9ef0b 2882 if (!(adev->flags & AMD_IS_APU))
2bd376bf
AD
2883 return -EINVAL;
2884
2bd376bf 2885 /* get the voltage */
d78c227f
ML
2886 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB,
2887 (void *)&vddnb);
2bd376bf
AD
2888 if (r)
2889 return r;
2890
a9ca9bb3 2891 return sysfs_emit(buf, "%d\n", vddnb);
2bd376bf
AD
2892}
2893
/* Sysfs show handler for hwmon "in1_label": fixed channel name. */
static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	return sysfs_emit(buf, "vddnb\n");
}
2900
a5600853
AD
/*
 * Read power sensor @sensor and return the value in microwatts, or a
 * negative error code.  The raw sensor value packs whole Watts above
 * bit 8; the low byte appears to be a fractional part, scaled by 1000
 * below (NOTE(review): confirm scaling against the PMFW interface).
 */
static int amdgpu_hwmon_get_power(struct device *dev,
				  enum amd_pp_sensors sensor)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	unsigned int uw;
	u32 query = 0;
	int r;

	r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&query);
	if (r)
		return r;

	/* convert to microwatts */
	uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;

	return uw;
}
2918
2919static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
2920 struct device_attribute *attr,
2921 char *buf)
2922{
d1090194 2923 ssize_t val;
d78c227f 2924
9366c2e8 2925 val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_AVG_POWER);
d78c227f
ML
2926 if (val < 0)
2927 return val;
2928
d1090194 2929 return sysfs_emit(buf, "%zd\n", val);
2976fc26
AD
2930}
2931
bb9f7b68
ML
2932static ssize_t amdgpu_hwmon_show_power_input(struct device *dev,
2933 struct device_attribute *attr,
2934 char *buf)
2935{
d1090194 2936 ssize_t val;
bb9f7b68 2937
47f1724d 2938 val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER);
bb9f7b68
ML
2939 if (val < 0)
2940 return val;
2941
d1090194 2942 return sysfs_emit(buf, "%zd\n", val);
bb9f7b68
ML
2943}
2944
91161b06
DP
/*
 * Common show handler for the power cap files.  @pp_limit_level selects
 * which limit (current/default/min/max) to report; the sensor attr
 * index carries the power type (slow vs fast PPT).  The dpm value is
 * in Watts and is exported in microwatts per the hwmon ABI; if the
 * query fails an empty line is emitted instead of an error.
 */
static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
					struct device_attribute *attr,
					char *buf,
					enum pp_power_limit_level pp_limit_level)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
	uint32_t limit;
	ssize_t size;
	int r;

	/* no PM interaction while the GPU is in reset or suspended */
	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_dpm_get_power_limit(adev, &limit,
				      pp_limit_level, power_type);

	if (!r)
		size = sysfs_emit(buf, "%u\n", limit * 1000000); /* Watts -> uW */
	else
		size = sysfs_emit(buf, "\n");

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return size;
}
2980
19589468
MJ
/* hwmon "power[12]_cap_min": smallest supported power limit. */
static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MIN);
}
91161b06
DP
2987
/* hwmon "power[12]_cap_max": largest supported power limit. */
static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MAX);
}
b9a9294b 2995
91161b06
DP
/* hwmon "power[12]_cap": the currently selected power limit. */
static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_CURRENT);
}
3003
6e58941c
EH
/* hwmon "power[12]_cap_default": the firmware default power limit. */
static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_DEFAULT);
}
91161b06 3011
ae07970a
XH
/*
 * Sysfs show handler for hwmon "power[12]_label".  Vangogh (GC 10.3.1)
 * exposes separate fast/slow PPT limits, selected by the sensor attr
 * index; every other ASIC reports a single "PPT" channel.
 */
static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);

	if (gc_ver == IP_VERSION(10, 3, 1))
		return sysfs_emit(buf, "%s\n",
				  to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
				  "fastPPT" : "slowPPT");
	else
		return sysfs_emit(buf, "PPT\n");
}
8d81bce7
RZ
3026
/*
 * Sysfs store handler for hwmon "power[12]_cap".  The user writes the
 * limit in microwatts; dpm expects Watts with the PPT limit type (the
 * sensor attr index) encoded in the top byte of the value.
 * Returns @count on success or a negative error code.
 */
static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int limit_type = to_sensor_dev_attr(attr)->index;
	int err;
	u32 value;

	/* no PM interaction while the GPU is in reset or suspended */
	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	/* power capping is not configurable from a virtual function */
	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	value = value / 1000000; /* convert to Watt */
	value |= limit_type << 24; /* fast/slow PPT selector in bits 31:24 */

	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (err < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return err;
	}

	err = amdgpu_dpm_set_power_limit(adev, value);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (err)
		return err;

	return count;
}
3068
d0948af7
AD
3069static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
3070 struct device_attribute *attr,
3071 char *buf)
3072{
3073 struct amdgpu_device *adev = dev_get_drvdata(dev);
d0948af7 3074 uint32_t sclk;
d78c227f 3075 int r;
d0948af7 3076
d0948af7 3077 /* get the sclk */
d78c227f
ML
3078 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
3079 (void *)&sclk);
d0948af7
AD
3080 if (r)
3081 return r;
3082
a9ca9bb3 3083 return sysfs_emit(buf, "%u\n", sclk * 10 * 1000);
d0948af7
AD
3084}
3085
/* Sysfs show handler for hwmon "freq1_label": fixed channel name. */
static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	return sysfs_emit(buf, "sclk\n");
}
3092
3093static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
3094 struct device_attribute *attr,
3095 char *buf)
3096{
3097 struct amdgpu_device *adev = dev_get_drvdata(dev);
d0948af7 3098 uint32_t mclk;
d78c227f 3099 int r;
d0948af7 3100
d0948af7 3101 /* get the sclk */
d78c227f
ML
3102 r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
3103 (void *)&mclk);
d0948af7
AD
3104 if (r)
3105 return r;
3106
a9ca9bb3 3107 return sysfs_emit(buf, "%u\n", mclk * 10 * 1000);
d0948af7
AD
3108}
3109
/* Sysfs show handler for hwmon "freq2_label": fixed channel name. */
static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	return sysfs_emit(buf, "mclk\n");
}
844c5419
AD
3116
3117/**
3118 * DOC: hwmon
3119 *
3120 * The amdgpu driver exposes the following sensor interfaces:
dc85db25 3121 *
844c5419 3122 * - GPU temperature (via the on-die sensor)
dc85db25 3123 *
844c5419 3124 * - GPU voltage
dc85db25 3125 *
844c5419 3126 * - Northbridge voltage (APUs only)
dc85db25 3127 *
844c5419 3128 * - GPU power
dc85db25 3129 *
844c5419
AD
3130 * - GPU fan
3131 *
d0948af7
AD
3132 * - GPU gfx/compute engine clock
3133 *
3134 * - GPU memory clock (dGPU only)
3135 *
844c5419 3136 * hwmon interfaces for GPU temperature:
dc85db25 3137 *
a34d1166
EQ
3138 * - temp[1-3]_input: the on die GPU temperature in millidegrees Celsius
3139 * - temp2_input and temp3_input are supported on SOC15 dGPUs only
dc85db25 3140 *
2adc1156
EQ
3141 * - temp[1-3]_label: temperature channel label
3142 * - temp2_label and temp3_label are supported on SOC15 dGPUs only
3143 *
437ccd17
EQ
3144 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
3145 * - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
dc85db25 3146 *
437ccd17
EQ
3147 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
3148 * - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
844c5419 3149 *
901cb599
EQ
3150 * - temp[1-3]_emergency: temperature emergency max value(asic shutdown) in millidegrees Celsius
3151 * - these are supported on SOC15 dGPUs only
3152 *
844c5419 3153 * hwmon interfaces for GPU voltage:
dc85db25 3154 *
844c5419 3155 * - in0_input: the voltage on the GPU in millivolts
dc85db25 3156 *
844c5419
AD
3157 * - in1_input: the voltage on the Northbridge in millivolts
3158 *
3159 * hwmon interfaces for GPU power:
dc85db25 3160 *
29f5be8d 3161 * - power1_average: average power used by the SoC in microWatts. On APUs this includes the CPU.
dc85db25 3162 *
bb9f7b68
ML
3163 * - power1_input: instantaneous power used by the SoC in microWatts. On APUs this includes the CPU.
3164 *
844c5419 3165 * - power1_cap_min: minimum cap supported in microWatts
dc85db25 3166 *
844c5419 3167 * - power1_cap_max: maximum cap supported in microWatts
dc85db25 3168 *
844c5419
AD
3169 * - power1_cap: selected power cap in microWatts
3170 *
3171 * hwmon interfaces for GPU fan:
dc85db25 3172 *
844c5419 3173 * - pwm1: pulse width modulation fan level (0-255)
dc85db25
AD
3174 *
3175 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
3176 *
844c5419 3177 * - pwm1_min: pulse width modulation fan control minimum level (0)
dc85db25 3178 *
844c5419 3179 * - pwm1_max: pulse width modulation fan control maximum level (255)
dc85db25 3180 *
e5527d8c 3181 * - fan1_min: a minimum value Unit: revolution/min (RPM)
c2870527 3182 *
 * - fan1_max: a maximum value Unit: revolution/min (RPM)
c2870527 3184 *
844c5419
AD
3185 * - fan1_input: fan speed in RPM
3186 *
879e723d 3187 * - fan[1-\*]_target: Desired fan speed Unit: revolution/min (RPM)
c2870527 3188 *
 * - fan[1-\*]_enable: Enable or disable the sensors. 1: Enable 0: Disable
c2870527 3190 *
96401f7c
EQ
3191 * NOTE: DO NOT set the fan speed via "pwm1" and "fan[1-\*]_target" interfaces at the same time.
3192 * That will get the former one overridden.
3193 *
d0948af7
AD
3194 * hwmon interfaces for GPU clocks:
3195 *
3196 * - freq1_input: the gfx/compute clock in hertz
3197 *
3198 * - freq2_input: the memory clock in hertz
3199 *
844c5419
AD
3200 * You can use hwmon tools like sensors to view this information on your system.
3201 *
3202 */
3203
/*
 * hwmon sensor attribute instances.  The last SENSOR_DEVICE_ATTR()
 * argument is the per-channel index handed to the shared handlers
 * (PP_TEMP_* channel for temp*, PPT limit type for power*).
 */
/* temperatures: channel 1 = edge, 2 = junction/hotspot, 3 = memory */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
/* fan control (pwm) and fan speed (rpm) */
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
/* voltages: in0 = GFX rail, in1 = northbridge (APU only) */
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
/* power: channel 1 = slow PPT, channel 2 = fast PPT (Vangogh only) */
static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, amdgpu_hwmon_show_power_input, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
static SENSOR_DEVICE_ATTR(power2_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
/* clocks: freq1 = gfx/compute, freq2 = memory */
static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
d38ceaf9
AD
3249
/*
 * Master hwmon attribute list; hwmon_attributes_visible() hides (or
 * strips write access from) whatever a given ASIC does not support.
 */
static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_temp2_input.dev_attr.attr,
	&sensor_dev_attr_temp2_crit.dev_attr.attr,
	&sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_temp3_input.dev_attr.attr,
	&sensor_dev_attr_temp3_crit.dev_attr.attr,
	&sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_temp1_emergency.dev_attr.attr,
	&sensor_dev_attr_temp2_emergency.dev_attr.attr,
	&sensor_dev_attr_temp3_emergency.dev_attr.attr,
	&sensor_dev_attr_temp1_label.dev_attr.attr,
	&sensor_dev_attr_temp2_label.dev_attr.attr,
	&sensor_dev_attr_temp3_label.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_input.dev_attr.attr,
	&sensor_dev_attr_fan1_min.dev_attr.attr,
	&sensor_dev_attr_fan1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_target.dev_attr.attr,
	&sensor_dev_attr_fan1_enable.dev_attr.attr,
	&sensor_dev_attr_in0_input.dev_attr.attr,
	&sensor_dev_attr_in0_label.dev_attr.attr,
	&sensor_dev_attr_in1_input.dev_attr.attr,
	&sensor_dev_attr_in1_label.dev_attr.attr,
	&sensor_dev_attr_power1_average.dev_attr.attr,
	&sensor_dev_attr_power1_input.dev_attr.attr,
	&sensor_dev_attr_power1_cap_max.dev_attr.attr,
	&sensor_dev_attr_power1_cap_min.dev_attr.attr,
	&sensor_dev_attr_power1_cap.dev_attr.attr,
	&sensor_dev_attr_power1_cap_default.dev_attr.attr,
	&sensor_dev_attr_power1_label.dev_attr.attr,
	&sensor_dev_attr_power2_average.dev_attr.attr,
	&sensor_dev_attr_power2_cap_max.dev_attr.attr,
	&sensor_dev_attr_power2_cap_min.dev_attr.attr,
	&sensor_dev_attr_power2_cap.dev_attr.attr,
	&sensor_dev_attr_power2_cap_default.dev_attr.attr,
	&sensor_dev_attr_power2_label.dev_attr.attr,
	&sensor_dev_attr_freq1_input.dev_attr.attr,
	&sensor_dev_attr_freq1_label.dev_attr.attr,
	&sensor_dev_attr_freq2_input.dev_attr.attr,
	&sensor_dev_attr_freq2_label.dev_attr.attr,
	NULL
};
3298
/*
 * is_visible callback for the hwmon attribute group.  Returns 0 to
 * hide an attribute the current ASIC does not support, otherwise the
 * mode to expose it with (possibly with write/read bits stripped).
 */
static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;
	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
	uint32_t tmp;	/* scratch for probing sensor support */

	/* under pp one vf mode manage of hwmon attributes is not supported */
	if (amdgpu_sriov_is_pp_one_vf(adev))
		effective_mode &= ~S_IWUSR;

	/* Skip fan attributes if fan is not present */
	if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/* Skip fan attributes on APU */
	if ((adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/* Skip crit temp on APU */
	if ((((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ)) ||
	    (gc_ver == IP_VERSION(9, 4, 3))) &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
		return 0;

	/* Skip limit attributes if DPM is not enabled */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/* mask fan attributes if we have no bindings for this asic to expose */
	if (((amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
	    ((amdgpu_dpm_get_fan_control_mode(adev, NULL) == -EOPNOTSUPP) &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
		effective_mode &= ~S_IRUGO;

	if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
	    ((amdgpu_dpm_set_fan_control_mode(adev, U32_MAX) == -EOPNOTSUPP) &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
		effective_mode &= ~S_IWUSR;

	/* not implemented yet for APUs other than GC 10.3.1 (vangogh) and 9.4.3 */
	if (((adev->family == AMDGPU_FAMILY_SI) ||
	     ((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)) &&
	      (gc_ver != IP_VERSION(9, 4, 3)))) &&
	    (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr))
		return 0;

	/* not implemented yet for APUs having < GC 9.3.0 (Renoir) */
	if (((adev->family == AMDGPU_FAMILY_SI) ||
	     ((adev->flags & AMD_IS_APU) && (gc_ver < IP_VERSION(9, 3, 0)))) &&
	    (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
		return 0;

	/* not all products support both average and instantaneous */
	if (attr == &sensor_dev_attr_power1_average.dev_attr.attr &&
	    amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&tmp) == -EOPNOTSUPP)
		return 0;
	if (attr == &sensor_dev_attr_power1_input.dev_attr.attr &&
	    amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&tmp) == -EOPNOTSUPP)
		return 0;

	/* hide max/min values if we can't both query and manage the fan */
	if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
	     (amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
	     (amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
	     (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP)) &&
	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* likewise for the rpm limits */
	if ((amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
	    (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP) &&
	    (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
		return 0;

	/* no GFX voltage readback on these */
	if ((adev->family == AMDGPU_FAMILY_SI ||	/* not implemented yet */
	     adev->family == AMDGPU_FAMILY_KV ||	/* not implemented yet */
	     (gc_ver == IP_VERSION(9, 4, 3))) &&
	    (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_in0_label.dev_attr.attr))
		return 0;

	/* only APUs other than gc 9,4,3 have vddnb */
	if ((!(adev->flags & AMD_IS_APU) || (gc_ver == IP_VERSION(9, 4, 3))) &&
	    (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_in1_label.dev_attr.attr))
		return 0;

	/* no mclk on APUs other than gc 9,4,3*/
	if (((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(9, 4, 3))) &&
	    (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
		return 0;

	/* pre-SOC15 parts and APUs (except gc 9,4,3) have no extra channels */
	if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
	    (gc_ver != IP_VERSION(9, 4, 3)) &&
	    (attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_label.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_crit.dev_attr.attr))
		return 0;

	/* hotspot temperature for gc 9,4,3*/
	if (gc_ver == IP_VERSION(9, 4, 3)) {
		if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr ||
		    attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
		    attr == &sensor_dev_attr_temp1_label.dev_attr.attr)
			return 0;

		if (attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
		    attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr)
			return attr->mode;
	}

	/* only SOC15 dGPUs support hotspot and mem temperatures */
	if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
	    (attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr))
		return 0;

	/* only Vangogh has fast PPT limit and power labels */
	if (!(gc_ver == IP_VERSION(10, 3, 1)) &&
	    (attr == &sensor_dev_attr_power2_average.dev_attr.attr ||
	     attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
	     attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
	     attr == &sensor_dev_attr_power2_label.dev_attr.attr))
		return 0;

	return effective_mode;
}
3473
/* All hwmon attributes, filtered per-ASIC by hwmon_attributes_visible(). */
static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

/* NULL-terminated group list handed to the hwmon core at registration. */
static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};
3483
d7bf1b55
EQ
/*
 * Read back one overdrive (OD) settings table (@od_type) into @buf via
 * amdgpu_dpm_print_clock_levels().  Returns the number of bytes written
 * or a negative error code; an empty table yields a single newline.
 */
static int amdgpu_retrieve_od_settings(struct amdgpu_device *adev,
				       enum pp_clock_type od_type,
				       char *buf)
{
	int size = 0;
	int ret;

	/* no PM interaction while the GPU is in reset or suspended */
	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(adev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(adev->dev);
		return ret;
	}

	size = amdgpu_dpm_print_clock_levels(adev, od_type, buf);
	if (size == 0)
		size = sysfs_emit(buf, "\n");

	pm_runtime_mark_last_busy(adev->dev);
	pm_runtime_put_autosuspend(adev->dev);

	return size;
}
3511
3512static int parse_input_od_command_lines(const char *buf,
3513 size_t count,
3514 u32 *type,
3515 long *params,
3516 uint32_t *num_of_params)
3517{
3518 const char delimiter[3] = {' ', '\n', '\0'};
3519 uint32_t parameter_size = 0;
3520 char buf_cpy[128] = {0};
3521 char *tmp_str, *sub_str;
3522 int ret;
3523
3524 if (count > sizeof(buf_cpy) - 1)
3525 return -EINVAL;
3526
3527 memcpy(buf_cpy, buf, count);
3528 tmp_str = buf_cpy;
3529
3530 /* skip heading spaces */
3531 while (isspace(*tmp_str))
3532 tmp_str++;
3533
3534 switch (*tmp_str) {
3535 case 'c':
3536 *type = PP_OD_COMMIT_DPM_TABLE;
3537 return 0;
f7f9e48f
MJ
3538 case 'r':
3539 params[parameter_size] = *type;
3540 *num_of_params = 1;
3541 *type = PP_OD_RESTORE_DEFAULT_TABLE;
3542 return 0;
d7bf1b55
EQ
3543 default:
3544 break;
3545 }
3546
3547 while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
3548 if (strlen(sub_str) == 0)
3549 continue;
3550
3551 ret = kstrtol(sub_str, 0, &params[parameter_size]);
3552 if (ret)
3553 return -EINVAL;
3554 parameter_size++;
3555
3556 while (isspace(*tmp_str))
3557 tmp_str++;
3558 }
3559
3560 *num_of_params = parameter_size;
3561
3562 return 0;
3563}
3564
/*
 * Parse one OD command line and hand it to dpm.  A "c"(ommit) command
 * additionally dispatches a power-state readjust task so the staged
 * settings take effect.  Returns @count on success or a negative error.
 */
static int
amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
				     enum PP_OD_DPM_TABLE_COMMAND cmd_type,
				     const char *in_buf,
				     size_t count)
{
	uint32_t parameter_size = 0;
	long parameter[64];	/* large enough for any parsed command line */
	int ret;

	/* no PM interaction while the GPU is in reset or suspended */
	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	/* may rewrite cmd_type (commit/restore commands) */
	ret = parse_input_od_command_lines(in_buf,
					   count,
					   &cmd_type,
					   parameter,
					   &parameter_size);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(adev->dev);
	if (ret < 0)
		goto err_out0;

	ret = amdgpu_dpm_odn_edit_dpm_table(adev,
					    cmd_type,
					    parameter,
					    parameter_size);
	if (ret)
		goto err_out1;

	if (cmd_type == PP_OD_COMMIT_DPM_TABLE) {
		ret = amdgpu_dpm_dispatch_task(adev,
					       AMD_PP_TASK_READJUST_POWER_STATE,
					       NULL);
		if (ret)
			goto err_out1;
	}

	pm_runtime_mark_last_busy(adev->dev);
	pm_runtime_put_autosuspend(adev->dev);

	return count;

err_out1:
	pm_runtime_mark_last_busy(adev->dev);
err_out0:
	pm_runtime_put_autosuspend(adev->dev);

	return ret;
}
3619
3620/**
3621 * DOC: fan_curve
3622 *
3623 * The amdgpu driver provides a sysfs API for checking and adjusting the fan
3624 * control curve line.
3625 *
3626 * Reading back the file shows you the current settings(temperature in Celsius
3627 * degree and fan speed in pwm) applied to every anchor point of the curve line
 * and their permitted ranges if changeable.
3629 *
3630 * Writing a desired string(with the format like "anchor_point_index temperature
3631 * fan_speed_in_pwm") to the file, change the settings for the specific anchor
3632 * point accordingly.
3633 *
3634 * When you have finished the editing, write "c" (commit) to the file to commit
3635 * your changes.
3636 *
f7f9e48f
MJ
3637 * If you want to reset to the default value, write "r" (reset) to the file to
3638 * reset them
3639 *
d7bf1b55
EQ
3640 * There are two fan control modes supported: auto and manual. With auto mode,
3641 * PMFW handles the fan speed control(how fan speed reacts to ASIC temperature).
3642 * While with manual mode, users can set their own fan curve line as what
3643 * described here. Normally the ASIC is booted up with auto mode. Any
3644 * settings via this interface will switch the fan control to manual mode
3645 * implicitly.
3646 */
3647static ssize_t fan_curve_show(struct kobject *kobj,
3648 struct kobj_attribute *attr,
3649 char *buf)
3650{
3651 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3652 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3653
3654 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_CURVE, buf);
3655}
3656
3657static ssize_t fan_curve_store(struct kobject *kobj,
3658 struct kobj_attribute *attr,
3659 const char *buf,
3660 size_t count)
3661{
3662 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3663 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3664
3665 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
3666 PP_OD_EDIT_FAN_CURVE,
3667 buf,
3668 count);
3669}
3670
3671static umode_t fan_curve_visible(struct amdgpu_device *adev)
3672{
3673 umode_t umode = 0000;
3674
3675 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE)
3676 umode |= S_IRUSR | S_IRGRP | S_IROTH;
3677
3678 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_CURVE_SET)
3679 umode |= S_IWUSR;
3680
3681 return umode;
3682}
3683
548009ad
EQ
3684/**
3685 * DOC: acoustic_limit_rpm_threshold
3686 *
3687 * The amdgpu driver provides a sysfs API for checking and adjusting the
3688 * acoustic limit in RPM for fan control.
3689 *
3690 * Reading back the file shows you the current setting and the permitted
 3691 * ranges if changeable.
3692 *
3693 * Writing an integer to the file, change the setting accordingly.
3694 *
3695 * When you have finished the editing, write "c" (commit) to the file to commit
3696 * your changes.
3697 *
1007bc36
MJ
3698 * If you want to reset to the default value, write "r" (reset) to the file to
3699 * reset them
3700 *
548009ad
EQ
3701 * This setting works under auto fan control mode only. It adjusts the PMFW's
3702 * behavior about the maximum speed in RPM the fan can spin. Setting via this
3703 * interface will switch the fan control to auto mode implicitly.
3704 */
3705static ssize_t acoustic_limit_threshold_show(struct kobject *kobj,
3706 struct kobj_attribute *attr,
3707 char *buf)
3708{
3709 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3710 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3711
3712 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_ACOUSTIC_LIMIT, buf);
3713}
3714
3715static ssize_t acoustic_limit_threshold_store(struct kobject *kobj,
3716 struct kobj_attribute *attr,
3717 const char *buf,
3718 size_t count)
3719{
3720 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3721 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3722
3723 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
3724 PP_OD_EDIT_ACOUSTIC_LIMIT,
3725 buf,
3726 count);
3727}
3728
3729static umode_t acoustic_limit_threshold_visible(struct amdgpu_device *adev)
3730{
3731 umode_t umode = 0000;
3732
3733 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE)
3734 umode |= S_IRUSR | S_IRGRP | S_IROTH;
3735
3736 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET)
3737 umode |= S_IWUSR;
3738
3739 return umode;
3740}
3741
47cf6fcb
EQ
3742/**
3743 * DOC: acoustic_target_rpm_threshold
3744 *
3745 * The amdgpu driver provides a sysfs API for checking and adjusting the
3746 * acoustic target in RPM for fan control.
3747 *
3748 * Reading back the file shows you the current setting and the permitted
 3750 * ranges if changeable.
3750 *
3751 * Writing an integer to the file, change the setting accordingly.
3752 *
3753 * When you have finished the editing, write "c" (commit) to the file to commit
3754 * your changes.
3755 *
1007bc36
MJ
3756 * If you want to reset to the default value, write "r" (reset) to the file to
3757 * reset them
3758 *
47cf6fcb
EQ
3759 * This setting works under auto fan control mode only. It can co-exist with
3760 * other settings which can work also under auto mode. It adjusts the PMFW's
3761 * behavior about the maximum speed in RPM the fan can spin when ASIC
3762 * temperature is not greater than target temperature. Setting via this
3763 * interface will switch the fan control to auto mode implicitly.
3764 */
3765static ssize_t acoustic_target_threshold_show(struct kobject *kobj,
3766 struct kobj_attribute *attr,
3767 char *buf)
3768{
3769 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3770 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3771
3772 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_ACOUSTIC_TARGET, buf);
3773}
3774
3775static ssize_t acoustic_target_threshold_store(struct kobject *kobj,
3776 struct kobj_attribute *attr,
3777 const char *buf,
3778 size_t count)
3779{
3780 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3781 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3782
3783 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
3784 PP_OD_EDIT_ACOUSTIC_TARGET,
3785 buf,
3786 count);
3787}
3788
3789static umode_t acoustic_target_threshold_visible(struct amdgpu_device *adev)
3790{
3791 umode_t umode = 0000;
3792
3793 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE)
3794 umode |= S_IRUSR | S_IRGRP | S_IROTH;
3795
3796 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET)
3797 umode |= S_IWUSR;
3798
3799 return umode;
3800}
3801
eedd5a34
EQ
3802/**
3803 * DOC: fan_target_temperature
3804 *
3805 * The amdgpu driver provides a sysfs API for checking and adjusting the
 3806 * target temperature in Celsius degrees for fan control.
3807 *
3808 * Reading back the file shows you the current setting and the permitted
3809 * ranges if changable.
3810 *
3811 * Writing an integer to the file, change the setting accordingly.
3812 *
3813 * When you have finished the editing, write "c" (commit) to the file to commit
3814 * your changes.
3815 *
1007bc36
MJ
3816 * If you want to reset to the default value, write "r" (reset) to the file to
3817 * reset them
3818 *
eedd5a34
EQ
3819 * This setting works under auto fan control mode only. It can co-exist with
 3820 * other settings which can work also under auto mode. Pairing with the
3821 * acoustic_target_rpm_threshold setting, they define the maximum speed in
3822 * RPM the fan can spin when ASIC temperature is not greater than target
3823 * temperature. Setting via this interface will switch the fan control to
3824 * auto mode implicitly.
3825 */
3826static ssize_t fan_target_temperature_show(struct kobject *kobj,
3827 struct kobj_attribute *attr,
3828 char *buf)
3829{
3830 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3831 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3832
3833 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_TARGET_TEMPERATURE, buf);
3834}
3835
3836static ssize_t fan_target_temperature_store(struct kobject *kobj,
3837 struct kobj_attribute *attr,
3838 const char *buf,
3839 size_t count)
3840{
3841 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3842 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3843
3844 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
3845 PP_OD_EDIT_FAN_TARGET_TEMPERATURE,
3846 buf,
3847 count);
3848}
3849
3850static umode_t fan_target_temperature_visible(struct amdgpu_device *adev)
3851{
3852 umode_t umode = 0000;
3853
3854 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE)
3855 umode |= S_IRUSR | S_IRGRP | S_IROTH;
3856
3857 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET)
3858 umode |= S_IWUSR;
3859
3860 return umode;
3861}
3862
9df5d008
EQ
3863/**
3864 * DOC: fan_minimum_pwm
3865 *
3866 * The amdgpu driver provides a sysfs API for checking and adjusting the
3867 * minimum fan speed in PWM.
3868 *
3869 * Reading back the file shows you the current setting and the permitted
 3870 * ranges if changeable.
3871 *
3872 * Writing an integer to the file, change the setting accordingly.
3873 *
3874 * When you have finished the editing, write "c" (commit) to the file to commit
3875 * your changes.
3876 *
1007bc36
MJ
3877 * If you want to reset to the default value, write "r" (reset) to the file to
3878 * reset them
3879 *
9df5d008
EQ
3880 * This setting works under auto fan control mode only. It can co-exist with
3881 * other settings which can work also under auto mode. It adjusts the PMFW's
3882 * behavior about the minimum fan speed in PWM the fan should spin. Setting
3883 * via this interface will switch the fan control to auto mode implicitly.
3884 */
3885static ssize_t fan_minimum_pwm_show(struct kobject *kobj,
3886 struct kobj_attribute *attr,
3887 char *buf)
3888{
3889 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3890 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3891
3892 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_MINIMUM_PWM, buf);
3893}
3894
3895static ssize_t fan_minimum_pwm_store(struct kobject *kobj,
3896 struct kobj_attribute *attr,
3897 const char *buf,
3898 size_t count)
3899{
3900 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3901 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3902
3903 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
3904 PP_OD_EDIT_FAN_MINIMUM_PWM,
3905 buf,
3906 count);
3907}
3908
3909static umode_t fan_minimum_pwm_visible(struct amdgpu_device *adev)
3910{
3911 umode_t umode = 0000;
3912
3913 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE)
3914 umode |= S_IRUSR | S_IRGRP | S_IROTH;
3915
3916 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET)
3917 umode |= S_IWUSR;
3918
3919 return umode;
3920}
3921
d7bf1b55
EQ
3922static struct od_feature_set amdgpu_od_set = {
3923 .containers = {
3924 [0] = {
3925 .name = "fan_ctrl",
3926 .sub_feature = {
3927 [0] = {
3928 .name = "fan_curve",
3929 .ops = {
3930 .is_visible = fan_curve_visible,
3931 .show = fan_curve_show,
3932 .store = fan_curve_store,
3933 },
3934 },
548009ad
EQ
3935 [1] = {
3936 .name = "acoustic_limit_rpm_threshold",
3937 .ops = {
3938 .is_visible = acoustic_limit_threshold_visible,
3939 .show = acoustic_limit_threshold_show,
3940 .store = acoustic_limit_threshold_store,
3941 },
3942 },
47cf6fcb
EQ
3943 [2] = {
3944 .name = "acoustic_target_rpm_threshold",
3945 .ops = {
3946 .is_visible = acoustic_target_threshold_visible,
3947 .show = acoustic_target_threshold_show,
3948 .store = acoustic_target_threshold_store,
3949 },
3950 },
eedd5a34
EQ
3951 [3] = {
3952 .name = "fan_target_temperature",
3953 .ops = {
3954 .is_visible = fan_target_temperature_visible,
3955 .show = fan_target_temperature_show,
3956 .store = fan_target_temperature_store,
3957 },
3958 },
9df5d008
EQ
3959 [4] = {
3960 .name = "fan_minimum_pwm",
3961 .ops = {
3962 .is_visible = fan_minimum_pwm_visible,
3963 .show = fan_minimum_pwm_show,
3964 .store = fan_minimum_pwm_store,
3965 },
3966 },
d7bf1b55
EQ
3967 },
3968 },
3969 },
3970};
3e38b634
EQ
3971
3972static void od_kobj_release(struct kobject *kobj)
3973{
3974 struct od_kobj *od_kobj = container_of(kobj, struct od_kobj, kobj);
3975
3976 kfree(od_kobj);
3977}
3978
3979static const struct kobj_type od_ktype = {
3980 .release = od_kobj_release,
3981 .sysfs_ops = &kobj_sysfs_ops,
3982};
3983
3984static void amdgpu_od_set_fini(struct amdgpu_device *adev)
3985{
3986 struct od_kobj *container, *container_next;
3987 struct od_attribute *attribute, *attribute_next;
3988
3989 if (list_empty(&adev->pm.od_kobj_list))
3990 return;
3991
3992 list_for_each_entry_safe(container, container_next,
3993 &adev->pm.od_kobj_list, entry) {
3994 list_del(&container->entry);
3995
3996 list_for_each_entry_safe(attribute, attribute_next,
3997 &container->attribute, entry) {
3998 list_del(&attribute->entry);
3999 sysfs_remove_file(&container->kobj,
4000 &attribute->attribute.attr);
4001 kfree(attribute);
4002 }
4003
4004 kobject_put(&container->kobj);
4005 }
4006}
4007
4008static bool amdgpu_is_od_feature_supported(struct amdgpu_device *adev,
4009 struct od_feature_ops *feature_ops)
4010{
4011 umode_t mode;
4012
4013 if (!feature_ops->is_visible)
4014 return false;
4015
4016 /*
4017 * If the feature has no user read and write mode set,
4018 * we can assume the feature is actually not supported.(?)
4019 * And the revelant sysfs interface should not be exposed.
4020 */
4021 mode = feature_ops->is_visible(adev);
4022 if (mode & (S_IRUSR | S_IWUSR))
4023 return true;
4024
4025 return false;
4026}
4027
4028static bool amdgpu_od_is_self_contained(struct amdgpu_device *adev,
4029 struct od_feature_container *container)
4030{
4031 int i;
4032
4033 /*
4034 * If there is no valid entry within the container, the container
4035 * is recognized as a self contained container. And the valid entry
4036 * here means it has a valid naming and it is visible/supported by
4037 * the ASIC.
4038 */
4039 for (i = 0; i < ARRAY_SIZE(container->sub_feature); i++) {
4040 if (container->sub_feature[i].name &&
4041 amdgpu_is_od_feature_supported(adev,
4042 &container->sub_feature[i].ops))
4043 return false;
4044 }
4045
4046 return true;
4047}
4048
4049static int amdgpu_od_set_init(struct amdgpu_device *adev)
d38ceaf9 4050{
3e38b634
EQ
4051 struct od_kobj *top_set, *sub_set;
4052 struct od_attribute *attribute;
4053 struct od_feature_container *container;
4054 struct od_feature_item *feature;
4055 int i, j;
d38ceaf9 4056 int ret;
3e38b634
EQ
4057
4058 /* Setup the top `gpu_od` directory which holds all other OD interfaces */
4059 top_set = kzalloc(sizeof(*top_set), GFP_KERNEL);
4060 if (!top_set)
4061 return -ENOMEM;
4062 list_add(&top_set->entry, &adev->pm.od_kobj_list);
4063
4064 ret = kobject_init_and_add(&top_set->kobj,
4065 &od_ktype,
4066 &adev->dev->kobj,
4067 "%s",
4068 "gpu_od");
4069 if (ret)
4070 goto err_out;
4071 INIT_LIST_HEAD(&top_set->attribute);
4072 top_set->priv = adev;
4073
4074 for (i = 0; i < ARRAY_SIZE(amdgpu_od_set.containers); i++) {
4075 container = &amdgpu_od_set.containers[i];
4076
4077 if (!container->name)
4078 continue;
4079
4080 /*
4081 * If there is valid entries within the container, the container
4082 * will be presented as a sub directory and all its holding entries
4083 * will be presented as plain files under it.
4084 * While if there is no valid entry within the container, the container
4085 * itself will be presented as a plain file under top `gpu_od` directory.
4086 */
4087 if (amdgpu_od_is_self_contained(adev, container)) {
4088 if (!amdgpu_is_od_feature_supported(adev,
4089 &container->ops))
4090 continue;
4091
4092 /*
4093 * The container is presented as a plain file under top `gpu_od`
4094 * directory.
4095 */
4096 attribute = kzalloc(sizeof(*attribute), GFP_KERNEL);
4097 if (!attribute) {
4098 ret = -ENOMEM;
4099 goto err_out;
4100 }
4101 list_add(&attribute->entry, &top_set->attribute);
4102
4103 attribute->attribute.attr.mode =
4104 container->ops.is_visible(adev);
4105 attribute->attribute.attr.name = container->name;
4106 attribute->attribute.show =
4107 container->ops.show;
4108 attribute->attribute.store =
4109 container->ops.store;
4110 ret = sysfs_create_file(&top_set->kobj,
4111 &attribute->attribute.attr);
4112 if (ret)
4113 goto err_out;
4114 } else {
4115 /* The container is presented as a sub directory. */
4116 sub_set = kzalloc(sizeof(*sub_set), GFP_KERNEL);
4117 if (!sub_set) {
4118 ret = -ENOMEM;
4119 goto err_out;
4120 }
4121 list_add(&sub_set->entry, &adev->pm.od_kobj_list);
4122
4123 ret = kobject_init_and_add(&sub_set->kobj,
4124 &od_ktype,
4125 &top_set->kobj,
4126 "%s",
4127 container->name);
4128 if (ret)
4129 goto err_out;
4130 INIT_LIST_HEAD(&sub_set->attribute);
4131 sub_set->priv = adev;
4132
4133 for (j = 0; j < ARRAY_SIZE(container->sub_feature); j++) {
4134 feature = &container->sub_feature[j];
4135 if (!feature->name)
4136 continue;
4137
4138 if (!amdgpu_is_od_feature_supported(adev,
4139 &feature->ops))
4140 continue;
4141
4142 /*
4143 * With the container presented as a sub directory, the entry within
4144 * it is presented as a plain file under the sub directory.
4145 */
4146 attribute = kzalloc(sizeof(*attribute), GFP_KERNEL);
4147 if (!attribute) {
4148 ret = -ENOMEM;
4149 goto err_out;
4150 }
4151 list_add(&attribute->entry, &sub_set->attribute);
4152
4153 attribute->attribute.attr.mode =
4154 feature->ops.is_visible(adev);
4155 attribute->attribute.attr.name = feature->name;
4156 attribute->attribute.show =
4157 feature->ops.show;
4158 attribute->attribute.store =
4159 feature->ops.store;
4160 ret = sysfs_create_file(&sub_set->kobj,
4161 &attribute->attribute.attr);
4162 if (ret)
4163 goto err_out;
4164 }
4165 }
4166 }
4167
4168 return 0;
4169
4170err_out:
4171 amdgpu_od_set_fini(adev);
4172
4173 return ret;
4174}
4175
4176int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
4177{
88e5c8f8 4178 enum amdgpu_sriov_vf_mode mode;
4e01847c 4179 uint32_t mask = 0;
3e38b634 4180 int ret;
d38ceaf9 4181
c86f5ebf
AD
4182 if (adev->pm.sysfs_initialized)
4183 return 0;
4184
5fa99373
ZY
4185 INIT_LIST_HEAD(&adev->pm.pm_attr_list);
4186
d2f52ac8
RZ
4187 if (adev->pm.dpm_enabled == 0)
4188 return 0;
4189
88e5c8f8
MJ
4190 mode = amdgpu_virt_get_sriov_vf_mode(adev);
4191
4192 /* under multi-vf mode, the hwmon attributes are all not supported */
4193 if (mode != SRIOV_VF_MODE_MULTI_VF) {
4194 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
4195 DRIVER_NAME, adev,
4196 hwmon_groups);
4197 if (IS_ERR(adev->pm.int_hwmon_dev)) {
4198 ret = PTR_ERR(adev->pm.int_hwmon_dev);
4199 dev_err(adev->dev, "Unable to register hwmon device: %d\n", ret);
4200 return ret;
4201 }
d38ceaf9
AD
4202 }
4203
88e5c8f8 4204 switch (mode) {
4e01847c
KW
4205 case SRIOV_VF_MODE_ONE_VF:
4206 mask = ATTR_FLAG_ONEVF;
4207 break;
4208 case SRIOV_VF_MODE_MULTI_VF:
4209 mask = 0;
4210 break;
4211 case SRIOV_VF_MODE_BARE_METAL:
4212 default:
4213 mask = ATTR_FLAG_MASK_ALL;
4214 break;
8efd7275
ML
4215 }
4216
4e01847c
KW
4217 ret = amdgpu_device_attr_create_groups(adev,
4218 amdgpu_device_attrs,
4219 ARRAY_SIZE(amdgpu_device_attrs),
ba02fd6b
KW
4220 mask,
4221 &adev->pm.pm_attr_list);
4e01847c 4222 if (ret)
3e38b634
EQ
4223 goto err_out0;
4224
4225 if (amdgpu_dpm_is_overdrive_supported(adev)) {
4226 ret = amdgpu_od_set_init(adev);
4227 if (ret)
4228 goto err_out1;
4229 }
7ca881a8 4230
c86f5ebf
AD
4231 adev->pm.sysfs_initialized = true;
4232
d38ceaf9 4233 return 0;
3e38b634
EQ
4234
4235err_out1:
4236 amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
4237err_out0:
4238 if (adev->pm.int_hwmon_dev)
4239 hwmon_device_unregister(adev->pm.int_hwmon_dev);
4240
4241 return ret;
d38ceaf9
AD
4242}
4243
4244void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
4245{
3e38b634
EQ
4246 amdgpu_od_set_fini(adev);
4247
d38ceaf9
AD
4248 if (adev->pm.int_hwmon_dev)
4249 hwmon_device_unregister(adev->pm.int_hwmon_dev);
4e01847c 4250
ba02fd6b 4251 amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
d38ceaf9
AD
4252}
4253
d38ceaf9
AD
4254/*
4255 * Debugfs info
4256 */
4257#if defined(CONFIG_DEBUG_FS)
4258
517cb957 4259static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
e1b3bcaa
RS
4260 struct amdgpu_device *adev)
4261{
517cb957
HR
4262 uint16_t *p_val;
4263 uint32_t size;
4264 int i;
79c65f3f 4265 uint32_t num_cpu_cores = amdgpu_dpm_get_num_cpu_cores(adev);
517cb957 4266
79c65f3f
EQ
4267 if (amdgpu_dpm_is_cclk_dpm_supported(adev)) {
4268 p_val = kcalloc(num_cpu_cores, sizeof(uint16_t),
517cb957
HR
4269 GFP_KERNEL);
4270
4271 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
4272 (void *)p_val, &size)) {
79c65f3f 4273 for (i = 0; i < num_cpu_cores; i++)
517cb957
HR
4274 seq_printf(m, "\t%u MHz (CPU%d)\n",
4275 *(p_val + i), i);
4276 }
4277
4278 kfree(p_val);
4279 }
4280}
4281
3de4ec57
TSD
4282static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
4283{
4e8303cf
LL
4284 uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
4285 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
cd7b0c66 4286 uint32_t value;
800c53d6 4287 uint64_t value64 = 0;
5b79d048 4288 uint32_t query = 0;
9f8df7d7 4289 int size;
3de4ec57 4290
3de4ec57 4291 /* GPU Clocks */
9f8df7d7 4292 size = sizeof(value);
3de4ec57 4293 seq_printf(m, "GFX Clocks and Power:\n");
517cb957
HR
4294
4295 amdgpu_debugfs_prints_cpu_info(m, adev);
4296
9f8df7d7 4297 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
3de4ec57 4298 seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
9f8df7d7 4299 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
3de4ec57 4300 seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
5ed8d656
RZ
4301 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
4302 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
4303 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
4304 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
9f8df7d7 4305 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
3de4ec57 4306 seq_printf(m, "\t%u mV (VDDGFX)\n", value);
9f8df7d7 4307 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
3de4ec57 4308 seq_printf(m, "\t%u mV (VDDNB)\n", value);
5b79d048 4309 size = sizeof(uint32_t);
9366c2e8 4310 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size))
f0b8f65b 4311 seq_printf(m, "\t%u.%02u W (average GPU)\n", query >> 8, query & 0xff);
e0e1764a
AD
4312 size = sizeof(uint32_t);
4313 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size))
f0b8f65b 4314 seq_printf(m, "\t%u.%02u W (current GPU)\n", query >> 8, query & 0xff);
9f8df7d7 4315 size = sizeof(value);
3de4ec57
TSD
4316 seq_printf(m, "\n");
4317
4318 /* GPU Temp */
9f8df7d7 4319 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
3de4ec57
TSD
4320 seq_printf(m, "GPU Temperature: %u C\n", value/1000);
4321
4322 /* GPU Load */
9f8df7d7 4323 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
3de4ec57 4324 seq_printf(m, "GPU Load: %u %%\n", value);
9b6eb00d
TSD
4325 /* MEM Load */
4326 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
4327 seq_printf(m, "MEM Load: %u %%\n", value);
4328
3de4ec57
TSD
4329 seq_printf(m, "\n");
4330
505f8dbb
AD
4331 /* SMC feature mask */
4332 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
4333 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
4334
8ecad8d6
LL
4335 /* ASICs greater than CHIP_VEGA20 supports these sensors */
4336 if (gc_ver != IP_VERSION(9, 4, 0) && mp1_ver > IP_VERSION(9, 0, 0)) {
1f96ecef
EQ
4337 /* VCN clocks */
4338 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
4339 if (!value) {
4340 seq_printf(m, "VCN: Disabled\n");
4341 } else {
4342 seq_printf(m, "VCN: Enabled\n");
4343 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
4344 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
4345 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
4346 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
4347 }
3de4ec57 4348 }
1f96ecef
EQ
4349 seq_printf(m, "\n");
4350 } else {
4351 /* UVD clocks */
4352 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
4353 if (!value) {
4354 seq_printf(m, "UVD: Disabled\n");
4355 } else {
4356 seq_printf(m, "UVD: Enabled\n");
4357 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
4358 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
4359 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
4360 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
4361 }
4362 }
4363 seq_printf(m, "\n");
3de4ec57 4364
1f96ecef
EQ
4365 /* VCE clocks */
4366 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
4367 if (!value) {
4368 seq_printf(m, "VCE: Disabled\n");
4369 } else {
4370 seq_printf(m, "VCE: Enabled\n");
4371 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
4372 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
4373 }
3de4ec57
TSD
4374 }
4375 }
4376
4377 return 0;
4378}
4379
44762718
NC
4380static const struct cg_flag_name clocks[] = {
4381 {AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
4382 {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
4383 {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
4384 {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
4385 {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
4386 {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
4387 {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
4388 {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
4389 {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
4390 {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
4391 {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
4392 {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
4393 {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
4394 {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
4395 {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
4396 {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
4397 {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
4398 {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
4399 {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
4400 {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
4401 {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
4402 {AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"},
4403 {AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
4404 {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
4405 {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
4406 {AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"},
4407 {AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"},
4408 {AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"},
4409 {AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"},
4410 {AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"},
4411 {AMD_CG_SUPPORT_REPEATER_FGCG, "Repeater Fine Grain Clock Gating"},
4412 {AMD_CG_SUPPORT_GFX_PERF_CLK, "Perfmon Clock Gating"},
4413 {AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
4414 {AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
4415 {0, NULL},
4416};
4417
25faeddc 4418static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags)
a8503b15
HR
4419{
4420 int i;
4421
4422 for (i = 0; clocks[i].flag; i++)
4423 seq_printf(m, "\t%s: %s\n", clocks[i].name,
4424 (flags & clocks[i].flag) ? "On" : "Off");
4425}
4426
373720f7 4427static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
d38ceaf9 4428{
373720f7
ND
4429 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
4430 struct drm_device *dev = adev_to_drm(adev);
25faeddc 4431 u64 flags = 0;
b9a9294b
AD
4432 int r;
4433
53b3f8f4 4434 if (amdgpu_in_reset(adev))
48b270bb 4435 return -EPERM;
d2ae842d
AD
4436 if (adev->in_suspend && !adev->in_runpm)
4437 return -EPERM;
48b270bb 4438
b9a9294b 4439 r = pm_runtime_get_sync(dev->dev);
66429300
AD
4440 if (r < 0) {
4441 pm_runtime_put_autosuspend(dev->dev);
b9a9294b 4442 return r;
66429300 4443 }
6cb2d4e4 4444
79c65f3f 4445 if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) {
b9a9294b 4446 r = amdgpu_debugfs_pm_info_pp(m, adev);
79c65f3f
EQ
4447 if (r)
4448 goto out;
d38ceaf9 4449 }
81b41ff5 4450
81b41ff5 4451 amdgpu_device_ip_get_clockgating_state(adev, &flags);
81b41ff5 4452
25faeddc 4453 seq_printf(m, "Clock Gating Flags Mask: 0x%llx\n", flags);
81b41ff5
EQ
4454 amdgpu_parse_cg_state(m, flags);
4455 seq_printf(m, "\n");
d38ceaf9 4456
81b41ff5 4457out:
b9a9294b
AD
4458 pm_runtime_mark_last_busy(dev->dev);
4459 pm_runtime_put_autosuspend(dev->dev);
4460
4461 return r;
d38ceaf9
AD
4462}
4463
373720f7
ND
4464DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info);
4465
27ebf21f
LL
4466/*
4467 * amdgpu_pm_priv_buffer_read - Read memory region allocated to FW
4468 *
4469 * Reads debug memory region allocated to PMFW
4470 */
4471static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
4472 size_t size, loff_t *pos)
4473{
4474 struct amdgpu_device *adev = file_inode(f)->i_private;
27ebf21f
LL
4475 size_t smu_prv_buf_size;
4476 void *smu_prv_buf;
79c65f3f 4477 int ret = 0;
27ebf21f
LL
4478
4479 if (amdgpu_in_reset(adev))
4480 return -EPERM;
4481 if (adev->in_suspend && !adev->in_runpm)
4482 return -EPERM;
4483
79c65f3f
EQ
4484 ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size);
4485 if (ret)
4486 return ret;
27ebf21f
LL
4487
4488 if (!smu_prv_buf || !smu_prv_buf_size)
4489 return -EINVAL;
4490
4491 return simple_read_from_buffer(buf, size, pos, smu_prv_buf,
4492 smu_prv_buf_size);
4493}
4494
4495static const struct file_operations amdgpu_debugfs_pm_prv_buffer_fops = {
4496 .owner = THIS_MODULE,
4497 .open = simple_open,
4498 .read = amdgpu_pm_prv_buffer_read,
4499 .llseek = default_llseek,
4500};
4501
d38ceaf9
AD
4502#endif
4503
/*
 * Register the power-management debugfs entries: amdgpu_pm_info, the
 * optional PMFW private buffer, and the SMU STB files. No-op unless
 * CONFIG_DEBUG_FS is set and DPM is enabled.
 */
void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	if (!adev->pm.dpm_enabled)
		return;

	debugfs_create_file("amdgpu_pm_info", 0444, root, adev,
			    &amdgpu_debugfs_pm_info_fops);

	if (adev->pm.smu_prv_buffer_size > 0)
		debugfs_create_file_size("amdgpu_pm_prv_buffer", 0444, root,
					 adev,
					 &amdgpu_debugfs_pm_prv_buffer_fops,
					 adev->pm.smu_prv_buffer_size);

	amdgpu_dpm_stb_debug_fs_init(adev);
#endif
}