/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include "amdgpu.h"
#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu_sched.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
#include "amdgpu_ras.h"

void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;
	int i;

	mutex_lock(&mgpu_info.mutex);

	for (i = 0; i < mgpu_info.num_gpu; i++) {
		gpu_instance = &(mgpu_info.gpu_ins[i]);
		if (gpu_instance->adev == adev) {
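			/* Swap-remove: overwrite this slot with the last
			 * registered instance so the array stays dense.
			 */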
			mgpu_info.gpu_ins[i] =
				mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
			mgpu_info.num_gpu--;
			if (adev->flags & AMD_IS_APU)
				mgpu_info.num_apu--;
			else
				mgpu_info.num_dgpu--;
			break;
		}
	}

	mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 */
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev == NULL)
		return;

	amdgpu_unregister_gpu_instance(adev);

	if (adev->rmmio == NULL)
		goto done_free;

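	/* Wake the device and block further runtime suspends while we tear
	 * everything down; this also balances the pm_runtime_put_autosuspend()
	 * done at load time.
	 */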
	if (adev->runpm) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	amdgpu_acpi_fini(adev);

	amdgpu_device_fini(adev);

done_free:
	kfree(adev);
	dev->dev_private = NULL;
}

void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;

	mutex_lock(&mgpu_info.mutex);

	if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
		DRM_ERROR("Cannot register more GPU instances\n");
		mutex_unlock(&mgpu_info.mutex);
		return;
	}

	gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
	gpu_instance->adev = adev;
	gpu_instance->mgpu_fan_enabled = 0;

	mgpu_info.num_gpu++;
	if (adev->flags & AMD_IS_APU)
		mgpu_info.num_apu++;
	else
		mgpu_info.num_dgpu++;

	mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct amdgpu_device *adev;
	int r, acpi_status;

	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
	if (adev == NULL) {
		return -ENOMEM;
	}
	dev->dev_private = (void *)adev;

	if (amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    ((flags & AMD_IS_APU) == 0) &&
	    !pci_is_thunderbolt_attached(dev->pdev))
		flags |= AMD_IS_PX;

	/* amdgpu_device_init() should report only fatal errors, such as
	 * memory allocation, iomapping, or memory manager initialization
	 * failures; it must properly initialize the GPU MC controller and
	 * permit VRAM allocation.
	 */
	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

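	/* BOCO (Bus Off, Chip Off, e.g. ATPX/hybrid platforms) powers the
	 * whole device down through platform firmware; BACO (Bus Active,
	 * Chip Off) keeps the PCIe link up while the GPU core is off.
	 */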
	if (amdgpu_device_supports_boco(dev) &&
	    (amdgpu_runtime_pm != 0)) { /* enable runpm by default for boco */
		adev->runpm = true;
	} else if (amdgpu_device_supports_baco(dev) &&
		   (amdgpu_runtime_pm != 0)) {
		switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
		case CHIP_BONAIRE:
		case CHIP_HAWAII:
#endif
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		case CHIP_SIENNA_CICHLID:
			/* enable runpm if runpm=1 */
			if (amdgpu_runtime_pm > 0)
				adev->runpm = true;
			break;
		case CHIP_VEGA10:
			/* turn runpm on if noretry=0 */
			if (!amdgpu_noretry)
				adev->runpm = true;
			break;
		default:
			/* enable runpm on VI+ */
			adev->runpm = true;
			break;
		}
	}

	/* Call ACPI methods: they require modeset init,
	 * but failure is not fatal.
	 */

	acpi_status = amdgpu_acpi_init(adev);
	if (acpi_status)
		dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");

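	/* Enable autosuspend with a 5 second delay and drop our usage count
	 * so the GPU can actually runtime suspend when idle;
	 * amdgpu_driver_unload_kms() takes the reference back.
	 */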
	if (adev->runpm) {
		/* only need to skip on ATPX */
		if (amdgpu_device_supports_boco(dev) &&
		    !amdgpu_is_atpx_hybrid())
			dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	if (r) {
		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
		if (adev->rmmio && adev->runpm)
			pm_runtime_put_noidle(dev->dev);
		amdgpu_driver_unload_kms(dev);
	}

	return r;
}

static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
				struct drm_amdgpu_query_fw *query_fw,
				struct amdgpu_device *adev)
{
	switch (query_fw->fw_type) {
	case AMDGPU_INFO_FW_VCE:
		fw_info->ver = adev->vce.fw_version;
		fw_info->feature = adev->vce.fb_version;
		break;
	case AMDGPU_INFO_FW_UVD:
		fw_info->ver = adev->uvd.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_VCN:
		fw_info->ver = adev->vcn.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GMC:
		fw_info->ver = adev->gmc.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GFX_ME:
		fw_info->ver = adev->gfx.me_fw_version;
		fw_info->feature = adev->gfx.me_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_PFP:
		fw_info->ver = adev->gfx.pfp_fw_version;
		fw_info->feature = adev->gfx.pfp_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_CE:
		fw_info->ver = adev->gfx.ce_fw_version;
		fw_info->feature = adev->gfx.ce_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC:
		fw_info->ver = adev->gfx.rlc_fw_version;
		fw_info->feature = adev->gfx.rlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
		fw_info->ver = adev->gfx.rlc_srlc_fw_version;
		fw_info->feature = adev->gfx.rlc_srlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
		fw_info->ver = adev->gfx.rlc_srlg_fw_version;
		fw_info->feature = adev->gfx.rlc_srlg_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
		fw_info->ver = adev->gfx.rlc_srls_fw_version;
		fw_info->feature = adev->gfx.rlc_srls_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_MEC:
		if (query_fw->index == 0) {
			fw_info->ver = adev->gfx.mec_fw_version;
			fw_info->feature = adev->gfx.mec_feature_version;
		} else if (query_fw->index == 1) {
			fw_info->ver = adev->gfx.mec2_fw_version;
			fw_info->feature = adev->gfx.mec2_feature_version;
		} else
			return -EINVAL;
		break;
	case AMDGPU_INFO_FW_SMC:
		fw_info->ver = adev->pm.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_TA:
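		/* index 0 selects the XGMI TA, index 1 the RAS TA */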
		if (query_fw->index > 1)
			return -EINVAL;
		if (query_fw->index == 0) {
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ta_xgmi_ucode_version;
		} else {
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ta_ras_ucode_version;
		}
		break;
	case AMDGPU_INFO_FW_SDMA:
		if (query_fw->index >= adev->sdma.num_instances)
			return -EINVAL;
		fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
		fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
		break;
	case AMDGPU_INFO_FW_SOS:
		fw_info->ver = adev->psp.sos_fw_version;
		fw_info->feature = adev->psp.sos_feature_version;
		break;
	case AMDGPU_INFO_FW_ASD:
		fw_info->ver = adev->psp.asd_fw_version;
		fw_info->feature = adev->psp.asd_feature_version;
		break;
	case AMDGPU_INFO_FW_DMCU:
		fw_info->ver = adev->dm.dmcu_fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_DMCUB:
		fw_info->ver = adev->dm.dmcub_fw_version;
		fw_info->feature = 0;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
			     struct drm_amdgpu_info *info,
			     struct drm_amdgpu_info_hw_ip *result)
{
	uint32_t ib_start_alignment = 0;
	uint32_t ib_size_alignment = 0;
	enum amd_ip_block_type type;
	unsigned int num_rings = 0;
	unsigned int i, j;

	if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
		return -EINVAL;

	switch (info->query_hw_ip.type) {
	case AMDGPU_HW_IP_GFX:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			if (adev->gfx.gfx_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_COMPUTE:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			if (adev->gfx.compute_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_DMA:
		type = AMD_IP_BLOCK_TYPE_SDMA;
		for (i = 0; i < adev->sdma.num_instances; i++)
			if (adev->sdma.instance[i].ring.sched.ready)
				++num_rings;
		ib_start_alignment = 256;
		ib_size_alignment = 4;
		break;
	case AMDGPU_HW_IP_UVD:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			if (adev->uvd.inst[i].ring.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCE:
		type = AMD_IP_BLOCK_TYPE_VCE;
		for (i = 0; i < adev->vce.num_rings; i++)
			if (adev->vce.ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 4;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_UVD_ENC:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->uvd.num_enc_rings; j++)
				if (adev->uvd.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCN_DEC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			if (adev->vcn.inst[i].ring_dec.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	case AMDGPU_HW_IP_VCN_ENC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->vcn.num_enc_rings; j++)
				if (adev->vcn.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_VCN_JPEG:
		type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
			AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;

		for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
			if (adev->jpeg.harvest_config & (1 << i))
				continue;

			if (adev->jpeg.inst[i].ring_dec.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type &&
		    adev->ip_blocks[i].status.valid)
			break;

	if (i == adev->num_ip_blocks)
		return 0;

	num_rings = min(amdgpu_ctx_num_entities[info->query_hw_ip.type],
			num_rings);

	result->hw_ip_version_major = adev->ip_blocks[i].version->major;
	result->hw_ip_version_minor = adev->ip_blocks[i].version->minor;
	result->capabilities_flags = 0;
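	/* One bit per exposed ring: rings [0, num_rings) are usable */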
	result->available_rings = (1 << num_rings) - 1;
	result->ib_start_alignment = ib_start_alignment;
	result->ib_size_alignment = ib_size_alignment;
	return 0;
}

/*
 * Userspace get information ioctl
 */
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @dev: drm device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline parms, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
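/*
 * A minimal sketch of how userspace reaches this ioctl through libdrm
 * (illustrative only, not part of this file):
 *
 *	struct drm_amdgpu_info request = {};
 *	uint64_t bytes_moved = 0;
 *
 *	request.return_pointer = (uintptr_t)&bytes_moved;
 *	request.return_size = sizeof(bytes_moved);
 *	request.query = AMDGPU_INFO_NUM_BYTES_MOVED;
 *	drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(request));
 */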
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info *info = data;
	struct amdgpu_mode_info *minfo = &adev->mode_info;
	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
	uint32_t size = info->return_size;
	struct drm_crtc *crtc;
	uint32_t ui32 = 0;
	uint64_t ui64 = 0;
	int i, found;
	int ui32_size = sizeof(ui32);

	if (!info->return_size || !info->return_pointer)
		return -EINVAL;

	switch (info->query) {
	case AMDGPU_INFO_ACCEL_WORKING:
		ui32 = adev->accel_working;
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == info->mode_crtc.id) {
				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
				ui32 = amdgpu_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_HW_IP_INFO: {
		struct drm_amdgpu_info_hw_ip ip = {};
		int ret;

		ret = amdgpu_hw_ip_info(adev, info, &ip);
		if (ret)
			return ret;

		ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
		return ret ? -EFAULT : 0;
	}
	case AMDGPU_INFO_HW_IP_COUNT: {
		enum amd_ip_block_type type;
		uint32_t count = 0;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
		case AMDGPU_HW_IP_VCN_ENC:
			type = AMD_IP_BLOCK_TYPE_VCN;
			break;
		case AMDGPU_HW_IP_VCN_JPEG:
			type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
				AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++)
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid &&
			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
				count++;

		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_TIMESTAMP:
		ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_FW_VERSION: {
		struct drm_amdgpu_info_firmware fw_info;
		int ret;

		/* We only support one instance of each IP block right now. */
		if (info->query_fw.ip_instance != 0)
			return -EINVAL;

		ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
		if (ret)
			return ret;

		return copy_to_user(out, &fw_info,
				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_NUM_BYTES_MOVED:
		ui64 = atomic64_read(&adev->num_bytes_moved);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_EVICTIONS:
		ui64 = atomic64_read(&adev->num_evictions);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VIS_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GTT_USAGE:
		ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GDS_CONFIG: {
		struct drm_amdgpu_info_gds gds_info;

		memset(&gds_info, 0, sizeof(gds_info));
		gds_info.compute_partition_size = adev->gds.gds_size;
		gds_info.gds_total_size = adev->gds.gds_size;
		gds_info.gws_per_compute_partition = adev->gds.gws_size;
		gds_info.oa_per_compute_partition = adev->gds.oa_size;
		return copy_to_user(out, &gds_info,
				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_GTT: {
		struct drm_amdgpu_info_vram_gtt vram_gtt;

		vram_gtt.vram_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size) -
			AMDGPU_VM_RESERVED_VRAM;
		vram_gtt.vram_cpu_accessible_size =
			min(adev->gmc.visible_vram_size -
			    atomic64_read(&adev->visible_pin_size),
			    vram_gtt.vram_size);
		vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
		vram_gtt.gtt_size *= PAGE_SIZE;
		vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
		return copy_to_user(out, &vram_gtt,
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_MEMORY: {
		struct drm_amdgpu_memory_info mem;

		memset(&mem, 0, sizeof(mem));
		mem.vram.total_heap_size = adev->gmc.real_vram_size;
		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size) -
			AMDGPU_VM_RESERVED_VRAM;
		mem.vram.heap_usage =
			amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;

		mem.cpu_accessible_vram.total_heap_size =
			adev->gmc.visible_vram_size;
		mem.cpu_accessible_vram.usable_heap_size =
			min(adev->gmc.visible_vram_size -
			    atomic64_read(&adev->visible_pin_size),
			    mem.vram.usable_heap_size);
		mem.cpu_accessible_vram.heap_usage =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.cpu_accessible_vram.max_allocation =
			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;

		mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
		mem.gtt.total_heap_size *= PAGE_SIZE;
		mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
			atomic64_read(&adev->gart_pin_size);
		mem.gtt.heap_usage =
			amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;

		return copy_to_user(out, &mem,
				    min((size_t)size, sizeof(mem)))
				    ? -EFAULT : 0;
	}
	case AMDGPU_INFO_READ_MMR_REG: {
		unsigned n, alloc_size;
		uint32_t *regs;
		unsigned se_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
		unsigned sh_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SH_INDEX_MASK;

		/* Set full masks if userspace set all bits
		 * in the bitfields.
		 */
		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
			se_num = 0xffffffff;
		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
			sh_num = 0xffffffff;

		if (info->read_mmr_reg.count > 128)
			return -EINVAL;

		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
		if (!regs)
			return -ENOMEM;
		alloc_size = info->read_mmr_reg.count * sizeof(*regs);

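		/* Keep GFX powered up (disable GFXOFF) while we read
		 * registers over MMIO, then re-enable it afterwards.
		 */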
		amdgpu_gfx_off_ctrl(adev, false);
		for (i = 0; i < info->read_mmr_reg.count; i++) {
			if (amdgpu_asic_read_register(adev, se_num, sh_num,
						      info->read_mmr_reg.dword_offset + i,
						      &regs[i])) {
				DRM_DEBUG_KMS("unallowed offset %#x\n",
					      info->read_mmr_reg.dword_offset + i);
				kfree(regs);
				amdgpu_gfx_off_ctrl(adev, true);
				return -EFAULT;
			}
		}
		amdgpu_gfx_off_ctrl(adev, true);
		n = copy_to_user(out, regs, min(size, alloc_size));
		kfree(regs);
		return n ? -EFAULT : 0;
	}
	case AMDGPU_INFO_DEV_INFO: {
		struct drm_amdgpu_info_device dev_info = {};
		uint64_t vm_size;

		dev_info.device_id = dev->pdev->device;
		dev_info.chip_rev = adev->rev_id;
		dev_info.external_rev = adev->external_rev_id;
		dev_info.pci_rev = dev->pdev->revision;
		dev_info.family = adev->family;
		dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
		dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
		/* return all clocks in kHz */
		dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
		if (adev->pm.dpm_enabled) {
			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
		} else {
			dev_info.max_engine_clock = adev->clock.default_sclk * 10;
			dev_info.max_memory_clock = adev->clock.default_mclk * 10;
		}
		dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
		dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
			adev->gfx.config.max_shader_engines;
		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
		dev_info._pad = 0;
		dev_info.ids_flags = 0;
		if (adev->flags & AMD_IS_APU)
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
		if (amdgpu_mcbp || amdgpu_sriov_vf(adev))
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;

		vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
		vm_size -= AMDGPU_VA_RESERVED_SIZE;

		/* Older VCE FW versions are buggy and can handle only 40bits */
		if (adev->vce.fw_version &&
		    adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
			vm_size = min(vm_size, 1ULL << 40);

		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
		dev_info.virtual_address_max =
			min(vm_size, AMDGPU_GMC_HOLE_START);

		if (vm_size > AMDGPU_GMC_HOLE_START) {
			dev_info.high_va_offset = AMDGPU_GMC_HOLE_END;
			dev_info.high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
		}
		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
		dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
		dev_info.cu_active_number = adev->gfx.cu_info.number;
		dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
		dev_info.ce_ram_size = adev->gfx.ce_ram_size;
		memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
		       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
		memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
		       sizeof(adev->gfx.cu_info.bitmap));
		dev_info.vram_type = adev->gmc.vram_type;
		dev_info.vram_bit_width = adev->gmc.vram_width;
		dev_info.vce_harvest_config = adev->vce.harvest_config;
		dev_info.gc_double_offchip_lds_buf =
			adev->gfx.config.double_offchip_lds_buf;
		dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
		dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
		dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
		dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
		dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
		dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
		dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;

		if (adev->family >= AMDGPU_FAMILY_NV)
			dev_info.pa_sc_tile_steering_override =
				adev->gfx.config.pa_sc_tile_steering_override;

		dev_info.tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;

		return copy_to_user(out, &dev_info,
				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
		unsigned i;
		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
		struct amd_vce_state *vce_state;

		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
			if (vce_state) {
				vce_clk_table.entries[i].sclk = vce_state->sclk;
				vce_clk_table.entries[i].mclk = vce_state->mclk;
				vce_clk_table.entries[i].eclk = vce_state->evclk;
				vce_clk_table.num_valid_entries++;
			}
		}

		return copy_to_user(out, &vce_clk_table,
				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VBIOS: {
		uint32_t bios_size = adev->bios_size;

		switch (info->vbios_info.type) {
		case AMDGPU_INFO_VBIOS_SIZE:
			return copy_to_user(out, &bios_size,
					    min((size_t)size, sizeof(bios_size)))
					    ? -EFAULT : 0;
		case AMDGPU_INFO_VBIOS_IMAGE: {
			uint8_t *bios;
			uint32_t bios_offset = info->vbios_info.offset;

			if (bios_offset >= bios_size)
				return -EINVAL;

			bios = adev->bios + bios_offset;
			return copy_to_user(out, bios,
					    min((size_t)size, (size_t)(bios_size - bios_offset)))
					    ? -EFAULT : 0;
		}
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->vbios_info.type);
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_NUM_HANDLES: {
		struct drm_amdgpu_info_num_handles handle;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_UVD:
			/* Starting with Polaris, we support unlimited UVD handles */
			if (adev->asic_type < CHIP_POLARIS10) {
				handle.uvd_max_handles = adev->uvd.max_handles;
				handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);

				return copy_to_user(out, &handle,
						    min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
			} else {
				return -ENODATA;
			}

			break;
		default:
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_SENSOR: {
		if (!adev->pm.dpm_enabled)
			return -ENOENT;

		switch (info->sensor_info.type) {
		case AMDGPU_INFO_SENSOR_GFX_SCLK:
			/* get sclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GFX_MCLK:
			/* get mclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GPU_TEMP:
			/* get temperature in millidegrees C */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_TEMP,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_LOAD:
			/* get GPU load */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_LOAD,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
			/* get average GPU power */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_POWER,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
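			/* The power sensor reports watts in 8.8 fixed
			 * point; keep the integer part.
			 */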
			ui32 >>= 8;
			break;
		case AMDGPU_INFO_SENSOR_VDDNB:
			/* get VDDNB in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDNB,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_VDDGFX:
			/* get VDDGFX in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDGFX,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
			/* get stable pstate sclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
			/* get stable pstate mclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->sensor_info.type);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_LOST_COUNTER:
		ui32 = atomic_read(&adev->vram_lost_counter);
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_RAS_ENABLED_FEATURES: {
		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
		uint64_t ras_mask;

		if (!ras)
			return -EINVAL;
		ras_mask = (uint64_t)ras->supported << 32 | ras->features;

		return copy_to_user(out, &ras_mask,
				    min_t(u64, size, sizeof(ras_mask))) ?
				    -EFAULT : 0;
	}
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
		return -EINVAL;
	}
	return 0;
}

/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
	drm_fb_helper_lastclose(dev);
	vga_switcheroo_process_delayed_switch();
}

/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv;
	int r, pasid;

	/* Ensure IB tests are run on ring */
	flush_delayed_work(&adev->delayed_init_work);

	if (amdgpu_ras_intr_triggered()) {
		DRM_ERROR("RAS Intr triggered, device disabled!!");
		return -EHWPOISON;
	}

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		goto pm_put;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv)) {
		r = -ENOMEM;
		goto out_suspend;
	}

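	/* Allocate a 16-bit PASID for this VM; fall back to 0 ("no PASID")
	 * if the space is exhausted.
	 */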
	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}
	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
	if (r)
		goto error_pasid;

	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
	if (!fpriv->prt_va) {
		r = -ENOMEM;
		goto error_vm;
	}

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;

		r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
					  &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
		if (r)
			goto error_vm;
	}

	mutex_init(&fpriv->bo_list_lock);
	idr_init(&fpriv->bo_list_handles);

	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);

	file_priv->driver_priv = fpriv;
	goto out_suspend;

error_vm:
	amdgpu_vm_fini(adev, &fpriv->vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	kfree(fpriv);

out_suspend:
	pm_runtime_mark_last_busy(dev->dev);
pm_put:
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}

/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	struct amdgpu_bo *pd;
	unsigned int pasid;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
		amdgpu_uvd_free_handles(adev, file_priv);
	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
		amdgpu_vce_free_handles(adev, file_priv);

	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

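	/* Hold a reference on the page directory so the PASID is freed
	 * only after all pending fences on it have signaled.
	 */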
	pasid = fpriv->vm.pasid;
	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
	amdgpu_vm_fini(adev, &fpriv->vm);

	if (pasid)
		amdgpu_pasid_free_delayed(pd->tbo.base.resv, pasid);
	amdgpu_bo_unref(&pd);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_put(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}

/*
 * VBlank related functions.
 */
/**
 * amdgpu_get_vblank_counter_kms - get frame count
 *
 * @crtc: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_device *adev = dev->dev_private;
	int vpos, hpos, stat;
	u32 count;

	if (pipe >= adev->mode_info.num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
	if (adev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = amdgpu_display_vblank_get_counter(adev, pipe);
			/* Ask amdgpu_display_get_crtc_scanoutpos to return
			 * vpos as distance to start of vblank, instead of
			 * regular vertical scanout pos.
			 */
			stat = amdgpu_display_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&adev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != amdgpu_display_vblank_get_counter(adev, pipe));

		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		} else {
			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	} else {
		/* Fallback to use value as is. */
		count = amdgpu_display_vblank_get_counter(adev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}

/**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @crtc: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_enable_vblank_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}

/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @crtc: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}

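/* DRM_AUTH: the caller must be authenticated against the DRM master on the
 * primary node; DRM_RENDER_ALLOW additionally permits the ioctl on render
 * nodes. DRM_MASTER restricts the ioctl to the current DRM master.
 */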
const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	/* KMS */
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

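/* Dumps firmware and VBIOS versions; typically exposed via drm debugfs as
 * /sys/kernel/debug/dri/<minor>/amdgpu_firmware_info.
 */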
static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info_firmware fw_info;
	struct drm_amdgpu_query_fw query_fw;
	struct atom_context *ctx = adev->mode_info.atom_context;
	int ret, i;

	/* VCE */
	query_fw.fw_type = AMDGPU_INFO_FW_VCE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* UVD */
	query_fw.fw_type = AMDGPU_INFO_FW_UVD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* GMC */
	query_fw.fw_type = AMDGPU_INFO_FW_GMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* ME */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PFP */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* CE */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST CNTL */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST GPM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST SRM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
	query_fw.index = 0;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC2 */
	if (adev->gfx.mec2_fw) {
		query_fw.index = 1;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
			   fw_info.feature, fw_info.ver);
	}

	/* PSP SOS */
	query_fw.fw_type = AMDGPU_INFO_FW_SOS;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PSP ASD */
	query_fw.fw_type = AMDGPU_INFO_FW_ASD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_TA;
	for (i = 0; i < 2; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			continue;
		seq_printf(m, "TA %s feature version: %u, firmware version: 0x%08x\n",
			   i ? "RAS" : "XGMI", fw_info.feature, fw_info.ver);
	}

	/* SMC */
	query_fw.fw_type = AMDGPU_INFO_FW_SMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* SDMA */
	query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
			   i, fw_info.feature, fw_info.ver);
	}

	/* VCN */
	query_fw.fw_type = AMDGPU_INFO_FW_VCN;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* DMCU */
	query_fw.fw_type = AMDGPU_INFO_FW_DMCU;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* DMCUB */
	query_fw.fw_type = AMDGPU_INFO_FW_DMCUB;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCUB feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);

	return 0;
}

static const struct drm_info_list amdgpu_firmware_info_list[] = {
	{"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
};
#endif

int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
					ARRAY_SIZE(amdgpu_firmware_info_list));
#else
	return 0;
#endif
}