/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu_sched.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
#include "amdgpu_ras.h"
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;
	int i;

	mutex_lock(&mgpu_info.mutex);

	for (i = 0; i < mgpu_info.num_gpu; i++) {
		gpu_instance = &(mgpu_info.gpu_ins[i]);
		if (gpu_instance->adev == adev) {
			mgpu_info.gpu_ins[i] =
				mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
			mgpu_info.num_gpu--;
			if (adev->flags & AMD_IS_APU)
				mgpu_info.num_apu--;
			else
				mgpu_info.num_dgpu--;
			break;
		}
	}

	mutex_unlock(&mgpu_info.mutex);
}
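
/*
 * Note on the bookkeeping above: removal does not preserve ordering; the
 * matching entry is overwritten by the last gpu_ins[] slot and the count is
 * decremented, which keeps the array dense without shifting elements.
 */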

/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 * Returns 0 on success.
 */
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev == NULL)
		return;

	amdgpu_unregister_gpu_instance(adev);

	if (adev->rmmio == NULL)
		goto done_free;

	if (adev->runpm) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	amdgpu_acpi_fini(adev);

	amdgpu_device_fini(adev);

done_free:
	kfree(adev);
	dev->dev_private = NULL;
}

void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;

	mutex_lock(&mgpu_info.mutex);

	if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
		DRM_ERROR("Cannot register more gpu instance\n");
		mutex_unlock(&mgpu_info.mutex);
		return;
	}

	gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
	gpu_instance->adev = adev;
	gpu_instance->mgpu_fan_enabled = 0;

	mgpu_info.num_gpu++;
	if (adev->flags & AMD_IS_APU)
		mgpu_info.num_apu++;
	else
		mgpu_info.num_dgpu++;

	mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct amdgpu_device *adev;
	int r, acpi_status;

	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
	if (adev == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)adev;

	if (amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    ((flags & AMD_IS_APU) == 0) &&
	    !pci_is_thunderbolt_attached(dev->pdev))
		flags |= AMD_IS_PX;

	/* amdgpu_device_init should report only fatal error
	 * like memory allocation failure or iomapping failure,
	 * or memory manager initialization failure, it must
	 * properly initialize the GPU MC controller and permit
	 * VRAM allocation
	 */
	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	if (amdgpu_device_supports_boco(dev) &&
	    (amdgpu_runtime_pm != 0)) { /* enable runpm by default for boco */
		adev->runpm = true;
	} else if (amdgpu_device_supports_baco(dev) &&
		   (amdgpu_runtime_pm != 0)) {
		switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
		case CHIP_BONAIRE:
		case CHIP_HAWAII:
#endif
		case CHIP_SIENNA_CICHLID:
			/* enable runpm if runpm=1 */
			if (amdgpu_runtime_pm > 0)
				adev->runpm = true;
			break;
		case CHIP_VEGA10:
			/* turn runpm on if noretry=0 */
			if (!amdgpu_noretry)
				adev->runpm = true;
			break;
		default:
			/* enable runpm on VI+ */
			adev->runpm = true;
			break;
		}
	}

	/* Call ACPI methods: require modeset init
	 * but failure is not fatal
	 */

	acpi_status = amdgpu_acpi_init(adev);
	if (acpi_status)
		dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");

	if (adev->runpm) {
		/* only need to skip on ATPX */
		if (amdgpu_device_supports_boco(dev) &&
		    !amdgpu_is_atpx_hybrid())
			dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	if (r) {
		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
		if (adev->rmmio && adev->runpm)
			pm_runtime_put_noidle(dev->dev);
		amdgpu_driver_unload_kms(dev);
	}

	return r;
}
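
/*
 * Runtime PM note: when runpm is enabled above, the final
 * pm_runtime_put_autosuspend() hands back the initial reference so the
 * device may autosuspend after 5 seconds of idleness.
 * amdgpu_driver_unload_kms() takes pm_runtime_get_sync() again before
 * teardown, which is what the error path's pm_runtime_put_noidle() is
 * balancing.
 */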

static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
				struct drm_amdgpu_query_fw *query_fw,
				struct amdgpu_device *adev)
{
	switch (query_fw->fw_type) {
	case AMDGPU_INFO_FW_VCE:
		fw_info->ver = adev->vce.fw_version;
		fw_info->feature = adev->vce.fb_version;
		break;
	case AMDGPU_INFO_FW_UVD:
		fw_info->ver = adev->uvd.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_VCN:
		fw_info->ver = adev->vcn.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GMC:
		fw_info->ver = adev->gmc.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GFX_ME:
		fw_info->ver = adev->gfx.me_fw_version;
		fw_info->feature = adev->gfx.me_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_PFP:
		fw_info->ver = adev->gfx.pfp_fw_version;
		fw_info->feature = adev->gfx.pfp_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_CE:
		fw_info->ver = adev->gfx.ce_fw_version;
		fw_info->feature = adev->gfx.ce_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC:
		fw_info->ver = adev->gfx.rlc_fw_version;
		fw_info->feature = adev->gfx.rlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
		fw_info->ver = adev->gfx.rlc_srlc_fw_version;
		fw_info->feature = adev->gfx.rlc_srlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
		fw_info->ver = adev->gfx.rlc_srlg_fw_version;
		fw_info->feature = adev->gfx.rlc_srlg_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
		fw_info->ver = adev->gfx.rlc_srls_fw_version;
		fw_info->feature = adev->gfx.rlc_srls_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_MEC:
		if (query_fw->index == 0) {
			fw_info->ver = adev->gfx.mec_fw_version;
			fw_info->feature = adev->gfx.mec_feature_version;
		} else if (query_fw->index == 1) {
			fw_info->ver = adev->gfx.mec2_fw_version;
			fw_info->feature = adev->gfx.mec2_feature_version;
		} else
			return -EINVAL;
		break;
	case AMDGPU_INFO_FW_SMC:
		fw_info->ver = adev->pm.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_TA:
		if (query_fw->index > 1)
			return -EINVAL;
		if (query_fw->index == 0) {
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ta_xgmi_ucode_version;
		} else {
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ta_ras_ucode_version;
		}
		break;
	case AMDGPU_INFO_FW_SDMA:
		if (query_fw->index >= adev->sdma.num_instances)
			return -EINVAL;
		fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
		fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
		break;
	case AMDGPU_INFO_FW_SOS:
		fw_info->ver = adev->psp.sos_fw_version;
		fw_info->feature = adev->psp.sos_feature_version;
		break;
	case AMDGPU_INFO_FW_ASD:
		fw_info->ver = adev->psp.asd_fw_version;
		fw_info->feature = adev->psp.asd_feature_version;
		break;
	case AMDGPU_INFO_FW_DMCU:
		fw_info->ver = adev->dm.dmcu_fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_DMCUB:
		fw_info->ver = adev->dm.dmcub_fw_version;
		fw_info->feature = 0;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
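
/*
 * Illustrative only (not part of the driver): a minimal userspace sketch of
 * how a firmware query reaches amdgpu_firmware_info() above, going through
 * the DRM_AMDGPU_INFO ioctl with query set to AMDGPU_INFO_FW_VERSION.  Field
 * and ioctl names follow include/uapi/drm/amdgpu_drm.h and libdrm's
 * drmCommandWrite(); error handling is omitted.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <xf86drm.h>
 *	#include <amdgpu_drm.h>
 *
 *	static int query_gfx_me_fw(int fd, struct drm_amdgpu_info_firmware *fw)
 *	{
 *		struct drm_amdgpu_info request;
 *
 *		memset(&request, 0, sizeof(request));
 *		request.return_pointer = (uintptr_t)fw;
 *		request.return_size = sizeof(*fw);
 *		request.query = AMDGPU_INFO_FW_VERSION;
 *		request.query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
 *		request.query_fw.ip_instance = 0;
 *		request.query_fw.index = 0;
 *
 *		return drmCommandWrite(fd, DRM_AMDGPU_INFO,
 *				       &request, sizeof(request));
 *	}
 */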

static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
			     struct drm_amdgpu_info *info,
			     struct drm_amdgpu_info_hw_ip *result)
{
	uint32_t ib_start_alignment = 0;
	uint32_t ib_size_alignment = 0;
	enum amd_ip_block_type type;
	unsigned int num_rings = 0;
	unsigned int i, j;

	if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
		return -EINVAL;

	switch (info->query_hw_ip.type) {
	case AMDGPU_HW_IP_GFX:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			if (adev->gfx.gfx_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_COMPUTE:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			if (adev->gfx.compute_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_DMA:
		type = AMD_IP_BLOCK_TYPE_SDMA;
		for (i = 0; i < adev->sdma.num_instances; i++)
			if (adev->sdma.instance[i].ring.sched.ready)
				++num_rings;
		ib_start_alignment = 256;
		ib_size_alignment = 4;
		break;
	case AMDGPU_HW_IP_UVD:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			if (adev->uvd.inst[i].ring.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCE:
		type = AMD_IP_BLOCK_TYPE_VCE;
		for (i = 0; i < adev->vce.num_rings; i++)
			if (adev->vce.ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 4;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_UVD_ENC:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->uvd.num_enc_rings; j++)
				if (adev->uvd.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCN_DEC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			if (adev->vcn.inst[i].ring_dec.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	case AMDGPU_HW_IP_VCN_ENC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->vcn.num_enc_rings; j++)
				if (adev->vcn.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_VCN_JPEG:
		type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
			AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;

		for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
			if (adev->jpeg.harvest_config & (1 << i))
				continue;

			if (adev->jpeg.inst[i].ring_dec.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type &&
		    adev->ip_blocks[i].status.valid)
			break;

	if (i == adev->num_ip_blocks)
		return 0;

	num_rings = min(amdgpu_ctx_num_entities[info->query_hw_ip.type],
			num_rings);

	result->hw_ip_version_major = adev->ip_blocks[i].version->major;
	result->hw_ip_version_minor = adev->ip_blocks[i].version->minor;
	result->capabilities_flags = 0;
	result->available_rings = (1 << num_rings) - 1;
	result->ib_start_alignment = ib_start_alignment;
	result->ib_size_alignment = ib_size_alignment;
	return 0;
}
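
/*
 * Illustrative only: result->available_rings above is a contiguous bitmask,
 * (1 << num_rings) - 1, so userspace typically derives the usable ring count
 * with a popcount, e.g. (assuming a struct drm_amdgpu_info_hw_ip "ip"
 * already filled in through the DRM_AMDGPU_INFO ioctl):
 *
 *	unsigned int num_rings = __builtin_popcount(ip.available_rings);
 */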

/*
 * Userspace get information ioctl
 */
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @dev: drm device pointer
 * @data: request object
 * @filp: drm file
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline parms, tiling params, etc.
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info *info = data;
	struct amdgpu_mode_info *minfo = &adev->mode_info;
	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
	uint32_t size = info->return_size;
	struct drm_crtc *crtc;
	uint32_t ui32 = 0;
	uint64_t ui64 = 0;
	int i, found;
	int ui32_size = sizeof(ui32);

	if (!info->return_size || !info->return_pointer)
		return -EINVAL;

	switch (info->query) {
	case AMDGPU_INFO_ACCEL_WORKING:
		ui32 = adev->accel_working;
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == info->mode_crtc.id) {
				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
				ui32 = amdgpu_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_HW_IP_INFO: {
		struct drm_amdgpu_info_hw_ip ip = {};
		int ret;

		ret = amdgpu_hw_ip_info(adev, info, &ip);
		if (ret)
			return ret;

		ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
		return ret ? -EFAULT : 0;
	}
	case AMDGPU_INFO_HW_IP_COUNT: {
		enum amd_ip_block_type type;
		uint32_t count = 0;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
		case AMDGPU_HW_IP_VCN_ENC:
			type = AMD_IP_BLOCK_TYPE_VCN;
			break;
		case AMDGPU_HW_IP_VCN_JPEG:
			type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
				AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++)
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid &&
			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
				count++;

		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_TIMESTAMP:
		ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_FW_VERSION: {
		struct drm_amdgpu_info_firmware fw_info;
		int ret;

		/* We only support one instance of each IP block right now. */
		if (info->query_fw.ip_instance != 0)
			return -EINVAL;

		ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
		if (ret)
			return ret;

		return copy_to_user(out, &fw_info,
				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_NUM_BYTES_MOVED:
		ui64 = atomic64_read(&adev->num_bytes_moved);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_EVICTIONS:
		ui64 = atomic64_read(&adev->num_evictions);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VIS_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GTT_USAGE:
		ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GDS_CONFIG: {
		struct drm_amdgpu_info_gds gds_info;

		memset(&gds_info, 0, sizeof(gds_info));
		gds_info.compute_partition_size = adev->gds.gds_size;
		gds_info.gds_total_size = adev->gds.gds_size;
		gds_info.gws_per_compute_partition = adev->gds.gws_size;
		gds_info.oa_per_compute_partition = adev->gds.oa_size;
		return copy_to_user(out, &gds_info,
				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_GTT: {
		struct drm_amdgpu_info_vram_gtt vram_gtt;

		vram_gtt.vram_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size) -
			AMDGPU_VM_RESERVED_VRAM;
		vram_gtt.vram_cpu_accessible_size =
			min(adev->gmc.visible_vram_size -
			    atomic64_read(&adev->visible_pin_size),
			    vram_gtt.vram_size);
		vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
		vram_gtt.gtt_size *= PAGE_SIZE;
		vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
		return copy_to_user(out, &vram_gtt,
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_MEMORY: {
		struct drm_amdgpu_memory_info mem;

		memset(&mem, 0, sizeof(mem));
		mem.vram.total_heap_size = adev->gmc.real_vram_size;
		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size) -
			AMDGPU_VM_RESERVED_VRAM;
		mem.vram.heap_usage =
			amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;

		mem.cpu_accessible_vram.total_heap_size =
			adev->gmc.visible_vram_size;
		mem.cpu_accessible_vram.usable_heap_size =
			min(adev->gmc.visible_vram_size -
			    atomic64_read(&adev->visible_pin_size),
			    mem.vram.usable_heap_size);
		mem.cpu_accessible_vram.heap_usage =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.cpu_accessible_vram.max_allocation =
			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;

		mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
		mem.gtt.total_heap_size *= PAGE_SIZE;
		mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
			atomic64_read(&adev->gart_pin_size);
		mem.gtt.heap_usage =
			amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;

		return copy_to_user(out, &mem,
				    min((size_t)size, sizeof(mem)))
				    ? -EFAULT : 0;
	}
	case AMDGPU_INFO_READ_MMR_REG: {
		unsigned n, alloc_size;
		uint32_t *regs;
		unsigned se_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
		unsigned sh_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SH_INDEX_MASK;

		/* set full masks if the userspace set all bits
		 * in the bitfields */
		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
			se_num = 0xffffffff;
		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
			sh_num = 0xffffffff;

		if (info->read_mmr_reg.count > 128)
			return -EINVAL;

		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
		if (!regs)
			return -ENOMEM;
		alloc_size = info->read_mmr_reg.count * sizeof(*regs);

		amdgpu_gfx_off_ctrl(adev, false);
		for (i = 0; i < info->read_mmr_reg.count; i++) {
			if (amdgpu_asic_read_register(adev, se_num, sh_num,
						      info->read_mmr_reg.dword_offset + i,
						      &regs[i])) {
				DRM_DEBUG_KMS("unallowed offset %#x\n",
					      info->read_mmr_reg.dword_offset + i);
				kfree(regs);
				amdgpu_gfx_off_ctrl(adev, true);
				return -EFAULT;
			}
		}
		amdgpu_gfx_off_ctrl(adev, true);
		n = copy_to_user(out, regs, min(size, alloc_size));
		kfree(regs);
		return n ? -EFAULT : 0;
	}
	case AMDGPU_INFO_DEV_INFO: {
		struct drm_amdgpu_info_device dev_info = {};
		uint64_t vm_size;

		dev_info.device_id = dev->pdev->device;
		dev_info.chip_rev = adev->rev_id;
		dev_info.external_rev = adev->external_rev_id;
		dev_info.pci_rev = dev->pdev->revision;
		dev_info.family = adev->family;
		dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
		dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
		/* return all clocks in KHz */
		dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
		if (adev->pm.dpm_enabled) {
			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
		} else {
			dev_info.max_engine_clock = adev->clock.default_sclk * 10;
			dev_info.max_memory_clock = adev->clock.default_mclk * 10;
		}
		dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
		dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
			adev->gfx.config.max_shader_engines;
		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
		dev_info.ids_flags = 0;
		if (adev->flags & AMD_IS_APU)
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
		if (amdgpu_mcbp || amdgpu_sriov_vf(adev))
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;

		vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
		vm_size -= AMDGPU_VA_RESERVED_SIZE;

		/* Older VCE FW versions are buggy and can handle only 40bits */
		if (adev->vce.fw_version &&
		    adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
			vm_size = min(vm_size, 1ULL << 40);

		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
		dev_info.virtual_address_max =
			min(vm_size, AMDGPU_GMC_HOLE_START);

		if (vm_size > AMDGPU_GMC_HOLE_START) {
			dev_info.high_va_offset = AMDGPU_GMC_HOLE_END;
			dev_info.high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
		}
		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
		dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
		dev_info.cu_active_number = adev->gfx.cu_info.number;
		dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
		dev_info.ce_ram_size = adev->gfx.ce_ram_size;
		memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
		       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
		memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
		       sizeof(adev->gfx.cu_info.bitmap));
		dev_info.vram_type = adev->gmc.vram_type;
		dev_info.vram_bit_width = adev->gmc.vram_width;
		dev_info.vce_harvest_config = adev->vce.harvest_config;
		dev_info.gc_double_offchip_lds_buf =
			adev->gfx.config.double_offchip_lds_buf;
		dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
		dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
		dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
		dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
		dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
		dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
		dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;

		if (adev->family >= AMDGPU_FAMILY_NV)
			dev_info.pa_sc_tile_steering_override =
				adev->gfx.config.pa_sc_tile_steering_override;

		dev_info.tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;

		return copy_to_user(out, &dev_info,
				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
		struct amd_vce_state *vce_state;

		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
			if (vce_state) {
				vce_clk_table.entries[i].sclk = vce_state->sclk;
				vce_clk_table.entries[i].mclk = vce_state->mclk;
				vce_clk_table.entries[i].eclk = vce_state->evclk;
				vce_clk_table.num_valid_entries++;
			}
		}

		return copy_to_user(out, &vce_clk_table,
				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VBIOS: {
		uint32_t bios_size = adev->bios_size;

		switch (info->vbios_info.type) {
		case AMDGPU_INFO_VBIOS_SIZE:
			return copy_to_user(out, &bios_size,
					    min((size_t)size, sizeof(bios_size)))
					    ? -EFAULT : 0;
		case AMDGPU_INFO_VBIOS_IMAGE: {
			uint8_t *bios;
			uint32_t bios_offset = info->vbios_info.offset;

			if (bios_offset >= bios_size)
				return -EINVAL;

			bios = adev->bios + bios_offset;
			return copy_to_user(out, bios,
					    min((size_t)size, (size_t)(bios_size - bios_offset)))
					    ? -EFAULT : 0;
		}
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->vbios_info.type);
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_NUM_HANDLES: {
		struct drm_amdgpu_info_num_handles handle;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_UVD:
			/* Starting Polaris, we support unlimited UVD handles */
			if (adev->asic_type < CHIP_POLARIS10) {
				handle.uvd_max_handles = adev->uvd.max_handles;
				handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);

				return copy_to_user(out, &handle,
						    min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
			} else {
				return -ENODATA;
			}

			break;
		default:
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_SENSOR: {
		if (!adev->pm.dpm_enabled)
			return -ENOENT;

		switch (info->sensor_info.type) {
		case AMDGPU_INFO_SENSOR_GFX_SCLK:
			/* get sclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GFX_MCLK:
			/* get mclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GPU_TEMP:
			/* get temperature in millidegrees C */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_TEMP,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_LOAD:
			/* get GPU load */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_LOAD,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
			/* get average GPU power */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_POWER,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 >>= 8;
			break;
		case AMDGPU_INFO_SENSOR_VDDNB:
			/* get VDDNB in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDNB,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_VDDGFX:
			/* get VDDGFX in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDGFX,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
			/* get stable pstate sclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
			/* get stable pstate mclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->sensor_info.type);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_LOST_COUNTER:
		ui32 = atomic_read(&adev->vram_lost_counter);
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_RAS_ENABLED_FEATURES: {
		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
		uint64_t ras_mask;

		if (!ras)
			return -EINVAL;
		ras_mask = (uint64_t)ras->supported << 32 | ras->features;

		return copy_to_user(out, &ras_mask,
				    min_t(u64, size, sizeof(ras_mask))) ?
				    -EFAULT : 0;
	}
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
		return -EINVAL;
	}
	return 0;
}
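
/*
 * Illustrative only (not part of the driver): a minimal userspace sketch of
 * the AMDGPU_INFO_SENSOR path handled above, reading the GPU temperature in
 * millidegrees Celsius.  Names follow include/uapi/drm/amdgpu_drm.h and
 * libdrm's drmCommandWrite(); error handling is omitted.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <xf86drm.h>
 *	#include <amdgpu_drm.h>
 *
 *	static int query_gpu_temp(int fd, uint32_t *millideg)
 *	{
 *		struct drm_amdgpu_info request;
 *
 *		memset(&request, 0, sizeof(request));
 *		request.return_pointer = (uintptr_t)millideg;
 *		request.return_size = sizeof(*millideg);
 *		request.query = AMDGPU_INFO_SENSOR;
 *		request.sensor_info.type = AMDGPU_INFO_SENSOR_GPU_TEMP;
 *
 *		return drmCommandWrite(fd, DRM_AMDGPU_INFO,
 *				       &request, sizeof(request));
 *	}
 */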

/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
	drm_fb_helper_lastclose(dev);
	vga_switcheroo_process_delayed_switch();
}

/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv;
	int r, pasid;

	/* Ensure IB tests are run on ring */
	flush_delayed_work(&adev->delayed_init_work);

	if (amdgpu_ras_intr_triggered()) {
		DRM_ERROR("RAS Intr triggered, device disabled!!");
		return -EHWPOISON;
	}

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		goto pm_put;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv)) {
		r = -ENOMEM;
		goto out_suspend;
	}

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}
	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
	if (r)
		goto error_pasid;

	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
	if (!fpriv->prt_va) {
		r = -ENOMEM;
		goto error_vm;
	}

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;

		r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
					  &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
		if (r)
			goto error_vm;
	}

	mutex_init(&fpriv->bo_list_lock);
	idr_init(&fpriv->bo_list_handles);

	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);

	file_priv->driver_priv = fpriv;
	goto out_suspend;

error_vm:
	amdgpu_vm_fini(adev, &fpriv->vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	kfree(fpriv);

out_suspend:
	pm_runtime_mark_last_busy(dev->dev);
pm_put:
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}

/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	struct amdgpu_bo *pd;
	unsigned int pasid;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
		amdgpu_uvd_free_handles(adev, file_priv);
	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
		amdgpu_vce_free_handles(adev, file_priv);

	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

	pasid = fpriv->vm.pasid;
	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
	amdgpu_vm_fini(adev, &fpriv->vm);

	if (pasid)
		amdgpu_pasid_free_delayed(pd->tbo.base.resv, pasid);
	amdgpu_bo_unref(&pd);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_put(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}

/*
 * VBlank related functions.
 */
/**
 * amdgpu_get_vblank_counter_kms - get frame count
 *
 * @crtc: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_device *adev = dev->dev_private;
	int vpos, hpos, stat;
	u32 count;

	if (pipe >= adev->mode_info.num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
	if (adev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = amdgpu_display_vblank_get_counter(adev, pipe);
			/* Ask amdgpu_display_get_crtc_scanoutpos to return
			 * vpos as distance to start of vblank, instead of
			 * regular vertical scanout pos.
			 */
			stat = amdgpu_display_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&adev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != amdgpu_display_vblank_get_counter(adev, pipe));

		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		} else {
			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	} else {
		/* Fallback to use value as is. */
		count = amdgpu_display_vblank_get_counter(adev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}
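
/*
 * Worked example of the "cooking" above: if the hw frame counter reads N and
 * the scanout position is already inside vblank but before vsync (vpos >= 0),
 * the function reports N + 1, matching what the hw itself will report once
 * vsync starts.  Outside that window the raw counter value is returned.
 */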

/**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @crtc: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_enable_vblank_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}

/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @crtc: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}

const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
};

const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info_firmware fw_info;
	struct drm_amdgpu_query_fw query_fw;
	struct atom_context *ctx = adev->mode_info.atom_context;
	int ret, i;

	query_fw.fw_type = AMDGPU_INFO_FW_VCE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_UVD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_GMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST CNTL */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST GPM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST SRM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
	query_fw.index = 0;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	if (adev->gfx.mec2_fw) {
		query_fw.index = 1;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
			   fw_info.feature, fw_info.ver);
	}

	query_fw.fw_type = AMDGPU_INFO_FW_SOS;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_ASD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_TA;
	for (i = 0; i < 2; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			continue;
		seq_printf(m, "TA %s feature version: %u, firmware version: 0x%08x\n",
			   i ? "RAS" : "XGMI", fw_info.feature, fw_info.ver);
	}

	query_fw.fw_type = AMDGPU_INFO_FW_SMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
			   i, fw_info.feature, fw_info.ver);
	}

	query_fw.fw_type = AMDGPU_INFO_FW_VCN;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_DMCU;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_DMCUB;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCUB feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);

	return 0;
}

static const struct drm_info_list amdgpu_firmware_info_list[] = {
	{"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
};
#endif

int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
					ARRAY_SIZE(amdgpu_firmware_info_list));
#else
	return 0;
#endif
}
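
/*
 * With debugfs mounted, the table registered above is typically readable at
 * /sys/kernel/debug/dri/<minor>/amdgpu_firmware_info, e.g.:
 *
 *	# cat /sys/kernel/debug/dri/0/amdgpu_firmware_info
 */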