2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
30 #include <drm/amdgpu_drm.h>
31 #include "amdgpu_uvd.h"
32 #include "amdgpu_vce.h"
34 #include <linux/vga_switcheroo.h>
35 #include <linux/slab.h>
36 #include <linux/pm_runtime.h>
37 #include "amdgpu_amdkfd.h"
39 #if defined(CONFIG_VGA_SWITCHEROO)
40 bool amdgpu_has_atpx(void);
42 static inline bool amdgpu_has_atpx(void) { return false; }
46 * amdgpu_driver_unload_kms - Main unload function for KMS.
48 * @dev: drm dev pointer
50 * This is the main unload function for KMS (all asics).
51 * Returns 0 on success.
53 int amdgpu_driver_unload_kms(struct drm_device
*dev
)
55 struct amdgpu_device
*adev
= dev
->dev_private
;
60 if (adev
->rmmio
== NULL
)
63 pm_runtime_get_sync(dev
->dev
);
65 amdgpu_amdkfd_device_fini(adev
);
67 amdgpu_acpi_fini(adev
);
69 amdgpu_device_fini(adev
);
73 dev
->dev_private
= NULL
;
78 * amdgpu_driver_load_kms - Main load function for KMS.
80 * @dev: drm dev pointer
81 * @flags: device flags
83 * This is the main load function for KMS (all asics).
84 * Returns 0 on success, error on failure.
86 int amdgpu_driver_load_kms(struct drm_device
*dev
, unsigned long flags
)
88 struct amdgpu_device
*adev
;
91 adev
= kzalloc(sizeof(struct amdgpu_device
), GFP_KERNEL
);
95 dev
->dev_private
= (void *)adev
;
97 if ((amdgpu_runtime_pm
!= 0) &&
99 ((flags
& AMD_IS_APU
) == 0))
102 /* amdgpu_device_init should report only fatal error
103 * like memory allocation failure or iomapping failure,
104 * or memory manager initialization failure, it must
105 * properly initialize the GPU MC controller and permit
108 r
= amdgpu_device_init(adev
, dev
, dev
->pdev
, flags
);
110 dev_err(&dev
->pdev
->dev
, "Fatal error during GPU init\n");
114 /* Call ACPI methods: require modeset init
115 * but failure is not fatal
118 acpi_status
= amdgpu_acpi_init(adev
);
120 dev_dbg(&dev
->pdev
->dev
,
121 "Error during ACPI methods call\n");
124 amdgpu_amdkfd_load_interface(adev
);
125 amdgpu_amdkfd_device_probe(adev
);
126 amdgpu_amdkfd_device_init(adev
);
128 if (amdgpu_device_is_px(dev
)) {
129 pm_runtime_use_autosuspend(dev
->dev
);
130 pm_runtime_set_autosuspend_delay(dev
->dev
, 5000);
131 pm_runtime_set_active(dev
->dev
);
132 pm_runtime_allow(dev
->dev
);
133 pm_runtime_mark_last_busy(dev
->dev
);
134 pm_runtime_put_autosuspend(dev
->dev
);
139 amdgpu_driver_unload_kms(dev
);
146 * Userspace get information ioctl
149 * amdgpu_info_ioctl - answer a device specific request.
151 * @adev: amdgpu device pointer
152 * @data: request object
155 * This function is used to pass device specific parameters to the userspace
156 * drivers. Examples include: pci device id, pipeline parms, tiling params,
158 * Returns 0 on success, -EINVAL on failure.
160 static int amdgpu_info_ioctl(struct drm_device
*dev
, void *data
, struct drm_file
*filp
)
162 struct amdgpu_device
*adev
= dev
->dev_private
;
163 struct drm_amdgpu_info
*info
= data
;
164 struct amdgpu_mode_info
*minfo
= &adev
->mode_info
;
165 void __user
*out
= (void __user
*)(long)info
->return_pointer
;
166 uint32_t size
= info
->return_size
;
167 struct drm_crtc
*crtc
;
172 if (!info
->return_size
|| !info
->return_pointer
)
175 switch (info
->query
) {
176 case AMDGPU_INFO_ACCEL_WORKING
:
177 ui32
= adev
->accel_working
;
178 return copy_to_user(out
, &ui32
, min(size
, 4u)) ? -EFAULT
: 0;
179 case AMDGPU_INFO_CRTC_FROM_ID
:
180 for (i
= 0, found
= 0; i
< adev
->mode_info
.num_crtc
; i
++) {
181 crtc
= (struct drm_crtc
*)minfo
->crtcs
[i
];
182 if (crtc
&& crtc
->base
.id
== info
->mode_crtc
.id
) {
183 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
184 ui32
= amdgpu_crtc
->crtc_id
;
190 DRM_DEBUG_KMS("unknown crtc id %d\n", info
->mode_crtc
.id
);
193 return copy_to_user(out
, &ui32
, min(size
, 4u)) ? -EFAULT
: 0;
194 case AMDGPU_INFO_HW_IP_INFO
: {
195 struct drm_amdgpu_info_hw_ip ip
= {};
196 enum amd_ip_block_type type
;
197 uint32_t ring_mask
= 0;
198 uint32_t ib_start_alignment
= 0;
199 uint32_t ib_size_alignment
= 0;
201 if (info
->query_hw_ip
.ip_instance
>= AMDGPU_HW_IP_INSTANCE_MAX_COUNT
)
204 switch (info
->query_hw_ip
.type
) {
205 case AMDGPU_HW_IP_GFX
:
206 type
= AMD_IP_BLOCK_TYPE_GFX
;
207 for (i
= 0; i
< adev
->gfx
.num_gfx_rings
; i
++)
208 ring_mask
|= ((adev
->gfx
.gfx_ring
[i
].ready
? 1 : 0) << i
);
209 ib_start_alignment
= AMDGPU_GPU_PAGE_SIZE
;
210 ib_size_alignment
= 8;
212 case AMDGPU_HW_IP_COMPUTE
:
213 type
= AMD_IP_BLOCK_TYPE_GFX
;
214 for (i
= 0; i
< adev
->gfx
.num_compute_rings
; i
++)
215 ring_mask
|= ((adev
->gfx
.compute_ring
[i
].ready
? 1 : 0) << i
);
216 ib_start_alignment
= AMDGPU_GPU_PAGE_SIZE
;
217 ib_size_alignment
= 8;
219 case AMDGPU_HW_IP_DMA
:
220 type
= AMD_IP_BLOCK_TYPE_SDMA
;
221 for (i
= 0; i
< adev
->sdma
.num_instances
; i
++)
222 ring_mask
|= ((adev
->sdma
.instance
[i
].ring
.ready
? 1 : 0) << i
);
223 ib_start_alignment
= AMDGPU_GPU_PAGE_SIZE
;
224 ib_size_alignment
= 1;
226 case AMDGPU_HW_IP_UVD
:
227 type
= AMD_IP_BLOCK_TYPE_UVD
;
228 ring_mask
= adev
->uvd
.ring
.ready
? 1 : 0;
229 ib_start_alignment
= AMDGPU_GPU_PAGE_SIZE
;
230 ib_size_alignment
= 8;
232 case AMDGPU_HW_IP_VCE
:
233 type
= AMD_IP_BLOCK_TYPE_VCE
;
234 for (i
= 0; i
< AMDGPU_MAX_VCE_RINGS
; i
++)
235 ring_mask
|= ((adev
->vce
.ring
[i
].ready
? 1 : 0) << i
);
236 ib_start_alignment
= AMDGPU_GPU_PAGE_SIZE
;
237 ib_size_alignment
= 8;
243 for (i
= 0; i
< adev
->num_ip_blocks
; i
++) {
244 if (adev
->ip_blocks
[i
].type
== type
&&
245 adev
->ip_block_status
[i
].valid
) {
246 ip
.hw_ip_version_major
= adev
->ip_blocks
[i
].major
;
247 ip
.hw_ip_version_minor
= adev
->ip_blocks
[i
].minor
;
248 ip
.capabilities_flags
= 0;
249 ip
.available_rings
= ring_mask
;
250 ip
.ib_start_alignment
= ib_start_alignment
;
251 ip
.ib_size_alignment
= ib_size_alignment
;
255 return copy_to_user(out
, &ip
,
256 min((size_t)size
, sizeof(ip
))) ? -EFAULT
: 0;
258 case AMDGPU_INFO_HW_IP_COUNT
: {
259 enum amd_ip_block_type type
;
262 switch (info
->query_hw_ip
.type
) {
263 case AMDGPU_HW_IP_GFX
:
264 type
= AMD_IP_BLOCK_TYPE_GFX
;
266 case AMDGPU_HW_IP_COMPUTE
:
267 type
= AMD_IP_BLOCK_TYPE_GFX
;
269 case AMDGPU_HW_IP_DMA
:
270 type
= AMD_IP_BLOCK_TYPE_SDMA
;
272 case AMDGPU_HW_IP_UVD
:
273 type
= AMD_IP_BLOCK_TYPE_UVD
;
275 case AMDGPU_HW_IP_VCE
:
276 type
= AMD_IP_BLOCK_TYPE_VCE
;
282 for (i
= 0; i
< adev
->num_ip_blocks
; i
++)
283 if (adev
->ip_blocks
[i
].type
== type
&&
284 adev
->ip_block_status
[i
].valid
&&
285 count
< AMDGPU_HW_IP_INSTANCE_MAX_COUNT
)
288 return copy_to_user(out
, &count
, min(size
, 4u)) ? -EFAULT
: 0;
290 case AMDGPU_INFO_TIMESTAMP
:
291 ui64
= amdgpu_asic_get_gpu_clock_counter(adev
);
292 return copy_to_user(out
, &ui64
, min(size
, 8u)) ? -EFAULT
: 0;
293 case AMDGPU_INFO_FW_VERSION
: {
294 struct drm_amdgpu_info_firmware fw_info
;
296 /* We only support one instance of each IP block right now. */
297 if (info
->query_fw
.ip_instance
!= 0)
300 switch (info
->query_fw
.fw_type
) {
301 case AMDGPU_INFO_FW_VCE
:
302 fw_info
.ver
= adev
->vce
.fw_version
;
303 fw_info
.feature
= adev
->vce
.fb_version
;
305 case AMDGPU_INFO_FW_UVD
:
309 case AMDGPU_INFO_FW_GMC
:
310 fw_info
.ver
= adev
->mc
.fw_version
;
313 case AMDGPU_INFO_FW_GFX_ME
:
314 fw_info
.ver
= adev
->gfx
.me_fw_version
;
315 fw_info
.feature
= adev
->gfx
.me_feature_version
;
317 case AMDGPU_INFO_FW_GFX_PFP
:
318 fw_info
.ver
= adev
->gfx
.pfp_fw_version
;
319 fw_info
.feature
= adev
->gfx
.pfp_feature_version
;
321 case AMDGPU_INFO_FW_GFX_CE
:
322 fw_info
.ver
= adev
->gfx
.ce_fw_version
;
323 fw_info
.feature
= adev
->gfx
.ce_feature_version
;
325 case AMDGPU_INFO_FW_GFX_RLC
:
326 fw_info
.ver
= adev
->gfx
.rlc_fw_version
;
327 fw_info
.feature
= adev
->gfx
.rlc_feature_version
;
329 case AMDGPU_INFO_FW_GFX_MEC
:
330 if (info
->query_fw
.index
== 0) {
331 fw_info
.ver
= adev
->gfx
.mec_fw_version
;
332 fw_info
.feature
= adev
->gfx
.mec_feature_version
;
333 } else if (info
->query_fw
.index
== 1) {
334 fw_info
.ver
= adev
->gfx
.mec2_fw_version
;
335 fw_info
.feature
= adev
->gfx
.mec2_feature_version
;
339 case AMDGPU_INFO_FW_SMC
:
340 fw_info
.ver
= adev
->pm
.fw_version
;
343 case AMDGPU_INFO_FW_SDMA
:
344 if (info
->query_fw
.index
>= adev
->sdma
.num_instances
)
346 fw_info
.ver
= adev
->sdma
.instance
[info
->query_fw
.index
].fw_version
;
347 fw_info
.feature
= adev
->sdma
.instance
[info
->query_fw
.index
].feature_version
;
352 return copy_to_user(out
, &fw_info
,
353 min((size_t)size
, sizeof(fw_info
))) ? -EFAULT
: 0;
355 case AMDGPU_INFO_NUM_BYTES_MOVED
:
356 ui64
= atomic64_read(&adev
->num_bytes_moved
);
357 return copy_to_user(out
, &ui64
, min(size
, 8u)) ? -EFAULT
: 0;
358 case AMDGPU_INFO_VRAM_USAGE
:
359 ui64
= atomic64_read(&adev
->vram_usage
);
360 return copy_to_user(out
, &ui64
, min(size
, 8u)) ? -EFAULT
: 0;
361 case AMDGPU_INFO_VIS_VRAM_USAGE
:
362 ui64
= atomic64_read(&adev
->vram_vis_usage
);
363 return copy_to_user(out
, &ui64
, min(size
, 8u)) ? -EFAULT
: 0;
364 case AMDGPU_INFO_GTT_USAGE
:
365 ui64
= atomic64_read(&adev
->gtt_usage
);
366 return copy_to_user(out
, &ui64
, min(size
, 8u)) ? -EFAULT
: 0;
367 case AMDGPU_INFO_GDS_CONFIG
: {
368 struct drm_amdgpu_info_gds gds_info
;
370 memset(&gds_info
, 0, sizeof(gds_info
));
371 gds_info
.gds_gfx_partition_size
= adev
->gds
.mem
.gfx_partition_size
>> AMDGPU_GDS_SHIFT
;
372 gds_info
.compute_partition_size
= adev
->gds
.mem
.cs_partition_size
>> AMDGPU_GDS_SHIFT
;
373 gds_info
.gds_total_size
= adev
->gds
.mem
.total_size
>> AMDGPU_GDS_SHIFT
;
374 gds_info
.gws_per_gfx_partition
= adev
->gds
.gws
.gfx_partition_size
>> AMDGPU_GWS_SHIFT
;
375 gds_info
.gws_per_compute_partition
= adev
->gds
.gws
.cs_partition_size
>> AMDGPU_GWS_SHIFT
;
376 gds_info
.oa_per_gfx_partition
= adev
->gds
.oa
.gfx_partition_size
>> AMDGPU_OA_SHIFT
;
377 gds_info
.oa_per_compute_partition
= adev
->gds
.oa
.cs_partition_size
>> AMDGPU_OA_SHIFT
;
378 return copy_to_user(out
, &gds_info
,
379 min((size_t)size
, sizeof(gds_info
))) ? -EFAULT
: 0;
381 case AMDGPU_INFO_VRAM_GTT
: {
382 struct drm_amdgpu_info_vram_gtt vram_gtt
;
384 vram_gtt
.vram_size
= adev
->mc
.real_vram_size
;
385 vram_gtt
.vram_cpu_accessible_size
= adev
->mc
.visible_vram_size
;
386 vram_gtt
.vram_cpu_accessible_size
-= adev
->vram_pin_size
;
387 vram_gtt
.gtt_size
= adev
->mc
.gtt_size
;
388 vram_gtt
.gtt_size
-= adev
->gart_pin_size
;
389 return copy_to_user(out
, &vram_gtt
,
390 min((size_t)size
, sizeof(vram_gtt
))) ? -EFAULT
: 0;
392 case AMDGPU_INFO_READ_MMR_REG
: {
393 unsigned n
, alloc_size
;
395 unsigned se_num
= (info
->read_mmr_reg
.instance
>>
396 AMDGPU_INFO_MMR_SE_INDEX_SHIFT
) &
397 AMDGPU_INFO_MMR_SE_INDEX_MASK
;
398 unsigned sh_num
= (info
->read_mmr_reg
.instance
>>
399 AMDGPU_INFO_MMR_SH_INDEX_SHIFT
) &
400 AMDGPU_INFO_MMR_SH_INDEX_MASK
;
402 /* set full masks if the userspace set all bits
403 * in the bitfields */
404 if (se_num
== AMDGPU_INFO_MMR_SE_INDEX_MASK
)
406 if (sh_num
== AMDGPU_INFO_MMR_SH_INDEX_MASK
)
409 regs
= kmalloc_array(info
->read_mmr_reg
.count
, sizeof(*regs
), GFP_KERNEL
);
412 alloc_size
= info
->read_mmr_reg
.count
* sizeof(*regs
);
414 for (i
= 0; i
< info
->read_mmr_reg
.count
; i
++)
415 if (amdgpu_asic_read_register(adev
, se_num
, sh_num
,
416 info
->read_mmr_reg
.dword_offset
+ i
,
418 DRM_DEBUG_KMS("unallowed offset %#x\n",
419 info
->read_mmr_reg
.dword_offset
+ i
);
423 n
= copy_to_user(out
, regs
, min(size
, alloc_size
));
425 return n
? -EFAULT
: 0;
427 case AMDGPU_INFO_DEV_INFO
: {
428 struct drm_amdgpu_info_device dev_info
= {};
429 struct amdgpu_cu_info cu_info
;
431 dev_info
.device_id
= dev
->pdev
->device
;
432 dev_info
.chip_rev
= adev
->rev_id
;
433 dev_info
.external_rev
= adev
->external_rev_id
;
434 dev_info
.pci_rev
= dev
->pdev
->revision
;
435 dev_info
.family
= adev
->family
;
436 dev_info
.num_shader_engines
= adev
->gfx
.config
.max_shader_engines
;
437 dev_info
.num_shader_arrays_per_engine
= adev
->gfx
.config
.max_sh_per_se
;
438 /* return all clocks in KHz */
439 dev_info
.gpu_counter_freq
= amdgpu_asic_get_xclk(adev
) * 10;
440 if (adev
->pm
.dpm_enabled
) {
441 dev_info
.max_engine_clock
=
442 adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
.sclk
* 10;
443 dev_info
.max_memory_clock
=
444 adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
.mclk
* 10;
446 dev_info
.max_engine_clock
= adev
->pm
.default_sclk
* 10;
447 dev_info
.max_memory_clock
= adev
->pm
.default_mclk
* 10;
449 dev_info
.enabled_rb_pipes_mask
= adev
->gfx
.config
.backend_enable_mask
;
450 dev_info
.num_rb_pipes
= adev
->gfx
.config
.max_backends_per_se
*
451 adev
->gfx
.config
.max_shader_engines
;
452 dev_info
.num_hw_gfx_contexts
= adev
->gfx
.config
.max_hw_contexts
;
454 dev_info
.ids_flags
= 0;
455 if (adev
->flags
& AMD_IS_APU
)
456 dev_info
.ids_flags
|= AMDGPU_IDS_FLAGS_FUSION
;
457 dev_info
.virtual_address_offset
= AMDGPU_VA_RESERVED_SIZE
;
458 dev_info
.virtual_address_max
= (uint64_t)adev
->vm_manager
.max_pfn
* AMDGPU_GPU_PAGE_SIZE
;
459 dev_info
.virtual_address_alignment
= max((int)PAGE_SIZE
, AMDGPU_GPU_PAGE_SIZE
);
460 dev_info
.pte_fragment_size
= (1 << AMDGPU_LOG2_PAGES_PER_FRAG
) *
461 AMDGPU_GPU_PAGE_SIZE
;
462 dev_info
.gart_page_size
= AMDGPU_GPU_PAGE_SIZE
;
464 amdgpu_asic_get_cu_info(adev
, &cu_info
);
465 dev_info
.cu_active_number
= cu_info
.number
;
466 dev_info
.cu_ao_mask
= cu_info
.ao_cu_mask
;
467 dev_info
.ce_ram_size
= adev
->gfx
.ce_ram_size
;
468 memcpy(&dev_info
.cu_bitmap
[0], &cu_info
.bitmap
[0], sizeof(cu_info
.bitmap
));
469 dev_info
.vram_type
= adev
->mc
.vram_type
;
470 dev_info
.vram_bit_width
= adev
->mc
.vram_width
;
471 dev_info
.vce_harvest_config
= adev
->vce
.harvest_config
;
473 return copy_to_user(out
, &dev_info
,
474 min((size_t)size
, sizeof(dev_info
))) ? -EFAULT
: 0;
477 DRM_DEBUG_KMS("Invalid request %d\n", info
->query
);
485 * Outdated mess for old drm with Xorg being in charge (void function now).
488 * amdgpu_driver_lastclose_kms - drm callback for last close
490 * @dev: drm dev pointer
492 * Switch vga_switcheroo state after last close (all asics).
494 void amdgpu_driver_lastclose_kms(struct drm_device
*dev
)
496 struct amdgpu_device
*adev
= dev
->dev_private
;
498 amdgpu_fbdev_restore_mode(adev
);
499 vga_switcheroo_process_delayed_switch();
503 * amdgpu_driver_open_kms - drm callback for open
505 * @dev: drm dev pointer
506 * @file_priv: drm file
508 * On device open, init vm on cayman+ (all asics).
509 * Returns 0 on success, error on failure.
511 int amdgpu_driver_open_kms(struct drm_device
*dev
, struct drm_file
*file_priv
)
513 struct amdgpu_device
*adev
= dev
->dev_private
;
514 struct amdgpu_fpriv
*fpriv
;
517 file_priv
->driver_priv
= NULL
;
519 r
= pm_runtime_get_sync(dev
->dev
);
523 fpriv
= kzalloc(sizeof(*fpriv
), GFP_KERNEL
);
524 if (unlikely(!fpriv
))
527 r
= amdgpu_vm_init(adev
, &fpriv
->vm
);
531 mutex_init(&fpriv
->bo_list_lock
);
532 idr_init(&fpriv
->bo_list_handles
);
534 amdgpu_ctx_mgr_init(&fpriv
->ctx_mgr
);
536 file_priv
->driver_priv
= fpriv
;
538 pm_runtime_mark_last_busy(dev
->dev
);
539 pm_runtime_put_autosuspend(dev
->dev
);
549 * amdgpu_driver_postclose_kms - drm callback for post close
551 * @dev: drm dev pointer
552 * @file_priv: drm file
554 * On device post close, tear down vm on cayman+ (all asics).
556 void amdgpu_driver_postclose_kms(struct drm_device
*dev
,
557 struct drm_file
*file_priv
)
559 struct amdgpu_device
*adev
= dev
->dev_private
;
560 struct amdgpu_fpriv
*fpriv
= file_priv
->driver_priv
;
561 struct amdgpu_bo_list
*list
;
567 amdgpu_ctx_mgr_fini(&fpriv
->ctx_mgr
);
569 amdgpu_vm_fini(adev
, &fpriv
->vm
);
571 idr_for_each_entry(&fpriv
->bo_list_handles
, list
, handle
)
572 amdgpu_bo_list_free(list
);
574 idr_destroy(&fpriv
->bo_list_handles
);
575 mutex_destroy(&fpriv
->bo_list_lock
);
578 file_priv
->driver_priv
= NULL
;
582 * amdgpu_driver_preclose_kms - drm callback for pre close
584 * @dev: drm dev pointer
585 * @file_priv: drm file
587 * On device pre close, tear down hyperz and cmask filps on r1xx-r5xx
590 void amdgpu_driver_preclose_kms(struct drm_device
*dev
,
591 struct drm_file
*file_priv
)
593 struct amdgpu_device
*adev
= dev
->dev_private
;
595 amdgpu_uvd_free_handles(adev
, file_priv
);
596 amdgpu_vce_free_handles(adev
, file_priv
);
600 * VBlank related functions.
603 * amdgpu_get_vblank_counter_kms - get frame count
605 * @dev: drm dev pointer
606 * @pipe: crtc to get the frame count from
608 * Gets the frame count on the requested crtc (all asics).
609 * Returns frame count on success, -EINVAL on failure.
611 u32
amdgpu_get_vblank_counter_kms(struct drm_device
*dev
, unsigned int pipe
)
613 struct amdgpu_device
*adev
= dev
->dev_private
;
615 if (pipe
>= adev
->mode_info
.num_crtc
) {
616 DRM_ERROR("Invalid crtc %u\n", pipe
);
620 return amdgpu_display_vblank_get_counter(adev
, pipe
);
624 * amdgpu_enable_vblank_kms - enable vblank interrupt
626 * @dev: drm dev pointer
627 * @pipe: crtc to enable vblank interrupt for
629 * Enable the interrupt on the requested crtc (all asics).
630 * Returns 0 on success, -EINVAL on failure.
632 int amdgpu_enable_vblank_kms(struct drm_device
*dev
, unsigned int pipe
)
634 struct amdgpu_device
*adev
= dev
->dev_private
;
635 int idx
= amdgpu_crtc_idx_to_irq_type(adev
, pipe
);
637 return amdgpu_irq_get(adev
, &adev
->crtc_irq
, idx
);
641 * amdgpu_disable_vblank_kms - disable vblank interrupt
643 * @dev: drm dev pointer
644 * @pipe: crtc to disable vblank interrupt for
646 * Disable the interrupt on the requested crtc (all asics).
648 void amdgpu_disable_vblank_kms(struct drm_device
*dev
, unsigned int pipe
)
650 struct amdgpu_device
*adev
= dev
->dev_private
;
651 int idx
= amdgpu_crtc_idx_to_irq_type(adev
, pipe
);
653 amdgpu_irq_put(adev
, &adev
->crtc_irq
, idx
);
657 * amdgpu_get_vblank_timestamp_kms - get vblank timestamp
659 * @dev: drm dev pointer
660 * @crtc: crtc to get the timestamp for
661 * @max_error: max error
662 * @vblank_time: time value
663 * @flags: flags passed to the driver
665 * Gets the timestamp on the requested crtc based on the
666 * scanout position. (all asics).
667 * Returns postive status flags on success, negative error on failure.
669 int amdgpu_get_vblank_timestamp_kms(struct drm_device
*dev
, unsigned int pipe
,
671 struct timeval
*vblank_time
,
674 struct drm_crtc
*crtc
;
675 struct amdgpu_device
*adev
= dev
->dev_private
;
677 if (pipe
>= dev
->num_crtcs
) {
678 DRM_ERROR("Invalid crtc %u\n", pipe
);
682 /* Get associated drm_crtc: */
683 crtc
= &adev
->mode_info
.crtcs
[pipe
]->base
;
685 /* Helper routine in DRM core does all the work: */
686 return drm_calc_vbltimestamp_from_scanoutpos(dev
, pipe
, max_error
,
691 const struct drm_ioctl_desc amdgpu_ioctls_kms
[] = {
692 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE
, amdgpu_gem_create_ioctl
, DRM_AUTH
|DRM_RENDER_ALLOW
),
693 DRM_IOCTL_DEF_DRV(AMDGPU_CTX
, amdgpu_ctx_ioctl
, DRM_AUTH
|DRM_RENDER_ALLOW
),
694 DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST
, amdgpu_bo_list_ioctl
, DRM_AUTH
|DRM_RENDER_ALLOW
),
696 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP
, amdgpu_gem_mmap_ioctl
, DRM_AUTH
|DRM_RENDER_ALLOW
),
697 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE
, amdgpu_gem_wait_idle_ioctl
, DRM_AUTH
|DRM_RENDER_ALLOW
),
698 DRM_IOCTL_DEF_DRV(AMDGPU_CS
, amdgpu_cs_ioctl
, DRM_AUTH
|DRM_RENDER_ALLOW
),
699 DRM_IOCTL_DEF_DRV(AMDGPU_INFO
, amdgpu_info_ioctl
, DRM_AUTH
|DRM_RENDER_ALLOW
),
700 DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS
, amdgpu_cs_wait_ioctl
, DRM_AUTH
|DRM_RENDER_ALLOW
),
701 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA
, amdgpu_gem_metadata_ioctl
, DRM_AUTH
|DRM_RENDER_ALLOW
),
702 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA
, amdgpu_gem_va_ioctl
, DRM_AUTH
|DRM_RENDER_ALLOW
),
703 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP
, amdgpu_gem_op_ioctl
, DRM_AUTH
|DRM_RENDER_ALLOW
),
704 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR
, amdgpu_gem_userptr_ioctl
, DRM_AUTH
|DRM_RENDER_ALLOW
),
706 int amdgpu_max_kms_ioctl
= ARRAY_SIZE(amdgpu_ioctls_kms
);