2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc/inc/core_types.h"
35 #include "amdgpu_display.h"
36 #include "amdgpu_ucode.h"
38 #include "amdgpu_dm.h"
39 #include "amdgpu_pm.h"
41 #include "amd_shared.h"
42 #include "amdgpu_dm_irq.h"
43 #include "dm_helpers.h"
44 #include "amdgpu_dm_mst_types.h"
45 #if defined(CONFIG_DEBUG_FS)
46 #include "amdgpu_dm_debugfs.h"
49 #include "ivsrcid/ivsrcid_vislands30.h"
51 #include <linux/module.h>
52 #include <linux/moduleparam.h>
53 #include <linux/version.h>
54 #include <linux/types.h>
55 #include <linux/pm_runtime.h>
56 #include <linux/firmware.h>
59 #include <drm/drm_atomic.h>
60 #include <drm/drm_atomic_uapi.h>
61 #include <drm/drm_atomic_helper.h>
62 #include <drm/drm_dp_mst_helper.h>
63 #include <drm/drm_fb_helper.h>
64 #include <drm/drm_edid.h>
66 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
67 #include "ivsrcid/irqsrcs_dcn_1_0.h"
69 #include "dcn/dcn_1_0_offset.h"
70 #include "dcn/dcn_1_0_sh_mask.h"
71 #include "soc15_hw_ip.h"
72 #include "vega10_ip_offset.h"
74 #include "soc15_common.h"
77 #include "modules/inc/mod_freesync.h"
78 #include "modules/power/power_helpers.h"
79 #include "modules/inc/mod_info_packet.h"
81 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
82 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU
);
87 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
88 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
89 * requests into DC requests, and DC responses into DRM responses.
91 * The root control structure is &struct amdgpu_display_manager.
94 /* basic init/fini API */
95 static int amdgpu_dm_init(struct amdgpu_device
*adev
);
96 static void amdgpu_dm_fini(struct amdgpu_device
*adev
);
99 * initializes drm_device display related structures, based on the information
100 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
101 * drm_encoder, drm_mode_config
103 * Returns 0 on success
105 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device
*adev
);
106 /* removes and deallocates the drm structures, created by the above function */
107 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager
*dm
);
110 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector
*aconnector
);
112 static int amdgpu_dm_plane_init(struct amdgpu_display_manager
*dm
,
113 struct drm_plane
*plane
,
114 unsigned long possible_crtcs
);
115 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager
*dm
,
116 struct drm_plane
*plane
,
117 uint32_t link_index
);
118 static int amdgpu_dm_connector_init(struct amdgpu_display_manager
*dm
,
119 struct amdgpu_dm_connector
*amdgpu_dm_connector
,
121 struct amdgpu_encoder
*amdgpu_encoder
);
122 static int amdgpu_dm_encoder_init(struct drm_device
*dev
,
123 struct amdgpu_encoder
*aencoder
,
124 uint32_t link_index
);
126 static int amdgpu_dm_connector_get_modes(struct drm_connector
*connector
);
128 static int amdgpu_dm_atomic_commit(struct drm_device
*dev
,
129 struct drm_atomic_state
*state
,
132 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state
*state
);
134 static int amdgpu_dm_atomic_check(struct drm_device
*dev
,
135 struct drm_atomic_state
*state
);
137 static void handle_cursor_update(struct drm_plane
*plane
,
138 struct drm_plane_state
*old_plane_state
);
/*
 * Default plane-type table, indexed by plane slot: ASICs without a dedicated
 * underlay expose primary planes only. (Closing brace elided in this excerpt.)
 */
static const enum drm_plane_type dm_plane_type_default[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
/*
 * Plane-type table for Carrizo. Note: "carizzo" spelling is the identifier
 * used elsewhere in the file; renaming would break callers.
 */
static const enum drm_plane_type dm_plane_type_carizzo[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY, /* YUV-capable underlay */
/* Plane-type table for Stoney: fewer slots before the YUV underlay. */
static const enum drm_plane_type dm_plane_type_stoney[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY, /* YUV-capable underlay */
165 * dm_vblank_get_counter
168 * Get counter for number of vertical blanks
171 * struct amdgpu_device *adev - [in] desired amdgpu device
172 * int disp_idx - [in] which CRTC to get the counter from
175 * Counter for vertical blanks
/*
 * dm_vblank_get_counter() - Read the vblank counter for one CRTC.
 * @adev: amdgpu device
 * @crtc: index into adev->mode_info.crtcs[]
 *
 * Bounds-checks the index, then asks DC for the vblank counter of the
 * dc_stream_state currently bound to that CRTC. Several error-path lines
 * are elided in this excerpt.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
	/* Reject out-of-range CRTC indices (return elided here). */
	if (crtc >= adev->mode_info.num_crtc)
	struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
	/* to_dm_crtc_state() argument continues on an elided line. */
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
	/* No stream bound: nothing to read from DC. */
	if (acrtc_state->stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
	return dc_stream_get_vblank_counter(acrtc_state->stream);
/*
 * dm_crtc_get_scanoutpos() - Query the current scanout position of a CRTC.
 * @adev:     amdgpu device
 * @crtc:     CRTC index into adev->mode_info.crtcs[]
 * @vbl:      out: packed vblank window, v_blank_start | (v_blank_end << 16)
 * @position: out: packed position, v_position | (h_position << 16)
 *
 * Reads raw positions from DC and repacks them into the legacy register-style
 * layout the base driver expects. Error-path lines are elided in this excerpt.
 */
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
	uint32_t v_blank_start, v_blank_end, h_position, v_position;
	/* Reject invalid CRTC indices (return elided here). */
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
	struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
	/* to_dm_crtc_state() argument continues on an elided line. */
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
	if (acrtc_state->stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
	/*
	 * TODO rework base driver to use values directly.
	 * For now parse it back into reg-format.
	 */
	dc_stream_get_scanoutpos(acrtc_state->stream,
	/* Pack vertical in the low half, horizontal/end in the high half. */
	*position = v_position | (h_position << 16);
	*vbl = v_blank_start | (v_blank_end << 16);
/*
 * IP-block idle/soft-reset hooks required by the amd_ip_funcs interface.
 * Bodies are not visible in this excerpt; presumably trivial stubs — confirm
 * against the full file.
 */
static bool dm_is_idle(void *handle)
static int dm_wait_for_idle(void *handle)
static bool dm_check_soft_reset(void *handle)
static int dm_soft_reset(void *handle)
/*
 * get_crtc_by_otg_inst() - Map an OTG (timing generator) instance to its
 * amdgpu_crtc.
 * @adev: amdgpu device
 *
 * otg_inst == -1 is a special case yielding CRTC 0; otherwise the DRM CRTC
 * list is scanned for a matching otg_inst. (Second parameter's declaration
 * and the match's return statement are elided in this excerpt.)
 */
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	/* -1 means "no specific OTG": fall back to the first CRTC. */
	if (otg_inst == -1) {
		return adev->mode_info.crtcs[0];
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);
		if (amdgpu_crtc->otg_inst == otg_inst)
/*
 * dm_pflip_high_irq() - Page-flip completion interrupt handler.
 * @interrupt_params: struct common_irq_params * carrying the adev and the
 *                    IRQ source this handler was registered for.
 *
 * Resolves the CRTC from the IRQ source, then — under the DRM event lock —
 * verifies a flip was actually submitted, delivers the pending vblank event
 * to userspace, and clears the flip state. Runs in interrupt context.
 */
static void dm_pflip_high_irq(void *interrupt_params)
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	/* IRQ source offset encodes which pipe flipped. */
	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	/* Spurious IRQ: no flip was submitted for this CRTC — bail out. */
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
	/* wake up userspace */
	if (amdgpu_crtc->event) {
		/* Update to correct count(s) if racing with vblank irq */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
		drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
		/* page flip completed. clean up */
		amdgpu_crtc->event = NULL;
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
	DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
			 __func__, amdgpu_crtc->crtc_id, amdgpu_crtc);
	/* Drop the vblank reference taken when the flip was queued. */
	drm_crtc_vblank_put(&amdgpu_crtc->base);
/*
 * dm_crtc_high_irq() - Vertical-blank interrupt handler.
 * @interrupt_params: struct common_irq_params * carrying the adev and the
 *                    IRQ source this handler was registered for.
 *
 * Forwards the vblank to DRM (event delivery + CRC capture), then, when
 * FreeSync variable refresh is active on the stream, lets the freesync
 * module update its state and reprograms vmin/vmax on the DC stream.
 */
static void dm_crtc_high_irq(void *interrupt_params)
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	/* IRQ source offset encodes which pipe vblanked. */
	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	drm_crtc_handle_vblank(&acrtc->base);
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
	acrtc_state = to_dm_crtc_state(acrtc->base.state);
	/* Only adjust timing when VRR is both supported and actively variable. */
	if (acrtc_state->stream &&
	    acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(
				adev->dm.freesync_module,
				&acrtc_state->vrr_params);
		/* Apply the freesync-computed vmin/vmax (args partly elided). */
		dc_stream_adjust_vmin_vmax(
				&acrtc_state->vrr_params.adjust);
/*
 * Clock/power gating hooks for the amd_ip_funcs table. Bodies are not
 * visible in this excerpt; presumably no-op stubs — confirm against the
 * full file.
 */
static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
372 /* Prototypes of private functions */
373 static int dm_early_init(void* handle
);
/*
 * amdgpu_dm_fbc_init() - Allocate the GTT buffer used for FBC compressed data.
 * @connector: DRM connector whose modes size the buffer.
 *
 * Only runs when DC exposes an FBC compressor and the connector is eDP.
 * Sizes the buffer to the largest htotal*vtotal among the connector's modes
 * (x4 — presumably bytes per pixel; confirm) and publishes the GPU address
 * to the DC context. Idempotent: skips if a buffer already exists.
 */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;
	/* No FBC hardware block on this ASIC. */
	if (adev->dm.dc->fbc_compressor == NULL)
	/* FBC is only applied on the eDP panel. */
	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
	/* Already allocated. */
	if (compressor->bo_ptr)
	/* Find the largest mode to size the compressed buffer. */
	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			&compressor->gpu_addr, &compressor->cpu_addr);
	DRM_ERROR("DM: Failed to initialize FBC\n");
	/* Let DC know where the compressed buffer lives. */
	adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
	DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
/*
 * amdgpu_dm_init() - Create and initialize the display manager.
 * @adev: amdgpu device
 *
 * Sets up DM IRQ handling, fills dc_init_data from the ASIC description,
 * creates the Display Core (dc_create), the freesync module, color
 * management, the DRM-facing objects (CRTCs/planes/connectors), vblank
 * support, and optional debugfs entries. Error-handling/return lines are
 * elided in this excerpt; on failure the tail calls amdgpu_dm_fini().
 */
static int amdgpu_dm_init(struct amdgpu_device *adev)
	struct dc_init_data init_data;
	/* Link the DM back to the base driver and DRM device. */
	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;
	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
	mutex_init(&adev->dm.dc_lock);
	if(amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
	/* Describe the ASIC to Display Core. */
	init_data.asic_id.chip_family = adev->family;
	init_data.asic_id.pci_revision_id = adev->rev_id;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;
	init_data.driver = adev;
	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
	init_data.cgs_device = adev->dm.cgs_device;
	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
	/*
	 * GPU VM support only on Carrizo..pre-Raven APUs.
	 * TODO debug why this doesn't work on Raven
	 */
	if (adev->flags & AMD_IS_APU &&
	    adev->asic_type >= CHIP_CARRIZO &&
	    adev->asic_type < CHIP_RAVEN)
		init_data.flags.gpu_vm_support = true;
	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);
	/* Success/failure branches around these two logs are elided. */
	DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		"amdgpu: failed to initialize freesync_module.\n");
	DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
			adev->dm.freesync_module);
	amdgpu_dm_init_color_mod();
	if (amdgpu_dm_initialize_drm_device(adev)) {
		"amdgpu: failed to initialize sw for display support.\n");
	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
	/* TODO: Add_display_info? */
	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		"amdgpu: failed to initialize sw for display support.\n");
#if defined(CONFIG_DEBUG_FS)
	if (dtn_debugfs_init(adev))
		DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
	DRM_DEBUG_DRIVER("KMS initialized.\n");
	/* Error-path teardown. */
	amdgpu_dm_fini(adev);
/*
 * amdgpu_dm_fini() - Tear down everything amdgpu_dm_init() created.
 * @adev: amdgpu device
 *
 * Destroys the DRM-facing objects, the cgs device, the freesync module,
 * Display Core, and the dc_lock mutex, NULLing pointers so a partial init
 * can be torn down safely.
 */
static void amdgpu_dm_fini(struct amdgpu_device *adev)
	amdgpu_dm_destroy_drm_device(&adev->dm);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */
	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	/* DC Destroy TODO: Replace destroy DAL */
	dc_destroy(&adev->dm.dc);
	mutex_destroy(&adev->dm.dc_lock);
/*
 * load_dmcu_fw() - Fetch and register the DMCU firmware for PSP loading.
 * @adev: amdgpu device
 *
 * Selects the firmware file by ASIC type (only Raven is visible here),
 * requests it, validates the ucode header, and registers the ERAM and
 * interrupt-vector sections with the base driver's firmware list. A missing
 * DMCU image is not an error — DMCU is optional. Early-return lines are
 * elided in this excerpt.
 */
static int load_dmcu_fw(struct amdgpu_device *adev)
	const char *fw_name_dmcu;
	const struct dmcu_firmware_header_v1_0 *hdr;
	switch(adev->asic_type) {
	/* Raven carries a DMCU; other visible cases fall through to default. */
	fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
	DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
	/* DMCU ucode can only be carried to the HW by PSP. */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
	DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
	adev->dm.fw_dmcu = NULL;
	dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
	/* Validation failed: drop the bogus image. */
	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;
	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	/* ERAM section: total ucode minus the interrupt-vector table. */
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
	/* Interrupt-vector section. */
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
/*
 * dm_sw_init() - IP-block software-init hook.
 * @handle: opaque amdgpu_device pointer passed by the amd_ip_funcs framework.
 *
 * All the work is deferred to load_dmcu_fw(); its status is returned
 * directly.
 */
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = handle;

	return load_dmcu_fw(adev);
}
/*
 * dm_sw_fini() - IP-block software-teardown hook.
 * @handle: opaque amdgpu_device pointer passed by the amd_ip_funcs framework.
 *
 * Releases the DMCU firmware image if one was loaded; the return statement
 * is elided in this excerpt.
 */
static int dm_sw_fini(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	if(adev->dm.fw_dmcu) {
		release_firmware(adev->dm.fw_dmcu);
		/* Prevent double release on repeated fini. */
		adev->dm.fw_dmcu = NULL;
/*
 * detect_mst_link_for_all_connectors() - Start MST topology management on
 * every connector that is an MST branch.
 * @dev: DRM device
 *
 * Walks the connector list under the connection mutex and enables the MST
 * topology manager for each MST-branch link that has a working AUX channel.
 * If enabling fails, the link is downgraded to a single (non-MST) connection.
 */
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					aconnector, aconnector->base.base.id);
			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			DRM_ERROR("DM_MST: Failed to start MST\n");
			/* Fall back to treating the link as a plain sink. */
			((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
	drm_modeset_unlock(&dev->mode_config.connection_mutex);
/*
 * dm_late_init() - Late IP-block init: program DMCU IRAM, then kick MST.
 * @handle: opaque amdgpu_device pointer.
 *
 * Builds a linear 16-entry backlight LUT plus ramping parameters, loads them
 * into DMCU IRAM, and finally performs MST link detection on all connectors.
 * (Declarations of i/ret and some guard lines are elided in this excerpt.)
 */
static int dm_late_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	/* Identity backlight curve: 0..0xFFFF in 16 equal steps. */
	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;
	/* Ramping constants — presumably ABM defaults; confirm against DMCU docs. */
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;
	ret = dmcu_load_iram(dmcu, params);
	return detect_mst_link_for_all_connectors(adev->ddev);
/*
 * s3_handle_mst() - Suspend or resume MST topology managers across S3.
 * @dev:     DRM device
 * @suspend: true to suspend managers, false to resume them
 *
 * Walks all connectors under the connection mutex; only root MST branch
 * connectors (no mst_port) are touched. The suspend/resume branch keywords
 * are elided in this excerpt.
 */
static void s3_handle_mst(struct drm_device *dev, bool suspend)
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    !aconnector->mst_port) {
			drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
			drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);
/*
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);
/*
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Reverse of dm_hw_init(): HPD first, then IRQs, then DM/DC proper. */
	amdgpu_dm_hpd_fini(adev);
	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
/*
 * dm_suspend() - IP-block suspend hook.
 * @handle: opaque amdgpu_device pointer.
 *
 * Suspends MST managers, quiesces DM interrupts, snapshots the full atomic
 * state into dm.cached_state for restoration in dm_resume(), and puts DC
 * into the D3 power state.
 */
static int dm_suspend(void *handle)
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	s3_handle_mst(adev->ddev, true);
	amdgpu_dm_irq_suspend(adev);
	/* A leftover cached state indicates an unbalanced suspend/resume. */
	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
/*
 * amdgpu_dm_find_first_crtc_matching_connector() - Find the first connector
 * in an atomic state whose new state targets the given CRTC.
 * @state: atomic state to scan
 * @crtc:  CRTC to match against new_con_state->crtc
 *
 * Returns the matching amdgpu_dm_connector; the no-match return is elided
 * in this excerpt (presumably NULL — confirm).
 */
static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;
	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;
		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
/*
 * emulated_link_detect() - Fake a link detection for a forced connector.
 * @link: DC link to populate with an emulated sink.
 *
 * Used when userspace forces a connector state and no physical sink is
 * present: synthesizes sink capabilities from the connector signal type,
 * creates a dc_sink, attaches it to the link, and tries to read an EDID.
 * Break statements and the epilogue are elided in this excerpt.
 */
static void emulated_link_detect(struct dc_link *link)
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;
	link->type = dc_connection_none;
	prev_sink = link->local_sink;
	/* Hold a reference to the old sink while we swap it out. */
	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);
	/* Derive DDC transaction type and signal from the connector kind. */
	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		/*
		 * DP is emulated as a VIRTUAL signal — presumably to bypass
		 * DP-specific link training on a fake sink; confirm.
		 */
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
	DC_ERROR("Invalid connector type! signal:%d\n",
		link->connector_signal);
	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;
	sink = dc_sink_create(&sink_init_data);
	DC_ERROR("Failed to create sink!\n");
	link->local_sink = sink;
	/* Best-effort EDID read; the argument list continues on elided lines. */
	edid_status = dm_helpers_read_local_edid(
	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");
/*
 * dm_resume() - IP-block resume hook; mirror of dm_suspend().
 * @handle: opaque amdgpu_device pointer.
 *
 * Powers DC back to D0, resumes MST and DM IRQs, re-detects every
 * non-MST-port connector, forces a modeset on each cached CRTC, releases
 * the DC stream/plane states duplicated at suspend (atomic_check will
 * recreate them), and restores the cached atomic state.
 */
static int dm_resume(void *handle)
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	enum dc_connection_type new_connection_type = dc_connection_none;
	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
	/* program HPD filter */
	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);
	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);
	list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_dm_connector(connector);
		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");
		/* Forced connector with nothing attached: emulate the link. */
		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;
	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			/* More than one ref here means the duplicate leaked. */
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
	ret = drm_atomic_helper_resume(ddev, dm->cached_state);
	dm->cached_state = NULL;
	amdgpu_dm_irq_resume_late(adev);
986 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
987 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
988 * the base driver's device list to be initialized and torn down accordingly.
990 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
/*
 * IP-block hook table registered with the amdgpu base driver: maps the
 * generic amd_ip_funcs interface onto the DM implementations above.
 * (Closing brace elided in this excerpt.)
 */
static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
/*
 * Exported IP-block descriptor: registers DM as the DCE-type display block
 * with the base driver. Version fields are elided in this excerpt.
 */
const struct amdgpu_ip_block_version dm_ip_block =
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.funcs = &amdgpu_dm_funcs,
/* DRM mode-config entry points: atomic check/commit routed into DM. */
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
/* Custom commit-tail so DM controls hardware programming order. */
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
/*
 * amdgpu_dm_update_connector_after_detect() - Sync the DRM connector with
 * the sink state DC reported after link detection.
 * @aconnector: connector to update.
 *
 * Handles three situations: emulated (forced/headless) sinks, an unchanged
 * sink (DP short pulse), and a genuine connect/disconnect — updating the
 * dc_sink pointer, EDID property, CEC, and freesync caps accordingly.
 * Note: the return type line and several early-return/else lines are elided
 * in this excerpt.
 */
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;
	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
	sink = aconnector->dc_link->local_sink;
	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depends on link status.
	 * Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {
		/*
		 * For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);
		if (aconnector->dc_sink) {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			/*
			 * retain and release below are used to
			 * bump up refcount for sink because the link doesn't point
			 * to it anymore after disconnect, so on next crtc to connector
			 * reshuffle by UMD we will get into unwanted dc_sink release
			 */
			if (aconnector->dc_sink != aconnector->dc_em_sink)
				dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = sink;
		amdgpu_dm_update_freesync_caps(connector,
		amdgpu_dm_update_freesync_caps(connector, NULL);
		if (!aconnector->dc_sink)
			aconnector->dc_sink = aconnector->dc_em_sink;
		else if (aconnector->dc_sink != aconnector->dc_em_sink)
			dc_sink_retain(aconnector->dc_sink);
		mutex_unlock(&dev->mode_config.mutex);
	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
	/* Same sink as before: nothing structural changed. */
	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);
	mutex_lock(&dev->mode_config.mutex);
	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	/*
	 * TODO: check if we still need the S3 mode update workaround.
	 * If yes, put it here.
	 */
	/* Sink changed: drop old freesync caps before installing the new sink. */
	if (aconnector->dc_sink)
		amdgpu_dm_update_freesync_caps(connector, NULL);
	aconnector->dc_sink = sink;
	if (sink->dc_edid.length == 0) {
		/* Sink present but no EDID available (e.g. raw DP sink). */
		aconnector->edid = NULL;
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
			(struct edid *) sink->dc_edid.raw_edid;
		drm_connector_update_edid_property(connector,
		drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		/* Disconnect path: clear EDID, CEC, modes and sink. */
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
	mutex_unlock(&dev->mode_config.mutex);
/*
 * handle_hpd_irq() - Hot-plug-detect (long pulse) handler for one connector.
 * @param: struct amdgpu_dm_connector * registered with the IRQ manager.
 *
 * Re-detects the link (emulated for forced connectors with nothing
 * attached), refreshes the connector's DRM state under the modeset locks,
 * and emits a hotplug uevent unless the connector state is forced.
 */
static void handle_hpd_irq(void *param)
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	/*
	 * In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;
	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	/* Forced connector with no physical sink: emulate the link. */
	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);
		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);
		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);
		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);
		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	mutex_unlock(&aconnector->hpd_lock);
/*
 * dm_handle_hpd_rx_irq() - Drain DP sink-event (ESI) notifications for MST.
 * @aconnector: connector whose AUX channel is polled.
 *
 * Chooses the DPCD IRQ register window by DPCD revision (pre-1.2 uses
 * DP_SINK_COUNT at 0x200, 1.2+ uses the ESI block at 0x2002), then loops:
 * read ESI, let the MST manager handle it, ACK handled bits back to the
 * sink (up to 3 write retries), and re-read — bounded by max_process_count
 * to avoid livelock on a chattering sink. Several declarations (dret, wret,
 * retry, dpcd_addr) and loop braces are elided in this excerpt.
 */
static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	bool new_irq_handled = false;
	int dpcd_bytes_to_read;
	const int max_process_count = 30;
	int process_count = 0;
	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_bytes_to_read);
	/* Keep servicing while full reads succeed, up to the iteration cap. */
	while (dret == dpcd_bytes_to_read &&
			process_count < max_process_count) {
		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			&aconnector->mst_mgr,
		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;
			for (retry = 0; retry < 3; retry++) {
				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_bytes_to_read);
			new_irq_handled = false;
	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
1278 static void handle_hpd_rx_irq(void *param
)
1280 struct amdgpu_dm_connector
*aconnector
= (struct amdgpu_dm_connector
*)param
;
1281 struct drm_connector
*connector
= &aconnector
->base
;
1282 struct drm_device
*dev
= connector
->dev
;
1283 struct dc_link
*dc_link
= aconnector
->dc_link
;
1284 bool is_mst_root_connector
= aconnector
->mst_mgr
.mst_state
;
1285 enum dc_connection_type new_connection_type
= dc_connection_none
;
1288 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
1289 * conflict, after implement i2c helper, this mutex should be
1292 if (dc_link
->type
!= dc_connection_mst_branch
)
1293 mutex_lock(&aconnector
->hpd_lock
);
1295 if (dc_link_handle_hpd_rx_irq(dc_link
, NULL
, NULL
) &&
1296 !is_mst_root_connector
) {
1297 /* Downstream Port status changed. */
1298 if (!dc_link_detect_sink(dc_link
, &new_connection_type
))
1299 DRM_ERROR("KMS: Failed to detect connector\n");
1301 if (aconnector
->base
.force
&& new_connection_type
== dc_connection_none
) {
1302 emulated_link_detect(dc_link
);
1304 if (aconnector
->fake_enable
)
1305 aconnector
->fake_enable
= false;
1307 amdgpu_dm_update_connector_after_detect(aconnector
);
1310 drm_modeset_lock_all(dev
);
1311 dm_restore_drm_connector_state(dev
, connector
);
1312 drm_modeset_unlock_all(dev
);
1314 drm_kms_helper_hotplug_event(dev
);
1315 } else if (dc_link_detect(dc_link
, DETECT_REASON_HPDRX
)) {
1317 if (aconnector
->fake_enable
)
1318 aconnector
->fake_enable
= false;
1320 amdgpu_dm_update_connector_after_detect(aconnector
);
1323 drm_modeset_lock_all(dev
);
1324 dm_restore_drm_connector_state(dev
, connector
);
1325 drm_modeset_unlock_all(dev
);
1327 drm_kms_helper_hotplug_event(dev
);
1330 if ((dc_link
->cur_link_settings
.lane_count
!= LANE_COUNT_UNKNOWN
) ||
1331 (dc_link
->type
== dc_connection_mst_branch
))
1332 dm_handle_hpd_rx_irq(aconnector
);
1334 if (dc_link
->type
!= dc_connection_mst_branch
) {
1335 drm_dp_cec_irq(&aconnector
->dm_dp_aux
.aux
);
1336 mutex_unlock(&aconnector
->hpd_lock
);
1340 static void register_hpd_handlers(struct amdgpu_device
*adev
)
1342 struct drm_device
*dev
= adev
->ddev
;
1343 struct drm_connector
*connector
;
1344 struct amdgpu_dm_connector
*aconnector
;
1345 const struct dc_link
*dc_link
;
1346 struct dc_interrupt_params int_params
= {0};
1348 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1349 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1351 list_for_each_entry(connector
,
1352 &dev
->mode_config
.connector_list
, head
) {
1354 aconnector
= to_amdgpu_dm_connector(connector
);
1355 dc_link
= aconnector
->dc_link
;
1357 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd
) {
1358 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
1359 int_params
.irq_source
= dc_link
->irq_source_hpd
;
1361 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1363 (void *) aconnector
);
1366 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd_rx
) {
1368 /* Also register for DP short pulse (hpd_rx). */
1369 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
1370 int_params
.irq_source
= dc_link
->irq_source_hpd_rx
;
1372 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1374 (void *) aconnector
);
1379 /* Register IRQ sources and initialize IRQ callbacks */
1380 static int dce110_register_irq_handlers(struct amdgpu_device
*adev
)
1382 struct dc
*dc
= adev
->dm
.dc
;
1383 struct common_irq_params
*c_irq_params
;
1384 struct dc_interrupt_params int_params
= {0};
1387 unsigned client_id
= AMDGPU_IRQ_CLIENTID_LEGACY
;
1389 if (adev
->asic_type
== CHIP_VEGA10
||
1390 adev
->asic_type
== CHIP_VEGA12
||
1391 adev
->asic_type
== CHIP_VEGA20
||
1392 adev
->asic_type
== CHIP_RAVEN
)
1393 client_id
= SOC15_IH_CLIENTID_DCE
;
1395 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1396 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1399 * Actions of amdgpu_irq_add_id():
1400 * 1. Register a set() function with base driver.
1401 * Base driver will call set() function to enable/disable an
1402 * interrupt in DC hardware.
1403 * 2. Register amdgpu_dm_irq_handler().
1404 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1405 * coming from DC hardware.
1406 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1407 * for acknowledging and handling. */
1409 /* Use VBLANK interrupt */
1410 for (i
= VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0
; i
<= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0
; i
++) {
1411 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->crtc_irq
);
1413 DRM_ERROR("Failed to add crtc irq id!\n");
1417 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1418 int_params
.irq_source
=
1419 dc_interrupt_to_irq_source(dc
, i
, 0);
1421 c_irq_params
= &adev
->dm
.vblank_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VBLANK1
];
1423 c_irq_params
->adev
= adev
;
1424 c_irq_params
->irq_src
= int_params
.irq_source
;
1426 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1427 dm_crtc_high_irq
, c_irq_params
);
1430 /* Use GRPH_PFLIP interrupt */
1431 for (i
= VISLANDS30_IV_SRCID_D1_GRPH_PFLIP
;
1432 i
<= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP
; i
+= 2) {
1433 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->pageflip_irq
);
1435 DRM_ERROR("Failed to add page flip irq id!\n");
1439 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1440 int_params
.irq_source
=
1441 dc_interrupt_to_irq_source(dc
, i
, 0);
1443 c_irq_params
= &adev
->dm
.pflip_params
[int_params
.irq_source
- DC_IRQ_SOURCE_PFLIP_FIRST
];
1445 c_irq_params
->adev
= adev
;
1446 c_irq_params
->irq_src
= int_params
.irq_source
;
1448 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1449 dm_pflip_high_irq
, c_irq_params
);
1454 r
= amdgpu_irq_add_id(adev
, client_id
,
1455 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A
, &adev
->hpd_irq
);
1457 DRM_ERROR("Failed to add hpd irq id!\n");
1461 register_hpd_handlers(adev
);
1466 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1467 /* Register IRQ sources and initialize IRQ callbacks */
1468 static int dcn10_register_irq_handlers(struct amdgpu_device
*adev
)
1470 struct dc
*dc
= adev
->dm
.dc
;
1471 struct common_irq_params
*c_irq_params
;
1472 struct dc_interrupt_params int_params
= {0};
1476 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1477 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1480 * Actions of amdgpu_irq_add_id():
1481 * 1. Register a set() function with base driver.
1482 * Base driver will call set() function to enable/disable an
1483 * interrupt in DC hardware.
1484 * 2. Register amdgpu_dm_irq_handler().
1485 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1486 * coming from DC hardware.
1487 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1488 * for acknowledging and handling.
1491 /* Use VSTARTUP interrupt */
1492 for (i
= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP
;
1493 i
<= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP
+ adev
->mode_info
.num_crtc
- 1;
1495 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_DCE
, i
, &adev
->crtc_irq
);
1498 DRM_ERROR("Failed to add crtc irq id!\n");
1502 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1503 int_params
.irq_source
=
1504 dc_interrupt_to_irq_source(dc
, i
, 0);
1506 c_irq_params
= &adev
->dm
.vblank_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VBLANK1
];
1508 c_irq_params
->adev
= adev
;
1509 c_irq_params
->irq_src
= int_params
.irq_source
;
1511 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1512 dm_crtc_high_irq
, c_irq_params
);
1515 /* Use GRPH_PFLIP interrupt */
1516 for (i
= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT
;
1517 i
<= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT
+ adev
->mode_info
.num_crtc
- 1;
1519 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_DCE
, i
, &adev
->pageflip_irq
);
1521 DRM_ERROR("Failed to add page flip irq id!\n");
1525 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1526 int_params
.irq_source
=
1527 dc_interrupt_to_irq_source(dc
, i
, 0);
1529 c_irq_params
= &adev
->dm
.pflip_params
[int_params
.irq_source
- DC_IRQ_SOURCE_PFLIP_FIRST
];
1531 c_irq_params
->adev
= adev
;
1532 c_irq_params
->irq_src
= int_params
.irq_source
;
1534 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1535 dm_pflip_high_irq
, c_irq_params
);
1540 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_DCE
, DCN_1_0__SRCID__DC_HPD1_INT
,
1543 DRM_ERROR("Failed to add hpd irq id!\n");
1547 register_hpd_handlers(adev
);
1554 * Acquires the lock for the atomic state object and returns
1555 * the new atomic state.
1557 * This should only be called during atomic check.
1559 static int dm_atomic_get_state(struct drm_atomic_state
*state
,
1560 struct dm_atomic_state
**dm_state
)
1562 struct drm_device
*dev
= state
->dev
;
1563 struct amdgpu_device
*adev
= dev
->dev_private
;
1564 struct amdgpu_display_manager
*dm
= &adev
->dm
;
1565 struct drm_private_state
*priv_state
;
1571 ret
= drm_modeset_lock(&dm
->atomic_obj_lock
, state
->acquire_ctx
);
1575 priv_state
= drm_atomic_get_private_obj_state(state
, &dm
->atomic_obj
);
1576 if (IS_ERR(priv_state
))
1577 return PTR_ERR(priv_state
);
1579 *dm_state
= to_dm_atomic_state(priv_state
);
1584 struct dm_atomic_state
*
1585 dm_atomic_get_new_state(struct drm_atomic_state
*state
)
1587 struct drm_device
*dev
= state
->dev
;
1588 struct amdgpu_device
*adev
= dev
->dev_private
;
1589 struct amdgpu_display_manager
*dm
= &adev
->dm
;
1590 struct drm_private_obj
*obj
;
1591 struct drm_private_state
*new_obj_state
;
1594 for_each_new_private_obj_in_state(state
, obj
, new_obj_state
, i
) {
1595 if (obj
->funcs
== dm
->atomic_obj
.funcs
)
1596 return to_dm_atomic_state(new_obj_state
);
1602 struct dm_atomic_state
*
1603 dm_atomic_get_old_state(struct drm_atomic_state
*state
)
1605 struct drm_device
*dev
= state
->dev
;
1606 struct amdgpu_device
*adev
= dev
->dev_private
;
1607 struct amdgpu_display_manager
*dm
= &adev
->dm
;
1608 struct drm_private_obj
*obj
;
1609 struct drm_private_state
*old_obj_state
;
1612 for_each_old_private_obj_in_state(state
, obj
, old_obj_state
, i
) {
1613 if (obj
->funcs
== dm
->atomic_obj
.funcs
)
1614 return to_dm_atomic_state(old_obj_state
);
1620 static struct drm_private_state
*
1621 dm_atomic_duplicate_state(struct drm_private_obj
*obj
)
1623 struct dm_atomic_state
*old_state
, *new_state
;
1625 new_state
= kzalloc(sizeof(*new_state
), GFP_KERNEL
);
1629 __drm_atomic_helper_private_obj_duplicate_state(obj
, &new_state
->base
);
1631 new_state
->context
= dc_create_state();
1632 if (!new_state
->context
) {
1637 old_state
= to_dm_atomic_state(obj
->state
);
1638 if (old_state
&& old_state
->context
)
1639 dc_resource_state_copy_construct(old_state
->context
,
1640 new_state
->context
);
1642 return &new_state
->base
;
1645 static void dm_atomic_destroy_state(struct drm_private_obj
*obj
,
1646 struct drm_private_state
*state
)
1648 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
1650 if (dm_state
&& dm_state
->context
)
1651 dc_release_state(dm_state
->context
);
1656 static struct drm_private_state_funcs dm_atomic_state_funcs
= {
1657 .atomic_duplicate_state
= dm_atomic_duplicate_state
,
1658 .atomic_destroy_state
= dm_atomic_destroy_state
,
1661 static int amdgpu_dm_mode_config_init(struct amdgpu_device
*adev
)
1663 struct dm_atomic_state
*state
;
1666 adev
->mode_info
.mode_config_initialized
= true;
1668 adev
->ddev
->mode_config
.funcs
= (void *)&amdgpu_dm_mode_funcs
;
1669 adev
->ddev
->mode_config
.helper_private
= &amdgpu_dm_mode_config_helperfuncs
;
1671 adev
->ddev
->mode_config
.max_width
= 16384;
1672 adev
->ddev
->mode_config
.max_height
= 16384;
1674 adev
->ddev
->mode_config
.preferred_depth
= 24;
1675 adev
->ddev
->mode_config
.prefer_shadow
= 1;
1676 /* indicates support for immediate flip */
1677 adev
->ddev
->mode_config
.async_page_flip
= true;
1679 adev
->ddev
->mode_config
.fb_base
= adev
->gmc
.aper_base
;
1681 drm_modeset_lock_init(&adev
->dm
.atomic_obj_lock
);
1683 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
1687 state
->context
= dc_create_state();
1688 if (!state
->context
) {
1693 dc_resource_state_copy_construct_current(adev
->dm
.dc
, state
->context
);
1695 drm_atomic_private_obj_init(adev
->ddev
,
1696 &adev
->dm
.atomic_obj
,
1698 &dm_atomic_state_funcs
);
1700 r
= amdgpu_display_modeset_create_props(adev
);
1707 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
1708 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
1710 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1711 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1713 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager
*dm
)
1715 #if defined(CONFIG_ACPI)
1716 struct amdgpu_dm_backlight_caps caps
;
1718 if (dm
->backlight_caps
.caps_valid
)
1721 amdgpu_acpi_get_backlight_caps(dm
->adev
, &caps
);
1722 if (caps
.caps_valid
) {
1723 dm
->backlight_caps
.min_input_signal
= caps
.min_input_signal
;
1724 dm
->backlight_caps
.max_input_signal
= caps
.max_input_signal
;
1725 dm
->backlight_caps
.caps_valid
= true;
1727 dm
->backlight_caps
.min_input_signal
=
1728 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT
;
1729 dm
->backlight_caps
.max_input_signal
=
1730 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT
;
1733 dm
->backlight_caps
.min_input_signal
= AMDGPU_DM_DEFAULT_MIN_BACKLIGHT
;
1734 dm
->backlight_caps
.max_input_signal
= AMDGPU_DM_DEFAULT_MAX_BACKLIGHT
;
1738 static int amdgpu_dm_backlight_update_status(struct backlight_device
*bd
)
1740 struct amdgpu_display_manager
*dm
= bl_get_data(bd
);
1741 struct amdgpu_dm_backlight_caps caps
;
1742 uint32_t brightness
= bd
->props
.brightness
;
1744 amdgpu_dm_update_backlight_caps(dm
);
1745 caps
= dm
->backlight_caps
;
1747 * The brightness input is in the range 0-255
1748 * It needs to be rescaled to be between the
1749 * requested min and max input signal
1751 * It also needs to be scaled up by 0x101 to
1752 * match the DC interface which has a range of
1758 * (caps
.max_input_signal
- caps
.min_input_signal
)
1759 / AMDGPU_MAX_BL_LEVEL
1760 + caps
.min_input_signal
* 0x101;
1762 if (dc_link_set_backlight_level(dm
->backlight_link
,
1769 static int amdgpu_dm_backlight_get_brightness(struct backlight_device
*bd
)
1771 struct amdgpu_display_manager
*dm
= bl_get_data(bd
);
1772 int ret
= dc_link_get_backlight_level(dm
->backlight_link
);
1774 if (ret
== DC_ERROR_UNEXPECTED
)
1775 return bd
->props
.brightness
;
1779 static const struct backlight_ops amdgpu_dm_backlight_ops
= {
1780 .get_brightness
= amdgpu_dm_backlight_get_brightness
,
1781 .update_status
= amdgpu_dm_backlight_update_status
,
1785 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager
*dm
)
1788 struct backlight_properties props
= { 0 };
1790 amdgpu_dm_update_backlight_caps(dm
);
1792 props
.max_brightness
= AMDGPU_MAX_BL_LEVEL
;
1793 props
.brightness
= AMDGPU_MAX_BL_LEVEL
;
1794 props
.type
= BACKLIGHT_RAW
;
1796 snprintf(bl_name
, sizeof(bl_name
), "amdgpu_bl%d",
1797 dm
->adev
->ddev
->primary
->index
);
1799 dm
->backlight_dev
= backlight_device_register(bl_name
,
1800 dm
->adev
->ddev
->dev
,
1802 &amdgpu_dm_backlight_ops
,
1805 if (IS_ERR(dm
->backlight_dev
))
1806 DRM_ERROR("DM: Backlight registration failed!\n");
1808 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name
);
1813 static int initialize_plane(struct amdgpu_display_manager
*dm
,
1814 struct amdgpu_mode_info
*mode_info
,
1817 struct drm_plane
*plane
;
1818 unsigned long possible_crtcs
;
1821 plane
= kzalloc(sizeof(struct drm_plane
), GFP_KERNEL
);
1822 mode_info
->planes
[plane_id
] = plane
;
1825 DRM_ERROR("KMS: Failed to allocate plane\n");
1828 plane
->type
= mode_info
->plane_type
[plane_id
];
1831 * HACK: IGT tests expect that each plane can only have
1832 * one possible CRTC. For now, set one CRTC for each
1833 * plane that is not an underlay, but still allow multiple
1834 * CRTCs for underlay planes.
1836 possible_crtcs
= 1 << plane_id
;
1837 if (plane_id
>= dm
->dc
->caps
.max_streams
)
1838 possible_crtcs
= 0xff;
1840 ret
= amdgpu_dm_plane_init(dm
, mode_info
->planes
[plane_id
], possible_crtcs
);
1843 DRM_ERROR("KMS: Failed to initialize plane\n");
/*
 * Register the backlight device for an internal panel link (eDP/LVDS
 * with something connected) and remember which link owns it.
 */
static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Event if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better then a black screen.
		 */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev)
			dm->backlight_link = link;
	}
#endif
}
1874 * In this architecture, the association
1875 * connector -> encoder -> crtc
1876 * id not really requried. The crtc and connector will hold the
1877 * display_index as an abstraction to use with DAL component
1879 * Returns 0 on success
1881 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device
*adev
)
1883 struct amdgpu_display_manager
*dm
= &adev
->dm
;
1885 struct amdgpu_dm_connector
*aconnector
= NULL
;
1886 struct amdgpu_encoder
*aencoder
= NULL
;
1887 struct amdgpu_mode_info
*mode_info
= &adev
->mode_info
;
1889 int32_t total_overlay_planes
, total_primary_planes
;
1890 enum dc_connection_type new_connection_type
= dc_connection_none
;
1892 link_cnt
= dm
->dc
->caps
.max_links
;
1893 if (amdgpu_dm_mode_config_init(dm
->adev
)) {
1894 DRM_ERROR("DM: Failed to initialize mode config\n");
1898 /* Identify the number of planes to be initialized */
1899 total_overlay_planes
= dm
->dc
->caps
.max_slave_planes
;
1900 total_primary_planes
= dm
->dc
->caps
.max_planes
- dm
->dc
->caps
.max_slave_planes
;
1902 /* First initialize overlay planes, index starting after primary planes */
1903 for (i
= (total_overlay_planes
- 1); i
>= 0; i
--) {
1904 if (initialize_plane(dm
, mode_info
, (total_primary_planes
+ i
))) {
1905 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
1910 /* Initialize primary planes */
1911 for (i
= (total_primary_planes
- 1); i
>= 0; i
--) {
1912 if (initialize_plane(dm
, mode_info
, i
)) {
1913 DRM_ERROR("KMS: Failed to initialize primary plane\n");
1918 for (i
= 0; i
< dm
->dc
->caps
.max_streams
; i
++)
1919 if (amdgpu_dm_crtc_init(dm
, mode_info
->planes
[i
], i
)) {
1920 DRM_ERROR("KMS: Failed to initialize crtc\n");
1924 dm
->display_indexes_num
= dm
->dc
->caps
.max_streams
;
1926 /* loops over all connectors on the board */
1927 for (i
= 0; i
< link_cnt
; i
++) {
1928 struct dc_link
*link
= NULL
;
1930 if (i
> AMDGPU_DM_MAX_DISPLAY_INDEX
) {
1932 "KMS: Cannot support more than %d display indexes\n",
1933 AMDGPU_DM_MAX_DISPLAY_INDEX
);
1937 aconnector
= kzalloc(sizeof(*aconnector
), GFP_KERNEL
);
1941 aencoder
= kzalloc(sizeof(*aencoder
), GFP_KERNEL
);
1945 if (amdgpu_dm_encoder_init(dm
->ddev
, aencoder
, i
)) {
1946 DRM_ERROR("KMS: Failed to initialize encoder\n");
1950 if (amdgpu_dm_connector_init(dm
, aconnector
, i
, aencoder
)) {
1951 DRM_ERROR("KMS: Failed to initialize connector\n");
1955 link
= dc_get_link_at_index(dm
->dc
, i
);
1957 if (!dc_link_detect_sink(link
, &new_connection_type
))
1958 DRM_ERROR("KMS: Failed to detect connector\n");
1960 if (aconnector
->base
.force
&& new_connection_type
== dc_connection_none
) {
1961 emulated_link_detect(link
);
1962 amdgpu_dm_update_connector_after_detect(aconnector
);
1964 } else if (dc_link_detect(link
, DETECT_REASON_BOOT
)) {
1965 amdgpu_dm_update_connector_after_detect(aconnector
);
1966 register_backlight_device(dm
, link
);
1972 /* Software is initialized. Now we can register interrupt handlers. */
1973 switch (adev
->asic_type
) {
1983 case CHIP_POLARIS11
:
1984 case CHIP_POLARIS10
:
1985 case CHIP_POLARIS12
:
1990 if (dce110_register_irq_handlers(dm
->adev
)) {
1991 DRM_ERROR("DM: Failed to initialize IRQ\n");
1995 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1997 if (dcn10_register_irq_handlers(dm
->adev
)) {
1998 DRM_ERROR("DM: Failed to initialize IRQ\n");
2004 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev
->asic_type
);
2008 if (adev
->asic_type
!= CHIP_CARRIZO
&& adev
->asic_type
!= CHIP_STONEY
)
2009 dm
->dc
->debug
.disable_stutter
= amdgpu_pp_feature_mask
& PP_STUTTER_MODE
? false : true;
2015 for (i
= 0; i
< dm
->dc
->caps
.max_planes
; i
++)
2016 kfree(mode_info
->planes
[i
]);
2020 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager
*dm
)
2022 drm_mode_config_cleanup(dm
->ddev
);
2023 drm_atomic_private_obj_fini(&dm
->atomic_obj
);
2027 /******************************************************************************
2028 * amdgpu_display_funcs functions
2029 *****************************************************************************/
/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}
2043 static const struct amdgpu_display_funcs dm_display_funcs
= {
2044 .bandwidth_update
= dm_bandwidth_update
, /* called unconditionally */
2045 .vblank_get_counter
= dm_vblank_get_counter
,/* called unconditionally */
2046 .backlight_set_level
= NULL
, /* never called for DC */
2047 .backlight_get_level
= NULL
, /* never called for DC */
2048 .hpd_sense
= NULL
,/* called unconditionally */
2049 .hpd_set_polarity
= NULL
, /* called unconditionally */
2050 .hpd_get_gpio_reg
= NULL
, /* VBIOS parsing. DAL does it. */
2051 .page_flip_get_scanoutpos
=
2052 dm_crtc_get_scanoutpos
,/* called unconditionally */
2053 .add_encoder
= NULL
, /* VBIOS parsing. DAL does it. */
2054 .add_connector
= NULL
, /* VBIOS parsing. DAL does it. */
2057 #if defined(CONFIG_DEBUG_KERNEL_DC)
2059 static ssize_t
s3_debug_store(struct device
*device
,
2060 struct device_attribute
*attr
,
2066 struct pci_dev
*pdev
= to_pci_dev(device
);
2067 struct drm_device
*drm_dev
= pci_get_drvdata(pdev
);
2068 struct amdgpu_device
*adev
= drm_dev
->dev_private
;
2070 ret
= kstrtoint(buf
, 0, &s3_state
);
2075 drm_kms_helper_hotplug_event(adev
->ddev
);
2080 return ret
== 0 ? count
: 0;
2083 DEVICE_ATTR_WO(s3_debug
);
2087 static int dm_early_init(void *handle
)
2089 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
2091 switch (adev
->asic_type
) {
2094 adev
->mode_info
.num_crtc
= 6;
2095 adev
->mode_info
.num_hpd
= 6;
2096 adev
->mode_info
.num_dig
= 6;
2097 adev
->mode_info
.plane_type
= dm_plane_type_default
;
2100 adev
->mode_info
.num_crtc
= 4;
2101 adev
->mode_info
.num_hpd
= 6;
2102 adev
->mode_info
.num_dig
= 7;
2103 adev
->mode_info
.plane_type
= dm_plane_type_default
;
2107 adev
->mode_info
.num_crtc
= 2;
2108 adev
->mode_info
.num_hpd
= 6;
2109 adev
->mode_info
.num_dig
= 6;
2110 adev
->mode_info
.plane_type
= dm_plane_type_default
;
2114 adev
->mode_info
.num_crtc
= 6;
2115 adev
->mode_info
.num_hpd
= 6;
2116 adev
->mode_info
.num_dig
= 7;
2117 adev
->mode_info
.plane_type
= dm_plane_type_default
;
2120 adev
->mode_info
.num_crtc
= 3;
2121 adev
->mode_info
.num_hpd
= 6;
2122 adev
->mode_info
.num_dig
= 9;
2123 adev
->mode_info
.plane_type
= dm_plane_type_carizzo
;
2126 adev
->mode_info
.num_crtc
= 2;
2127 adev
->mode_info
.num_hpd
= 6;
2128 adev
->mode_info
.num_dig
= 9;
2129 adev
->mode_info
.plane_type
= dm_plane_type_stoney
;
2131 case CHIP_POLARIS11
:
2132 case CHIP_POLARIS12
:
2133 adev
->mode_info
.num_crtc
= 5;
2134 adev
->mode_info
.num_hpd
= 5;
2135 adev
->mode_info
.num_dig
= 5;
2136 adev
->mode_info
.plane_type
= dm_plane_type_default
;
2138 case CHIP_POLARIS10
:
2140 adev
->mode_info
.num_crtc
= 6;
2141 adev
->mode_info
.num_hpd
= 6;
2142 adev
->mode_info
.num_dig
= 6;
2143 adev
->mode_info
.plane_type
= dm_plane_type_default
;
2148 adev
->mode_info
.num_crtc
= 6;
2149 adev
->mode_info
.num_hpd
= 6;
2150 adev
->mode_info
.num_dig
= 6;
2151 adev
->mode_info
.plane_type
= dm_plane_type_default
;
2153 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2155 adev
->mode_info
.num_crtc
= 4;
2156 adev
->mode_info
.num_hpd
= 4;
2157 adev
->mode_info
.num_dig
= 4;
2158 adev
->mode_info
.plane_type
= dm_plane_type_default
;
2162 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev
->asic_type
);
2166 amdgpu_dm_set_irq_funcs(adev
);
2168 if (adev
->mode_info
.funcs
== NULL
)
2169 adev
->mode_info
.funcs
= &dm_display_funcs
;
2172 * Note: Do NOT change adev->audio_endpt_rreg and
2173 * adev->audio_endpt_wreg because they are initialised in
2174 * amdgpu_device_init()
2176 #if defined(CONFIG_DEBUG_KERNEL_DC)
2179 &dev_attr_s3_debug
);
2185 static bool modeset_required(struct drm_crtc_state
*crtc_state
,
2186 struct dc_stream_state
*new_stream
,
2187 struct dc_stream_state
*old_stream
)
2189 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
2192 if (!crtc_state
->enable
)
2195 return crtc_state
->active
;
2198 static bool modereset_required(struct drm_crtc_state
*crtc_state
)
2200 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
2203 return !crtc_state
->enable
|| !crtc_state
->active
;
/* drm_encoder_funcs.destroy: clean up and free the kzalloc'd encoder. */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
2212 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs
= {
2213 .destroy
= amdgpu_dm_encoder_destroy
,
2216 static bool fill_rects_from_plane_state(const struct drm_plane_state
*state
,
2217 struct dc_plane_state
*plane_state
)
2219 plane_state
->src_rect
.x
= state
->src_x
>> 16;
2220 plane_state
->src_rect
.y
= state
->src_y
>> 16;
2221 /* we ignore the mantissa for now and do not deal with floating pixels :( */
2222 plane_state
->src_rect
.width
= state
->src_w
>> 16;
2224 if (plane_state
->src_rect
.width
== 0)
2227 plane_state
->src_rect
.height
= state
->src_h
>> 16;
2228 if (plane_state
->src_rect
.height
== 0)
2231 plane_state
->dst_rect
.x
= state
->crtc_x
;
2232 plane_state
->dst_rect
.y
= state
->crtc_y
;
2234 if (state
->crtc_w
== 0)
2237 plane_state
->dst_rect
.width
= state
->crtc_w
;
2239 if (state
->crtc_h
== 0)
2242 plane_state
->dst_rect
.height
= state
->crtc_h
;
2244 plane_state
->clip_rect
= plane_state
->dst_rect
;
2246 switch (state
->rotation
& DRM_MODE_ROTATE_MASK
) {
2247 case DRM_MODE_ROTATE_0
:
2248 plane_state
->rotation
= ROTATION_ANGLE_0
;
2250 case DRM_MODE_ROTATE_90
:
2251 plane_state
->rotation
= ROTATION_ANGLE_90
;
2253 case DRM_MODE_ROTATE_180
:
2254 plane_state
->rotation
= ROTATION_ANGLE_180
;
2256 case DRM_MODE_ROTATE_270
:
2257 plane_state
->rotation
= ROTATION_ANGLE_270
;
2260 plane_state
->rotation
= ROTATION_ANGLE_0
;
2266 static int get_fb_info(const struct amdgpu_framebuffer
*amdgpu_fb
,
2267 uint64_t *tiling_flags
)
2269 struct amdgpu_bo
*rbo
= gem_to_amdgpu_bo(amdgpu_fb
->base
.obj
[0]);
2270 int r
= amdgpu_bo_reserve(rbo
, false);
2273 /* Don't show error message when returning -ERESTARTSYS */
2274 if (r
!= -ERESTARTSYS
)
2275 DRM_ERROR("Unable to reserve buffer: %d\n", r
);
2280 amdgpu_bo_get_tiling_flags(rbo
, tiling_flags
);
2282 amdgpu_bo_unreserve(rbo
);
2287 static inline uint64_t get_dcc_address(uint64_t address
, uint64_t tiling_flags
)
2289 uint32_t offset
= AMDGPU_TILING_GET(tiling_flags
, DCC_OFFSET_256B
);
2291 return offset
? (address
+ offset
* 256) : 0;
2294 static bool fill_plane_dcc_attributes(struct amdgpu_device
*adev
,
2295 const struct amdgpu_framebuffer
*afb
,
2296 struct dc_plane_state
*plane_state
,
2299 struct dc
*dc
= adev
->dm
.dc
;
2300 struct dc_dcc_surface_param input
;
2301 struct dc_surface_dcc_cap output
;
2302 uint32_t offset
= AMDGPU_TILING_GET(info
, DCC_OFFSET_256B
);
2303 uint32_t i64b
= AMDGPU_TILING_GET(info
, DCC_INDEPENDENT_64B
) != 0;
2304 uint64_t dcc_address
;
2306 memset(&input
, 0, sizeof(input
));
2307 memset(&output
, 0, sizeof(output
));
2312 if (!dc
->cap_funcs
.get_dcc_compression_cap
)
2315 input
.format
= plane_state
->format
;
2316 input
.surface_size
.width
=
2317 plane_state
->plane_size
.grph
.surface_size
.width
;
2318 input
.surface_size
.height
=
2319 plane_state
->plane_size
.grph
.surface_size
.height
;
2320 input
.swizzle_mode
= plane_state
->tiling_info
.gfx9
.swizzle
;
2322 if (plane_state
->rotation
== ROTATION_ANGLE_0
||
2323 plane_state
->rotation
== ROTATION_ANGLE_180
)
2324 input
.scan
= SCAN_DIRECTION_HORIZONTAL
;
2325 else if (plane_state
->rotation
== ROTATION_ANGLE_90
||
2326 plane_state
->rotation
== ROTATION_ANGLE_270
)
2327 input
.scan
= SCAN_DIRECTION_VERTICAL
;
2329 if (!dc
->cap_funcs
.get_dcc_compression_cap(dc
, &input
, &output
))
2332 if (!output
.capable
)
2335 if (i64b
== 0 && output
.grph
.rgb
.independent_64b_blks
!= 0)
2338 plane_state
->dcc
.enable
= 1;
2339 plane_state
->dcc
.grph
.meta_pitch
=
2340 AMDGPU_TILING_GET(info
, DCC_PITCH_MAX
) + 1;
2341 plane_state
->dcc
.grph
.independent_64b_blks
= i64b
;
2343 dcc_address
= get_dcc_address(afb
->address
, info
);
2344 plane_state
->address
.grph
.meta_addr
.low_part
=
2345 lower_32_bits(dcc_address
);
2346 plane_state
->address
.grph
.meta_addr
.high_part
=
2347 upper_32_bits(dcc_address
);
2352 static int fill_plane_attributes_from_fb(struct amdgpu_device
*adev
,
2353 struct dc_plane_state
*plane_state
,
2354 const struct amdgpu_framebuffer
*amdgpu_fb
)
2356 uint64_t tiling_flags
;
2357 unsigned int awidth
;
2358 const struct drm_framebuffer
*fb
= &amdgpu_fb
->base
;
2360 struct drm_format_name_buf format_name
;
2369 switch (fb
->format
->format
) {
2371 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS
;
2373 case DRM_FORMAT_RGB565
:
2374 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_RGB565
;
2376 case DRM_FORMAT_XRGB8888
:
2377 case DRM_FORMAT_ARGB8888
:
2378 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888
;
2380 case DRM_FORMAT_XRGB2101010
:
2381 case DRM_FORMAT_ARGB2101010
:
2382 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010
;
2384 case DRM_FORMAT_XBGR2101010
:
2385 case DRM_FORMAT_ABGR2101010
:
2386 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
;
2388 case DRM_FORMAT_XBGR8888
:
2389 case DRM_FORMAT_ABGR8888
:
2390 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ABGR8888
;
2392 case DRM_FORMAT_NV21
:
2393 plane_state
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr
;
2395 case DRM_FORMAT_NV12
:
2396 plane_state
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb
;
2399 DRM_ERROR("Unsupported screen format %s\n",
2400 drm_get_format_name(fb
->format
->format
, &format_name
));
2404 memset(&plane_state
->address
, 0, sizeof(plane_state
->address
));
2405 memset(&plane_state
->tiling_info
, 0, sizeof(plane_state
->tiling_info
));
2406 memset(&plane_state
->dcc
, 0, sizeof(plane_state
->dcc
));
2408 if (plane_state
->format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
2409 plane_state
->address
.type
= PLN_ADDR_TYPE_GRAPHICS
;
2410 plane_state
->plane_size
.grph
.surface_size
.x
= 0;
2411 plane_state
->plane_size
.grph
.surface_size
.y
= 0;
2412 plane_state
->plane_size
.grph
.surface_size
.width
= fb
->width
;
2413 plane_state
->plane_size
.grph
.surface_size
.height
= fb
->height
;
2414 plane_state
->plane_size
.grph
.surface_pitch
=
2415 fb
->pitches
[0] / fb
->format
->cpp
[0];
2416 /* TODO: unhardcode */
2417 plane_state
->color_space
= COLOR_SPACE_SRGB
;
2420 awidth
= ALIGN(fb
->width
, 64);
2421 plane_state
->address
.type
= PLN_ADDR_TYPE_VIDEO_PROGRESSIVE
;
2422 plane_state
->plane_size
.video
.luma_size
.x
= 0;
2423 plane_state
->plane_size
.video
.luma_size
.y
= 0;
2424 plane_state
->plane_size
.video
.luma_size
.width
= awidth
;
2425 plane_state
->plane_size
.video
.luma_size
.height
= fb
->height
;
2426 /* TODO: unhardcode */
2427 plane_state
->plane_size
.video
.luma_pitch
= awidth
;
2429 plane_state
->plane_size
.video
.chroma_size
.x
= 0;
2430 plane_state
->plane_size
.video
.chroma_size
.y
= 0;
2431 plane_state
->plane_size
.video
.chroma_size
.width
= awidth
;
2432 plane_state
->plane_size
.video
.chroma_size
.height
= fb
->height
;
2433 plane_state
->plane_size
.video
.chroma_pitch
= awidth
/ 2;
2435 /* TODO: unhardcode */
2436 plane_state
->color_space
= COLOR_SPACE_YCBCR709
;
2439 /* Fill GFX8 params */
2440 if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
) == DC_ARRAY_2D_TILED_THIN1
) {
2441 unsigned int bankw
, bankh
, mtaspect
, tile_split
, num_banks
;
2443 bankw
= AMDGPU_TILING_GET(tiling_flags
, BANK_WIDTH
);
2444 bankh
= AMDGPU_TILING_GET(tiling_flags
, BANK_HEIGHT
);
2445 mtaspect
= AMDGPU_TILING_GET(tiling_flags
, MACRO_TILE_ASPECT
);
2446 tile_split
= AMDGPU_TILING_GET(tiling_flags
, TILE_SPLIT
);
2447 num_banks
= AMDGPU_TILING_GET(tiling_flags
, NUM_BANKS
);
2449 /* XXX fix me for VI */
2450 plane_state
->tiling_info
.gfx8
.num_banks
= num_banks
;
2451 plane_state
->tiling_info
.gfx8
.array_mode
=
2452 DC_ARRAY_2D_TILED_THIN1
;
2453 plane_state
->tiling_info
.gfx8
.tile_split
= tile_split
;
2454 plane_state
->tiling_info
.gfx8
.bank_width
= bankw
;
2455 plane_state
->tiling_info
.gfx8
.bank_height
= bankh
;
2456 plane_state
->tiling_info
.gfx8
.tile_aspect
= mtaspect
;
2457 plane_state
->tiling_info
.gfx8
.tile_mode
=
2458 DC_ADDR_SURF_MICRO_TILING_DISPLAY
;
2459 } else if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
)
2460 == DC_ARRAY_1D_TILED_THIN1
) {
2461 plane_state
->tiling_info
.gfx8
.array_mode
= DC_ARRAY_1D_TILED_THIN1
;
2464 plane_state
->tiling_info
.gfx8
.pipe_config
=
2465 AMDGPU_TILING_GET(tiling_flags
, PIPE_CONFIG
);
2467 if (adev
->asic_type
== CHIP_VEGA10
||
2468 adev
->asic_type
== CHIP_VEGA12
||
2469 adev
->asic_type
== CHIP_VEGA20
||
2470 adev
->asic_type
== CHIP_RAVEN
) {
2471 /* Fill GFX9 params */
2472 plane_state
->tiling_info
.gfx9
.num_pipes
=
2473 adev
->gfx
.config
.gb_addr_config_fields
.num_pipes
;
2474 plane_state
->tiling_info
.gfx9
.num_banks
=
2475 adev
->gfx
.config
.gb_addr_config_fields
.num_banks
;
2476 plane_state
->tiling_info
.gfx9
.pipe_interleave
=
2477 adev
->gfx
.config
.gb_addr_config_fields
.pipe_interleave_size
;
2478 plane_state
->tiling_info
.gfx9
.num_shader_engines
=
2479 adev
->gfx
.config
.gb_addr_config_fields
.num_se
;
2480 plane_state
->tiling_info
.gfx9
.max_compressed_frags
=
2481 adev
->gfx
.config
.gb_addr_config_fields
.max_compress_frags
;
2482 plane_state
->tiling_info
.gfx9
.num_rb_per_se
=
2483 adev
->gfx
.config
.gb_addr_config_fields
.num_rb_per_se
;
2484 plane_state
->tiling_info
.gfx9
.swizzle
=
2485 AMDGPU_TILING_GET(tiling_flags
, SWIZZLE_MODE
);
2486 plane_state
->tiling_info
.gfx9
.shaderEnable
= 1;
2488 fill_plane_dcc_attributes(adev
, amdgpu_fb
, plane_state
,
2492 plane_state
->visible
= true;
2493 plane_state
->scaling_quality
.h_taps_c
= 0;
2494 plane_state
->scaling_quality
.v_taps_c
= 0;
2496 /* is this needed? is plane_state zeroed at allocation? */
2497 plane_state
->scaling_quality
.h_taps
= 0;
2498 plane_state
->scaling_quality
.v_taps
= 0;
2499 plane_state
->stereo_format
= PLANE_STEREO_FORMAT_NONE
;
2505 static int fill_plane_attributes(struct amdgpu_device
*adev
,
2506 struct dc_plane_state
*dc_plane_state
,
2507 struct drm_plane_state
*plane_state
,
2508 struct drm_crtc_state
*crtc_state
)
2510 const struct amdgpu_framebuffer
*amdgpu_fb
=
2511 to_amdgpu_framebuffer(plane_state
->fb
);
2512 const struct drm_crtc
*crtc
= plane_state
->crtc
;
2515 if (!fill_rects_from_plane_state(plane_state
, dc_plane_state
))
2518 ret
= fill_plane_attributes_from_fb(
2519 crtc
->dev
->dev_private
,
2527 * Always set input transfer function, since plane state is refreshed
2530 ret
= amdgpu_dm_set_degamma_lut(crtc_state
, dc_plane_state
);
2532 dc_transfer_func_release(dc_plane_state
->in_transfer_func
);
2533 dc_plane_state
->in_transfer_func
= NULL
;
2539 static void update_stream_scaling_settings(const struct drm_display_mode
*mode
,
2540 const struct dm_connector_state
*dm_state
,
2541 struct dc_stream_state
*stream
)
2543 enum amdgpu_rmx_type rmx_type
;
2545 struct rect src
= { 0 }; /* viewport in composition space*/
2546 struct rect dst
= { 0 }; /* stream addressable area */
2548 /* no mode. nothing to be done */
2552 /* Full screen scaling by default */
2553 src
.width
= mode
->hdisplay
;
2554 src
.height
= mode
->vdisplay
;
2555 dst
.width
= stream
->timing
.h_addressable
;
2556 dst
.height
= stream
->timing
.v_addressable
;
2559 rmx_type
= dm_state
->scaling
;
2560 if (rmx_type
== RMX_ASPECT
|| rmx_type
== RMX_OFF
) {
2561 if (src
.width
* dst
.height
<
2562 src
.height
* dst
.width
) {
2563 /* height needs less upscaling/more downscaling */
2564 dst
.width
= src
.width
*
2565 dst
.height
/ src
.height
;
2567 /* width needs less upscaling/more downscaling */
2568 dst
.height
= src
.height
*
2569 dst
.width
/ src
.width
;
2571 } else if (rmx_type
== RMX_CENTER
) {
2575 dst
.x
= (stream
->timing
.h_addressable
- dst
.width
) / 2;
2576 dst
.y
= (stream
->timing
.v_addressable
- dst
.height
) / 2;
2578 if (dm_state
->underscan_enable
) {
2579 dst
.x
+= dm_state
->underscan_hborder
/ 2;
2580 dst
.y
+= dm_state
->underscan_vborder
/ 2;
2581 dst
.width
-= dm_state
->underscan_hborder
;
2582 dst
.height
-= dm_state
->underscan_vborder
;
2589 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
2590 dst
.x
, dst
.y
, dst
.width
, dst
.height
);
2594 static enum dc_color_depth
2595 convert_color_depth_from_display_info(const struct drm_connector
*connector
)
2597 struct dm_connector_state
*dm_conn_state
=
2598 to_dm_connector_state(connector
->state
);
2599 uint32_t bpc
= connector
->display_info
.bpc
;
2601 /* TODO: Remove this when there's support for max_bpc in drm */
2602 if (dm_conn_state
&& bpc
> dm_conn_state
->max_bpc
)
2603 /* Round down to nearest even number. */
2604 bpc
= dm_conn_state
->max_bpc
- (dm_conn_state
->max_bpc
& 1);
2609 * Temporary Work around, DRM doesn't parse color depth for
2610 * EDID revision before 1.4
2611 * TODO: Fix edid parsing
2613 return COLOR_DEPTH_888
;
2615 return COLOR_DEPTH_666
;
2617 return COLOR_DEPTH_888
;
2619 return COLOR_DEPTH_101010
;
2621 return COLOR_DEPTH_121212
;
2623 return COLOR_DEPTH_141414
;
2625 return COLOR_DEPTH_161616
;
2627 return COLOR_DEPTH_UNDEFINED
;
2631 static enum dc_aspect_ratio
2632 get_aspect_ratio(const struct drm_display_mode
*mode_in
)
2634 /* 1-1 mapping, since both enums follow the HDMI spec. */
2635 return (enum dc_aspect_ratio
) mode_in
->picture_aspect_ratio
;
2638 static enum dc_color_space
2639 get_output_color_space(const struct dc_crtc_timing
*dc_crtc_timing
)
2641 enum dc_color_space color_space
= COLOR_SPACE_SRGB
;
2643 switch (dc_crtc_timing
->pixel_encoding
) {
2644 case PIXEL_ENCODING_YCBCR422
:
2645 case PIXEL_ENCODING_YCBCR444
:
2646 case PIXEL_ENCODING_YCBCR420
:
2649 * 27030khz is the separation point between HDTV and SDTV
2650 * according to HDMI spec, we use YCbCr709 and YCbCr601
2653 if (dc_crtc_timing
->pix_clk_100hz
> 270300) {
2654 if (dc_crtc_timing
->flags
.Y_ONLY
)
2656 COLOR_SPACE_YCBCR709_LIMITED
;
2658 color_space
= COLOR_SPACE_YCBCR709
;
2660 if (dc_crtc_timing
->flags
.Y_ONLY
)
2662 COLOR_SPACE_YCBCR601_LIMITED
;
2664 color_space
= COLOR_SPACE_YCBCR601
;
2669 case PIXEL_ENCODING_RGB
:
2670 color_space
= COLOR_SPACE_SRGB
;
2681 static void reduce_mode_colour_depth(struct dc_crtc_timing
*timing_out
)
2683 if (timing_out
->display_color_depth
<= COLOR_DEPTH_888
)
2686 timing_out
->display_color_depth
--;
2689 static void adjust_colour_depth_from_display_info(struct dc_crtc_timing
*timing_out
,
2690 const struct drm_display_info
*info
)
2693 if (timing_out
->display_color_depth
<= COLOR_DEPTH_888
)
2696 normalized_clk
= timing_out
->pix_clk_100hz
/ 10;
2697 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
2698 if (timing_out
->pixel_encoding
== PIXEL_ENCODING_YCBCR420
)
2699 normalized_clk
/= 2;
2700 /* Adjusting pix clock following on HDMI spec based on colour depth */
2701 switch (timing_out
->display_color_depth
) {
2702 case COLOR_DEPTH_101010
:
2703 normalized_clk
= (normalized_clk
* 30) / 24;
2705 case COLOR_DEPTH_121212
:
2706 normalized_clk
= (normalized_clk
* 36) / 24;
2708 case COLOR_DEPTH_161616
:
2709 normalized_clk
= (normalized_clk
* 48) / 24;
2714 if (normalized_clk
<= info
->max_tmds_clock
)
2716 reduce_mode_colour_depth(timing_out
);
2718 } while (timing_out
->display_color_depth
> COLOR_DEPTH_888
);
2723 fill_stream_properties_from_drm_display_mode(struct dc_stream_state
*stream
,
2724 const struct drm_display_mode
*mode_in
,
2725 const struct drm_connector
*connector
,
2726 const struct dc_stream_state
*old_stream
)
2728 struct dc_crtc_timing
*timing_out
= &stream
->timing
;
2729 const struct drm_display_info
*info
= &connector
->display_info
;
2731 memset(timing_out
, 0, sizeof(struct dc_crtc_timing
));
2733 timing_out
->h_border_left
= 0;
2734 timing_out
->h_border_right
= 0;
2735 timing_out
->v_border_top
= 0;
2736 timing_out
->v_border_bottom
= 0;
2737 /* TODO: un-hardcode */
2738 if (drm_mode_is_420_only(info
, mode_in
)
2739 && stream
->signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
2740 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR420
;
2741 else if ((connector
->display_info
.color_formats
& DRM_COLOR_FORMAT_YCRCB444
)
2742 && stream
->signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
2743 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR444
;
2745 timing_out
->pixel_encoding
= PIXEL_ENCODING_RGB
;
2747 timing_out
->timing_3d_format
= TIMING_3D_FORMAT_NONE
;
2748 timing_out
->display_color_depth
= convert_color_depth_from_display_info(
2750 timing_out
->scan_type
= SCANNING_TYPE_NODATA
;
2751 timing_out
->hdmi_vic
= 0;
2754 timing_out
->vic
= old_stream
->timing
.vic
;
2755 timing_out
->flags
.HSYNC_POSITIVE_POLARITY
= old_stream
->timing
.flags
.HSYNC_POSITIVE_POLARITY
;
2756 timing_out
->flags
.VSYNC_POSITIVE_POLARITY
= old_stream
->timing
.flags
.VSYNC_POSITIVE_POLARITY
;
2758 timing_out
->vic
= drm_match_cea_mode(mode_in
);
2759 if (mode_in
->flags
& DRM_MODE_FLAG_PHSYNC
)
2760 timing_out
->flags
.HSYNC_POSITIVE_POLARITY
= 1;
2761 if (mode_in
->flags
& DRM_MODE_FLAG_PVSYNC
)
2762 timing_out
->flags
.VSYNC_POSITIVE_POLARITY
= 1;
2765 timing_out
->h_addressable
= mode_in
->crtc_hdisplay
;
2766 timing_out
->h_total
= mode_in
->crtc_htotal
;
2767 timing_out
->h_sync_width
=
2768 mode_in
->crtc_hsync_end
- mode_in
->crtc_hsync_start
;
2769 timing_out
->h_front_porch
=
2770 mode_in
->crtc_hsync_start
- mode_in
->crtc_hdisplay
;
2771 timing_out
->v_total
= mode_in
->crtc_vtotal
;
2772 timing_out
->v_addressable
= mode_in
->crtc_vdisplay
;
2773 timing_out
->v_front_porch
=
2774 mode_in
->crtc_vsync_start
- mode_in
->crtc_vdisplay
;
2775 timing_out
->v_sync_width
=
2776 mode_in
->crtc_vsync_end
- mode_in
->crtc_vsync_start
;
2777 timing_out
->pix_clk_100hz
= mode_in
->crtc_clock
* 10;
2778 timing_out
->aspect_ratio
= get_aspect_ratio(mode_in
);
2780 stream
->output_color_space
= get_output_color_space(timing_out
);
2782 stream
->out_transfer_func
->type
= TF_TYPE_PREDEFINED
;
2783 stream
->out_transfer_func
->tf
= TRANSFER_FUNCTION_SRGB
;
2784 if (stream
->signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
2785 adjust_colour_depth_from_display_info(timing_out
, info
);
2788 static void fill_audio_info(struct audio_info
*audio_info
,
2789 const struct drm_connector
*drm_connector
,
2790 const struct dc_sink
*dc_sink
)
2793 int cea_revision
= 0;
2794 const struct dc_edid_caps
*edid_caps
= &dc_sink
->edid_caps
;
2796 audio_info
->manufacture_id
= edid_caps
->manufacturer_id
;
2797 audio_info
->product_id
= edid_caps
->product_id
;
2799 cea_revision
= drm_connector
->display_info
.cea_rev
;
2801 strscpy(audio_info
->display_name
,
2802 edid_caps
->display_name
,
2803 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS
);
2805 if (cea_revision
>= 3) {
2806 audio_info
->mode_count
= edid_caps
->audio_mode_count
;
2808 for (i
= 0; i
< audio_info
->mode_count
; ++i
) {
2809 audio_info
->modes
[i
].format_code
=
2810 (enum audio_format_code
)
2811 (edid_caps
->audio_modes
[i
].format_code
);
2812 audio_info
->modes
[i
].channel_count
=
2813 edid_caps
->audio_modes
[i
].channel_count
;
2814 audio_info
->modes
[i
].sample_rates
.all
=
2815 edid_caps
->audio_modes
[i
].sample_rate
;
2816 audio_info
->modes
[i
].sample_size
=
2817 edid_caps
->audio_modes
[i
].sample_size
;
2821 audio_info
->flags
.all
= edid_caps
->speaker_flags
;
2823 /* TODO: We only check for the progressive mode, check for interlace mode too */
2824 if (drm_connector
->latency_present
[0]) {
2825 audio_info
->video_latency
= drm_connector
->video_latency
[0];
2826 audio_info
->audio_latency
= drm_connector
->audio_latency
[0];
2829 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
2834 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode
*src_mode
,
2835 struct drm_display_mode
*dst_mode
)
2837 dst_mode
->crtc_hdisplay
= src_mode
->crtc_hdisplay
;
2838 dst_mode
->crtc_vdisplay
= src_mode
->crtc_vdisplay
;
2839 dst_mode
->crtc_clock
= src_mode
->crtc_clock
;
2840 dst_mode
->crtc_hblank_start
= src_mode
->crtc_hblank_start
;
2841 dst_mode
->crtc_hblank_end
= src_mode
->crtc_hblank_end
;
2842 dst_mode
->crtc_hsync_start
= src_mode
->crtc_hsync_start
;
2843 dst_mode
->crtc_hsync_end
= src_mode
->crtc_hsync_end
;
2844 dst_mode
->crtc_htotal
= src_mode
->crtc_htotal
;
2845 dst_mode
->crtc_hskew
= src_mode
->crtc_hskew
;
2846 dst_mode
->crtc_vblank_start
= src_mode
->crtc_vblank_start
;
2847 dst_mode
->crtc_vblank_end
= src_mode
->crtc_vblank_end
;
2848 dst_mode
->crtc_vsync_start
= src_mode
->crtc_vsync_start
;
2849 dst_mode
->crtc_vsync_end
= src_mode
->crtc_vsync_end
;
2850 dst_mode
->crtc_vtotal
= src_mode
->crtc_vtotal
;
2854 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode
*drm_mode
,
2855 const struct drm_display_mode
*native_mode
,
2858 if (scale_enabled
) {
2859 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
2860 } else if (native_mode
->clock
== drm_mode
->clock
&&
2861 native_mode
->htotal
== drm_mode
->htotal
&&
2862 native_mode
->vtotal
== drm_mode
->vtotal
) {
2863 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
2865 /* no scaling nor amdgpu inserted, no need to patch */
2869 static struct dc_sink
*
2870 create_fake_sink(struct amdgpu_dm_connector
*aconnector
)
2872 struct dc_sink_init_data sink_init_data
= { 0 };
2873 struct dc_sink
*sink
= NULL
;
2874 sink_init_data
.link
= aconnector
->dc_link
;
2875 sink_init_data
.sink_signal
= aconnector
->dc_link
->connector_signal
;
2877 sink
= dc_sink_create(&sink_init_data
);
2879 DRM_ERROR("Failed to create sink!\n");
2882 sink
->sink_signal
= SIGNAL_TYPE_VIRTUAL
;
2887 static void set_multisync_trigger_params(
2888 struct dc_stream_state
*stream
)
2890 if (stream
->triggered_crtc_reset
.enabled
) {
2891 stream
->triggered_crtc_reset
.event
= CRTC_EVENT_VSYNC_RISING
;
2892 stream
->triggered_crtc_reset
.delay
= TRIGGER_DELAY_NEXT_LINE
;
2896 static void set_master_stream(struct dc_stream_state
*stream_set
[],
2899 int j
, highest_rfr
= 0, master_stream
= 0;
2901 for (j
= 0; j
< stream_count
; j
++) {
2902 if (stream_set
[j
] && stream_set
[j
]->triggered_crtc_reset
.enabled
) {
2903 int refresh_rate
= 0;
2905 refresh_rate
= (stream_set
[j
]->timing
.pix_clk_100hz
*100)/
2906 (stream_set
[j
]->timing
.h_total
*stream_set
[j
]->timing
.v_total
);
2907 if (refresh_rate
> highest_rfr
) {
2908 highest_rfr
= refresh_rate
;
2913 for (j
= 0; j
< stream_count
; j
++) {
2915 stream_set
[j
]->triggered_crtc_reset
.event_source
= stream_set
[master_stream
];
2919 static void dm_enable_per_frame_crtc_master_sync(struct dc_state
*context
)
2923 if (context
->stream_count
< 2)
2925 for (i
= 0; i
< context
->stream_count
; i
++) {
2926 if (!context
->streams
[i
])
2929 * TODO: add a function to read AMD VSDB bits and set
2930 * crtc_sync_master.multi_sync_enabled flag
2931 * For now it's set to false
2933 set_multisync_trigger_params(context
->streams
[i
]);
2935 set_master_stream(context
->streams
, context
->stream_count
);
2938 static struct dc_stream_state
*
2939 create_stream_for_sink(struct amdgpu_dm_connector
*aconnector
,
2940 const struct drm_display_mode
*drm_mode
,
2941 const struct dm_connector_state
*dm_state
,
2942 const struct dc_stream_state
*old_stream
)
2944 struct drm_display_mode
*preferred_mode
= NULL
;
2945 struct drm_connector
*drm_connector
;
2946 struct dc_stream_state
*stream
= NULL
;
2947 struct drm_display_mode mode
= *drm_mode
;
2948 bool native_mode_found
= false;
2949 bool scale
= dm_state
? (dm_state
->scaling
!= RMX_OFF
) : false;
2951 int preferred_refresh
= 0;
2953 struct dc_sink
*sink
= NULL
;
2954 if (aconnector
== NULL
) {
2955 DRM_ERROR("aconnector is NULL!\n");
2959 drm_connector
= &aconnector
->base
;
2961 if (!aconnector
->dc_sink
) {
2962 sink
= create_fake_sink(aconnector
);
2966 sink
= aconnector
->dc_sink
;
2969 stream
= dc_create_stream_for_sink(sink
);
2971 if (stream
== NULL
) {
2972 DRM_ERROR("Failed to create stream for sink!\n");
2976 stream
->dm_stream_context
= aconnector
;
2978 list_for_each_entry(preferred_mode
, &aconnector
->base
.modes
, head
) {
2979 /* Search for preferred mode */
2980 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
) {
2981 native_mode_found
= true;
2985 if (!native_mode_found
)
2986 preferred_mode
= list_first_entry_or_null(
2987 &aconnector
->base
.modes
,
2988 struct drm_display_mode
,
2991 mode_refresh
= drm_mode_vrefresh(&mode
);
2993 if (preferred_mode
== NULL
) {
2995 * This may not be an error, the use case is when we have no
2996 * usermode calls to reset and set mode upon hotplug. In this
2997 * case, we call set mode ourselves to restore the previous mode
2998 * and the modelist may not be filled in in time.
3000 DRM_DEBUG_DRIVER("No preferred mode found\n");
3002 decide_crtc_timing_for_drm_display_mode(
3003 &mode
, preferred_mode
,
3004 dm_state
? (dm_state
->scaling
!= RMX_OFF
) : false);
3005 preferred_refresh
= drm_mode_vrefresh(preferred_mode
);
3009 drm_mode_set_crtcinfo(&mode
, 0);
3012 * If scaling is enabled and refresh rate didn't change
3013 * we copy the vic and polarities of the old timings
3015 if (!scale
|| mode_refresh
!= preferred_refresh
)
3016 fill_stream_properties_from_drm_display_mode(stream
,
3017 &mode
, &aconnector
->base
, NULL
);
3019 fill_stream_properties_from_drm_display_mode(stream
,
3020 &mode
, &aconnector
->base
, old_stream
);
3022 update_stream_scaling_settings(&mode
, dm_state
, stream
);
3025 &stream
->audio_info
,
3029 update_stream_signal(stream
, sink
);
3032 if (sink
&& sink
->sink_signal
== SIGNAL_TYPE_VIRTUAL
&& aconnector
->base
.force
!= DRM_FORCE_ON
)
3033 dc_sink_release(sink
);
/* drm_crtc_funcs.destroy: tear down the CRTC and free its container. */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
3044 static void dm_crtc_destroy_state(struct drm_crtc
*crtc
,
3045 struct drm_crtc_state
*state
)
3047 struct dm_crtc_state
*cur
= to_dm_crtc_state(state
);
3049 /* TODO Destroy dc_stream objects are stream object is flattened */
3051 dc_stream_release(cur
->stream
);
3054 __drm_atomic_helper_crtc_destroy_state(state
);
3060 static void dm_crtc_reset_state(struct drm_crtc
*crtc
)
3062 struct dm_crtc_state
*state
;
3065 dm_crtc_destroy_state(crtc
, crtc
->state
);
3067 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
3068 if (WARN_ON(!state
))
3071 crtc
->state
= &state
->base
;
3072 crtc
->state
->crtc
= crtc
;
3076 static struct drm_crtc_state
*
3077 dm_crtc_duplicate_state(struct drm_crtc
*crtc
)
3079 struct dm_crtc_state
*state
, *cur
;
3081 cur
= to_dm_crtc_state(crtc
->state
);
3083 if (WARN_ON(!crtc
->state
))
3086 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
3090 __drm_atomic_helper_crtc_duplicate_state(crtc
, &state
->base
);
3093 state
->stream
= cur
->stream
;
3094 dc_stream_retain(state
->stream
);
3097 state
->vrr_params
= cur
->vrr_params
;
3098 state
->vrr_infopacket
= cur
->vrr_infopacket
;
3099 state
->abm_level
= cur
->abm_level
;
3100 state
->vrr_supported
= cur
->vrr_supported
;
3101 state
->freesync_config
= cur
->freesync_config
;
3102 state
->crc_enabled
= cur
->crc_enabled
;
3104 /* TODO Duplicate dc_stream after objects are stream object is flattened */
3106 return &state
->base
;
3110 static inline int dm_set_vblank(struct drm_crtc
*crtc
, bool enable
)
3112 enum dc_irq_source irq_source
;
3113 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
3114 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
3116 irq_source
= IRQ_TYPE_VBLANK
+ acrtc
->otg_inst
;
3117 return dc_interrupt_set(adev
->dm
.dc
, irq_source
, enable
) ? 0 : -EBUSY
;
3120 static int dm_enable_vblank(struct drm_crtc
*crtc
)
3122 return dm_set_vblank(crtc
, true);
3125 static void dm_disable_vblank(struct drm_crtc
*crtc
)
3127 dm_set_vblank(crtc
, false);
3130 /* Implemented only the options currently availible for the driver */
3131 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs
= {
3132 .reset
= dm_crtc_reset_state
,
3133 .destroy
= amdgpu_dm_crtc_destroy
,
3134 .gamma_set
= drm_atomic_helper_legacy_gamma_set
,
3135 .set_config
= drm_atomic_helper_set_config
,
3136 .page_flip
= drm_atomic_helper_page_flip
,
3137 .atomic_duplicate_state
= dm_crtc_duplicate_state
,
3138 .atomic_destroy_state
= dm_crtc_destroy_state
,
3139 .set_crc_source
= amdgpu_dm_crtc_set_crc_source
,
3140 .verify_crc_source
= amdgpu_dm_crtc_verify_crc_source
,
3141 .enable_vblank
= dm_enable_vblank
,
3142 .disable_vblank
= dm_disable_vblank
,
3145 static enum drm_connector_status
3146 amdgpu_dm_connector_detect(struct drm_connector
*connector
, bool force
)
3149 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
3153 * 1. This interface is NOT called in context of HPD irq.
3154 * 2. This interface *is called* in context of user-mode ioctl. Which
3155 * makes it a bad place for *any* MST-related activity.
3158 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
&&
3159 !aconnector
->fake_enable
)
3160 connected
= (aconnector
->dc_sink
!= NULL
);
3162 connected
= (aconnector
->base
.force
== DRM_FORCE_ON
);
3164 return (connected
? connector_status_connected
:
3165 connector_status_disconnected
);
3168 int amdgpu_dm_connector_atomic_set_property(struct drm_connector
*connector
,
3169 struct drm_connector_state
*connector_state
,
3170 struct drm_property
*property
,
3173 struct drm_device
*dev
= connector
->dev
;
3174 struct amdgpu_device
*adev
= dev
->dev_private
;
3175 struct dm_connector_state
*dm_old_state
=
3176 to_dm_connector_state(connector
->state
);
3177 struct dm_connector_state
*dm_new_state
=
3178 to_dm_connector_state(connector_state
);
3182 if (property
== dev
->mode_config
.scaling_mode_property
) {
3183 enum amdgpu_rmx_type rmx_type
;
3186 case DRM_MODE_SCALE_CENTER
:
3187 rmx_type
= RMX_CENTER
;
3189 case DRM_MODE_SCALE_ASPECT
:
3190 rmx_type
= RMX_ASPECT
;
3192 case DRM_MODE_SCALE_FULLSCREEN
:
3193 rmx_type
= RMX_FULL
;
3195 case DRM_MODE_SCALE_NONE
:
3201 if (dm_old_state
->scaling
== rmx_type
)
3204 dm_new_state
->scaling
= rmx_type
;
3206 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
3207 dm_new_state
->underscan_hborder
= val
;
3209 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
3210 dm_new_state
->underscan_vborder
= val
;
3212 } else if (property
== adev
->mode_info
.underscan_property
) {
3213 dm_new_state
->underscan_enable
= val
;
3215 } else if (property
== adev
->mode_info
.max_bpc_property
) {
3216 dm_new_state
->max_bpc
= val
;
3218 } else if (property
== adev
->mode_info
.abm_level_property
) {
3219 dm_new_state
->abm_level
= val
;
3226 int amdgpu_dm_connector_atomic_get_property(struct drm_connector
*connector
,
3227 const struct drm_connector_state
*state
,
3228 struct drm_property
*property
,
3231 struct drm_device
*dev
= connector
->dev
;
3232 struct amdgpu_device
*adev
= dev
->dev_private
;
3233 struct dm_connector_state
*dm_state
=
3234 to_dm_connector_state(state
);
3237 if (property
== dev
->mode_config
.scaling_mode_property
) {
3238 switch (dm_state
->scaling
) {
3240 *val
= DRM_MODE_SCALE_CENTER
;
3243 *val
= DRM_MODE_SCALE_ASPECT
;
3246 *val
= DRM_MODE_SCALE_FULLSCREEN
;
3250 *val
= DRM_MODE_SCALE_NONE
;
3254 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
3255 *val
= dm_state
->underscan_hborder
;
3257 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
3258 *val
= dm_state
->underscan_vborder
;
3260 } else if (property
== adev
->mode_info
.underscan_property
) {
3261 *val
= dm_state
->underscan_enable
;
3263 } else if (property
== adev
->mode_info
.max_bpc_property
) {
3264 *val
= dm_state
->max_bpc
;
3266 } else if (property
== adev
->mode_info
.abm_level_property
) {
3267 *val
= dm_state
->abm_level
;
3274 static void amdgpu_dm_connector_destroy(struct drm_connector
*connector
)
3276 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
3277 const struct dc_link
*link
= aconnector
->dc_link
;
3278 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
3279 struct amdgpu_display_manager
*dm
= &adev
->dm
;
3281 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3282 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3284 if ((link
->connector_signal
& (SIGNAL_TYPE_EDP
| SIGNAL_TYPE_LVDS
)) &&
3285 link
->type
!= dc_connection_none
&&
3286 dm
->backlight_dev
) {
3287 backlight_device_unregister(dm
->backlight_dev
);
3288 dm
->backlight_dev
= NULL
;
3291 drm_dp_cec_unregister_connector(&aconnector
->dm_dp_aux
.aux
);
3292 drm_connector_unregister(connector
);
3293 drm_connector_cleanup(connector
);
3297 void amdgpu_dm_connector_funcs_reset(struct drm_connector
*connector
)
3299 struct dm_connector_state
*state
=
3300 to_dm_connector_state(connector
->state
);
3302 if (connector
->state
)
3303 __drm_atomic_helper_connector_destroy_state(connector
->state
);
3307 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
3310 state
->scaling
= RMX_OFF
;
3311 state
->underscan_enable
= false;
3312 state
->underscan_hborder
= 0;
3313 state
->underscan_vborder
= 0;
3316 __drm_atomic_helper_connector_reset(connector
, &state
->base
);
3320 struct drm_connector_state
*
3321 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector
*connector
)
3323 struct dm_connector_state
*state
=
3324 to_dm_connector_state(connector
->state
);
3326 struct dm_connector_state
*new_state
=
3327 kmemdup(state
, sizeof(*state
), GFP_KERNEL
);
3332 __drm_atomic_helper_connector_duplicate_state(connector
, &new_state
->base
);
3334 new_state
->freesync_capable
= state
->freesync_capable
;
3335 new_state
->abm_level
= state
->abm_level
;
3336 new_state
->scaling
= state
->scaling
;
3337 new_state
->underscan_enable
= state
->underscan_enable
;
3338 new_state
->underscan_hborder
= state
->underscan_hborder
;
3339 new_state
->underscan_vborder
= state
->underscan_vborder
;
3340 new_state
->max_bpc
= state
->max_bpc
;
3342 return &new_state
->base
;
3345 static const struct drm_connector_funcs amdgpu_dm_connector_funcs
= {
3346 .reset
= amdgpu_dm_connector_funcs_reset
,
3347 .detect
= amdgpu_dm_connector_detect
,
3348 .fill_modes
= drm_helper_probe_single_connector_modes
,
3349 .destroy
= amdgpu_dm_connector_destroy
,
3350 .atomic_duplicate_state
= amdgpu_dm_connector_atomic_duplicate_state
,
3351 .atomic_destroy_state
= drm_atomic_helper_connector_destroy_state
,
3352 .atomic_set_property
= amdgpu_dm_connector_atomic_set_property
,
3353 .atomic_get_property
= amdgpu_dm_connector_atomic_get_property
/* drm_connector_helper_funcs.get_modes: delegate to the DM implementation. */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
3361 static void create_eml_sink(struct amdgpu_dm_connector
*aconnector
)
3363 struct dc_sink_init_data init_params
= {
3364 .link
= aconnector
->dc_link
,
3365 .sink_signal
= SIGNAL_TYPE_VIRTUAL
3369 if (!aconnector
->base
.edid_blob_ptr
) {
3370 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
3371 aconnector
->base
.name
);
3373 aconnector
->base
.force
= DRM_FORCE_OFF
;
3374 aconnector
->base
.override_edid
= false;
3378 edid
= (struct edid
*) aconnector
->base
.edid_blob_ptr
->data
;
3380 aconnector
->edid
= edid
;
3382 aconnector
->dc_em_sink
= dc_link_add_remote_sink(
3383 aconnector
->dc_link
,
3385 (edid
->extensions
+ 1) * EDID_LENGTH
,
3388 if (aconnector
->base
.force
== DRM_FORCE_ON
)
3389 aconnector
->dc_sink
= aconnector
->dc_link
->local_sink
?
3390 aconnector
->dc_link
->local_sink
:
3391 aconnector
->dc_em_sink
;
3394 static void handle_edid_mgmt(struct amdgpu_dm_connector
*aconnector
)
3396 struct dc_link
*link
= (struct dc_link
*)aconnector
->dc_link
;
3399 * In case of headless boot with force on for DP managed connector
3400 * Those settings have to be != 0 to get initial modeset
3402 if (link
->connector_signal
== SIGNAL_TYPE_DISPLAY_PORT
) {
3403 link
->verified_link_cap
.lane_count
= LANE_COUNT_FOUR
;
3404 link
->verified_link_cap
.link_rate
= LINK_RATE_HIGH2
;
3408 aconnector
->base
.override_edid
= true;
3409 create_eml_sink(aconnector
);
3412 enum drm_mode_status
amdgpu_dm_connector_mode_valid(struct drm_connector
*connector
,
3413 struct drm_display_mode
*mode
)
3415 int result
= MODE_ERROR
;
3416 struct dc_sink
*dc_sink
;
3417 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
3418 /* TODO: Unhardcode stream count */
3419 struct dc_stream_state
*stream
;
3420 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
3421 enum dc_status dc_result
= DC_OK
;
3423 if ((mode
->flags
& DRM_MODE_FLAG_INTERLACE
) ||
3424 (mode
->flags
& DRM_MODE_FLAG_DBLSCAN
))
3428 * Only run this the first time mode_valid is called to initilialize
3431 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
&&
3432 !aconnector
->dc_em_sink
)
3433 handle_edid_mgmt(aconnector
);
3435 dc_sink
= to_amdgpu_dm_connector(connector
)->dc_sink
;
3437 if (dc_sink
== NULL
) {
3438 DRM_ERROR("dc_sink is NULL!\n");
3442 stream
= create_stream_for_sink(aconnector
, mode
, NULL
, NULL
);
3443 if (stream
== NULL
) {
3444 DRM_ERROR("Failed to create stream for sink!\n");
3448 dc_result
= dc_validate_stream(adev
->dm
.dc
, stream
);
3450 if (dc_result
== DC_OK
)
3453 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
3459 dc_stream_release(stream
);
3462 /* TODO: error handling*/
3466 static const struct drm_connector_helper_funcs
3467 amdgpu_dm_connector_helper_funcs
= {
3469 * If hotplugging a second bigger display in FB Con mode, bigger resolution
3470 * modes will be filtered by drm_mode_validate_size(), and those modes
3471 * are missing after user start lightdm. So we need to renew modes list.
3472 * in get_modes call back, not just return the modes count
3474 .get_modes
= get_modes
,
3475 .mode_valid
= amdgpu_dm_connector_mode_valid
,
/* drm_crtc_helper_funcs.disable: intentionally empty for this driver. */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
3482 static int dm_crtc_helper_atomic_check(struct drm_crtc
*crtc
,
3483 struct drm_crtc_state
*state
)
3485 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
3486 struct dc
*dc
= adev
->dm
.dc
;
3487 struct dm_crtc_state
*dm_crtc_state
= to_dm_crtc_state(state
);
3490 if (unlikely(!dm_crtc_state
->stream
&&
3491 modeset_required(state
, NULL
, dm_crtc_state
->stream
))) {
3496 /* In some use cases, like reset, no stream is attached */
3497 if (!dm_crtc_state
->stream
)
3500 if (dc_validate_stream(dc
, dm_crtc_state
->stream
) == DC_OK
)
3506 static bool dm_crtc_helper_mode_fixup(struct drm_crtc
*crtc
,
3507 const struct drm_display_mode
*mode
,
3508 struct drm_display_mode
*adjusted_mode
)
3513 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs
= {
3514 .disable
= dm_crtc_helper_disable
,
3515 .atomic_check
= dm_crtc_helper_atomic_check
,
3516 .mode_fixup
= dm_crtc_helper_mode_fixup
/* Encoders are virtual in DC; nothing to disable at this level. */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}
/* Encoder states carry no DM-specific constraints; always valid. */
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	return 0;
}
3531 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs
= {
3532 .disable
= dm_encoder_helper_disable
,
3533 .atomic_check
= dm_encoder_helper_atomic_check
3536 static void dm_drm_plane_reset(struct drm_plane
*plane
)
3538 struct dm_plane_state
*amdgpu_state
= NULL
;
3541 plane
->funcs
->atomic_destroy_state(plane
, plane
->state
);
3543 amdgpu_state
= kzalloc(sizeof(*amdgpu_state
), GFP_KERNEL
);
3544 WARN_ON(amdgpu_state
== NULL
);
3547 plane
->state
= &amdgpu_state
->base
;
3548 plane
->state
->plane
= plane
;
3549 plane
->state
->rotation
= DRM_MODE_ROTATE_0
;
3553 static struct drm_plane_state
*
3554 dm_drm_plane_duplicate_state(struct drm_plane
*plane
)
3556 struct dm_plane_state
*dm_plane_state
, *old_dm_plane_state
;
3558 old_dm_plane_state
= to_dm_plane_state(plane
->state
);
3559 dm_plane_state
= kzalloc(sizeof(*dm_plane_state
), GFP_KERNEL
);
3560 if (!dm_plane_state
)
3563 __drm_atomic_helper_plane_duplicate_state(plane
, &dm_plane_state
->base
);
3565 if (old_dm_plane_state
->dc_state
) {
3566 dm_plane_state
->dc_state
= old_dm_plane_state
->dc_state
;
3567 dc_plane_state_retain(dm_plane_state
->dc_state
);
3570 return &dm_plane_state
->base
;
3573 void dm_drm_plane_destroy_state(struct drm_plane
*plane
,
3574 struct drm_plane_state
*state
)
3576 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(state
);
3578 if (dm_plane_state
->dc_state
)
3579 dc_plane_state_release(dm_plane_state
->dc_state
);
3581 drm_atomic_helper_plane_destroy_state(plane
, state
);
3584 static const struct drm_plane_funcs dm_plane_funcs
= {
3585 .update_plane
= drm_atomic_helper_update_plane
,
3586 .disable_plane
= drm_atomic_helper_disable_plane
,
3587 .destroy
= drm_primary_helper_destroy
,
3588 .reset
= dm_drm_plane_reset
,
3589 .atomic_duplicate_state
= dm_drm_plane_duplicate_state
,
3590 .atomic_destroy_state
= dm_drm_plane_destroy_state
,
3593 static int dm_plane_helper_prepare_fb(struct drm_plane
*plane
,
3594 struct drm_plane_state
*new_state
)
3596 struct amdgpu_framebuffer
*afb
;
3597 struct drm_gem_object
*obj
;
3598 struct amdgpu_device
*adev
;
3599 struct amdgpu_bo
*rbo
;
3600 uint64_t chroma_addr
= 0;
3601 struct dm_plane_state
*dm_plane_state_new
, *dm_plane_state_old
;
3602 uint64_t tiling_flags
, dcc_address
;
3603 unsigned int awidth
;
3607 dm_plane_state_old
= to_dm_plane_state(plane
->state
);
3608 dm_plane_state_new
= to_dm_plane_state(new_state
);
3610 if (!new_state
->fb
) {
3611 DRM_DEBUG_DRIVER("No FB bound\n");
3615 afb
= to_amdgpu_framebuffer(new_state
->fb
);
3616 obj
= new_state
->fb
->obj
[0];
3617 rbo
= gem_to_amdgpu_bo(obj
);
3618 adev
= amdgpu_ttm_adev(rbo
->tbo
.bdev
);
3619 r
= amdgpu_bo_reserve(rbo
, false);
3620 if (unlikely(r
!= 0))
3623 if (plane
->type
!= DRM_PLANE_TYPE_CURSOR
)
3624 domain
= amdgpu_display_supported_domains(adev
);
3626 domain
= AMDGPU_GEM_DOMAIN_VRAM
;
3628 r
= amdgpu_bo_pin(rbo
, domain
);
3629 if (unlikely(r
!= 0)) {
3630 if (r
!= -ERESTARTSYS
)
3631 DRM_ERROR("Failed to pin framebuffer with error %d\n", r
);
3632 amdgpu_bo_unreserve(rbo
);
3636 r
= amdgpu_ttm_alloc_gart(&rbo
->tbo
);
3637 if (unlikely(r
!= 0)) {
3638 amdgpu_bo_unpin(rbo
);
3639 amdgpu_bo_unreserve(rbo
);
3640 DRM_ERROR("%p bind failed\n", rbo
);
3644 amdgpu_bo_get_tiling_flags(rbo
, &tiling_flags
);
3646 amdgpu_bo_unreserve(rbo
);
3648 afb
->address
= amdgpu_bo_gpu_offset(rbo
);
3652 if (dm_plane_state_new
->dc_state
&&
3653 dm_plane_state_old
->dc_state
!= dm_plane_state_new
->dc_state
) {
3654 struct dc_plane_state
*plane_state
= dm_plane_state_new
->dc_state
;
3656 if (plane_state
->format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
3657 plane_state
->address
.grph
.addr
.low_part
= lower_32_bits(afb
->address
);
3658 plane_state
->address
.grph
.addr
.high_part
= upper_32_bits(afb
->address
);
3661 get_dcc_address(afb
->address
, tiling_flags
);
3662 plane_state
->address
.grph
.meta_addr
.low_part
=
3663 lower_32_bits(dcc_address
);
3664 plane_state
->address
.grph
.meta_addr
.high_part
=
3665 upper_32_bits(dcc_address
);
3667 awidth
= ALIGN(new_state
->fb
->width
, 64);
3668 plane_state
->address
.type
= PLN_ADDR_TYPE_VIDEO_PROGRESSIVE
;
3669 plane_state
->address
.video_progressive
.luma_addr
.low_part
3670 = lower_32_bits(afb
->address
);
3671 plane_state
->address
.video_progressive
.luma_addr
.high_part
3672 = upper_32_bits(afb
->address
);
3673 chroma_addr
= afb
->address
+ (u64
)awidth
* new_state
->fb
->height
;
3674 plane_state
->address
.video_progressive
.chroma_addr
.low_part
3675 = lower_32_bits(chroma_addr
);
3676 plane_state
->address
.video_progressive
.chroma_addr
.high_part
3677 = upper_32_bits(chroma_addr
);
3684 static void dm_plane_helper_cleanup_fb(struct drm_plane
*plane
,
3685 struct drm_plane_state
*old_state
)
3687 struct amdgpu_bo
*rbo
;
3693 rbo
= gem_to_amdgpu_bo(old_state
->fb
->obj
[0]);
3694 r
= amdgpu_bo_reserve(rbo
, false);
3696 DRM_ERROR("failed to reserve rbo before unpin\n");
3700 amdgpu_bo_unpin(rbo
);
3701 amdgpu_bo_unreserve(rbo
);
3702 amdgpu_bo_unref(&rbo
);
3705 static int dm_plane_atomic_check(struct drm_plane
*plane
,
3706 struct drm_plane_state
*state
)
3708 struct amdgpu_device
*adev
= plane
->dev
->dev_private
;
3709 struct dc
*dc
= adev
->dm
.dc
;
3710 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(state
);
3712 if (!dm_plane_state
->dc_state
)
3715 if (!fill_rects_from_plane_state(state
, dm_plane_state
->dc_state
))
3718 if (dc_validate_plane(dc
, dm_plane_state
->dc_state
) == DC_OK
)
3724 static int dm_plane_atomic_async_check(struct drm_plane
*plane
,
3725 struct drm_plane_state
*new_plane_state
)
3727 struct drm_plane_state
*old_plane_state
=
3728 drm_atomic_get_old_plane_state(new_plane_state
->state
, plane
);
3730 /* Only support async updates on cursor planes. */
3731 if (plane
->type
!= DRM_PLANE_TYPE_CURSOR
)
3735 * DRM calls prepare_fb and cleanup_fb on new_plane_state for
3736 * async commits so don't allow fb changes.
3738 if (old_plane_state
->fb
!= new_plane_state
->fb
)
3744 static void dm_plane_atomic_async_update(struct drm_plane
*plane
,
3745 struct drm_plane_state
*new_state
)
3747 struct drm_plane_state
*old_state
=
3748 drm_atomic_get_old_plane_state(new_state
->state
, plane
);
3750 if (plane
->state
->fb
!= new_state
->fb
)
3751 drm_atomic_set_fb_for_plane(plane
->state
, new_state
->fb
);
3753 plane
->state
->src_x
= new_state
->src_x
;
3754 plane
->state
->src_y
= new_state
->src_y
;
3755 plane
->state
->src_w
= new_state
->src_w
;
3756 plane
->state
->src_h
= new_state
->src_h
;
3757 plane
->state
->crtc_x
= new_state
->crtc_x
;
3758 plane
->state
->crtc_y
= new_state
->crtc_y
;
3759 plane
->state
->crtc_w
= new_state
->crtc_w
;
3760 plane
->state
->crtc_h
= new_state
->crtc_h
;
3762 handle_cursor_update(plane
, old_state
);
3765 static const struct drm_plane_helper_funcs dm_plane_helper_funcs
= {
3766 .prepare_fb
= dm_plane_helper_prepare_fb
,
3767 .cleanup_fb
= dm_plane_helper_cleanup_fb
,
3768 .atomic_check
= dm_plane_atomic_check
,
3769 .atomic_async_check
= dm_plane_atomic_async_check
,
3770 .atomic_async_update
= dm_plane_atomic_async_update
3774 * TODO: these are currently initialized to rgb formats only.
3775 * For future use cases we should either initialize them dynamically based on
3776 * plane capabilities, or initialize this array to all formats, so internal drm
3777 * check will succeed, and let DC implement proper check
/*
 * Pixel formats advertised for primary (RGB) planes; passed to
 * drm_universal_plane_init() from amdgpu_dm_plane_init().
 * NOTE(review): the extraction appears to have dropped at least one leading
 * entry (upstream lists DRM_FORMAT_C8 first) and the closing brace — confirm
 * against the full source tree before relying on this list.
 */
3779 static const uint32_t rgb_formats
[] = {
3781 DRM_FORMAT_XRGB8888
,
3782 DRM_FORMAT_ARGB8888
,
3783 DRM_FORMAT_RGBA8888
,
3784 DRM_FORMAT_XRGB2101010
,
3785 DRM_FORMAT_XBGR2101010
,
3786 DRM_FORMAT_ARGB2101010
,
3787 DRM_FORMAT_ABGR2101010
,
3788 DRM_FORMAT_XBGR8888
,
3789 DRM_FORMAT_ABGR8888
,
/*
 * Pixel formats for overlay (video/YUV) planes.
 * NOTE(review): the array entries are missing from this extraction —
 * confirm the element list against the full source tree.
 */
3792 static const uint32_t yuv_formats
[] = {
/*
 * Pixel formats for the cursor plane.
 * NOTE(review): the array entries are missing from this extraction —
 * confirm the element list against the full source tree.
 */
3797 static const u32 cursor_formats
[] = {
3801 static int amdgpu_dm_plane_init(struct amdgpu_display_manager
*dm
,
3802 struct drm_plane
*plane
,
3803 unsigned long possible_crtcs
)
3807 switch (plane
->type
) {
3808 case DRM_PLANE_TYPE_PRIMARY
:
3809 res
= drm_universal_plane_init(
3815 ARRAY_SIZE(rgb_formats
),
3816 NULL
, plane
->type
, NULL
);
3818 case DRM_PLANE_TYPE_OVERLAY
:
3819 res
= drm_universal_plane_init(
3825 ARRAY_SIZE(yuv_formats
),
3826 NULL
, plane
->type
, NULL
);
3828 case DRM_PLANE_TYPE_CURSOR
:
3829 res
= drm_universal_plane_init(
3835 ARRAY_SIZE(cursor_formats
),
3836 NULL
, plane
->type
, NULL
);
3840 drm_plane_helper_add(plane
, &dm_plane_helper_funcs
);
3842 /* Create (reset) the plane state */
3843 if (plane
->funcs
->reset
)
3844 plane
->funcs
->reset(plane
);
3850 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager
*dm
,
3851 struct drm_plane
*plane
,
3852 uint32_t crtc_index
)
3854 struct amdgpu_crtc
*acrtc
= NULL
;
3855 struct drm_plane
*cursor_plane
;
3859 cursor_plane
= kzalloc(sizeof(*cursor_plane
), GFP_KERNEL
);
3863 cursor_plane
->type
= DRM_PLANE_TYPE_CURSOR
;
3864 res
= amdgpu_dm_plane_init(dm
, cursor_plane
, 0);
3866 acrtc
= kzalloc(sizeof(struct amdgpu_crtc
), GFP_KERNEL
);
3870 res
= drm_crtc_init_with_planes(
3875 &amdgpu_dm_crtc_funcs
, NULL
);
3880 drm_crtc_helper_add(&acrtc
->base
, &amdgpu_dm_crtc_helper_funcs
);
3882 /* Create (reset) the plane state */
3883 if (acrtc
->base
.funcs
->reset
)
3884 acrtc
->base
.funcs
->reset(&acrtc
->base
);
3886 acrtc
->max_cursor_width
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
3887 acrtc
->max_cursor_height
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
3889 acrtc
->crtc_id
= crtc_index
;
3890 acrtc
->base
.enabled
= false;
3891 acrtc
->otg_inst
= -1;
3893 dm
->adev
->mode_info
.crtcs
[crtc_index
] = acrtc
;
3894 drm_crtc_enable_color_mgmt(&acrtc
->base
, MAX_COLOR_LUT_ENTRIES
,
3895 true, MAX_COLOR_LUT_ENTRIES
);
3896 drm_mode_crtc_set_gamma_size(&acrtc
->base
, MAX_COLOR_LEGACY_LUT_ENTRIES
);
3902 kfree(cursor_plane
);
3907 static int to_drm_connector_type(enum signal_type st
)
3910 case SIGNAL_TYPE_HDMI_TYPE_A
:
3911 return DRM_MODE_CONNECTOR_HDMIA
;
3912 case SIGNAL_TYPE_EDP
:
3913 return DRM_MODE_CONNECTOR_eDP
;
3914 case SIGNAL_TYPE_LVDS
:
3915 return DRM_MODE_CONNECTOR_LVDS
;
3916 case SIGNAL_TYPE_RGB
:
3917 return DRM_MODE_CONNECTOR_VGA
;
3918 case SIGNAL_TYPE_DISPLAY_PORT
:
3919 case SIGNAL_TYPE_DISPLAY_PORT_MST
:
3920 return DRM_MODE_CONNECTOR_DisplayPort
;
3921 case SIGNAL_TYPE_DVI_DUAL_LINK
:
3922 case SIGNAL_TYPE_DVI_SINGLE_LINK
:
3923 return DRM_MODE_CONNECTOR_DVID
;
3924 case SIGNAL_TYPE_VIRTUAL
:
3925 return DRM_MODE_CONNECTOR_VIRTUAL
;
3928 return DRM_MODE_CONNECTOR_Unknown
;
3932 static struct drm_encoder
*amdgpu_dm_connector_to_encoder(struct drm_connector
*connector
)
3934 return drm_encoder_find(connector
->dev
, NULL
, connector
->encoder_ids
[0]);
3937 static void amdgpu_dm_get_native_mode(struct drm_connector
*connector
)
3939 struct drm_encoder
*encoder
;
3940 struct amdgpu_encoder
*amdgpu_encoder
;
3942 encoder
= amdgpu_dm_connector_to_encoder(connector
);
3944 if (encoder
== NULL
)
3947 amdgpu_encoder
= to_amdgpu_encoder(encoder
);
3949 amdgpu_encoder
->native_mode
.clock
= 0;
3951 if (!list_empty(&connector
->probed_modes
)) {
3952 struct drm_display_mode
*preferred_mode
= NULL
;
3954 list_for_each_entry(preferred_mode
,
3955 &connector
->probed_modes
,
3957 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
)
3958 amdgpu_encoder
->native_mode
= *preferred_mode
;
3966 static struct drm_display_mode
*
3967 amdgpu_dm_create_common_mode(struct drm_encoder
*encoder
,
3969 int hdisplay
, int vdisplay
)
3971 struct drm_device
*dev
= encoder
->dev
;
3972 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
3973 struct drm_display_mode
*mode
= NULL
;
3974 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
3976 mode
= drm_mode_duplicate(dev
, native_mode
);
3981 mode
->hdisplay
= hdisplay
;
3982 mode
->vdisplay
= vdisplay
;
3983 mode
->type
&= ~DRM_MODE_TYPE_PREFERRED
;
3984 strscpy(mode
->name
, name
, DRM_DISPLAY_MODE_LEN
);
3990 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder
*encoder
,
3991 struct drm_connector
*connector
)
3993 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
3994 struct drm_display_mode
*mode
= NULL
;
3995 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
3996 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
3997 to_amdgpu_dm_connector(connector
);
4001 char name
[DRM_DISPLAY_MODE_LEN
];
4004 } common_modes
[] = {
4005 { "640x480", 640, 480},
4006 { "800x600", 800, 600},
4007 { "1024x768", 1024, 768},
4008 { "1280x720", 1280, 720},
4009 { "1280x800", 1280, 800},
4010 {"1280x1024", 1280, 1024},
4011 { "1440x900", 1440, 900},
4012 {"1680x1050", 1680, 1050},
4013 {"1600x1200", 1600, 1200},
4014 {"1920x1080", 1920, 1080},
4015 {"1920x1200", 1920, 1200}
4018 n
= ARRAY_SIZE(common_modes
);
4020 for (i
= 0; i
< n
; i
++) {
4021 struct drm_display_mode
*curmode
= NULL
;
4022 bool mode_existed
= false;
4024 if (common_modes
[i
].w
> native_mode
->hdisplay
||
4025 common_modes
[i
].h
> native_mode
->vdisplay
||
4026 (common_modes
[i
].w
== native_mode
->hdisplay
&&
4027 common_modes
[i
].h
== native_mode
->vdisplay
))
4030 list_for_each_entry(curmode
, &connector
->probed_modes
, head
) {
4031 if (common_modes
[i
].w
== curmode
->hdisplay
&&
4032 common_modes
[i
].h
== curmode
->vdisplay
) {
4033 mode_existed
= true;
4041 mode
= amdgpu_dm_create_common_mode(encoder
,
4042 common_modes
[i
].name
, common_modes
[i
].w
,
4044 drm_mode_probed_add(connector
, mode
);
4045 amdgpu_dm_connector
->num_modes
++;
4049 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector
*connector
,
4052 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
4053 to_amdgpu_dm_connector(connector
);
4056 /* empty probed_modes */
4057 INIT_LIST_HEAD(&connector
->probed_modes
);
4058 amdgpu_dm_connector
->num_modes
=
4059 drm_add_edid_modes(connector
, edid
);
4061 amdgpu_dm_get_native_mode(connector
);
4063 amdgpu_dm_connector
->num_modes
= 0;
4067 static int amdgpu_dm_connector_get_modes(struct drm_connector
*connector
)
4069 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
4070 to_amdgpu_dm_connector(connector
);
4071 struct drm_encoder
*encoder
;
4072 struct edid
*edid
= amdgpu_dm_connector
->edid
;
4074 encoder
= amdgpu_dm_connector_to_encoder(connector
);
4076 if (!edid
|| !drm_edid_is_valid(edid
)) {
4077 amdgpu_dm_connector
->num_modes
=
4078 drm_add_modes_noedid(connector
, 640, 480);
4080 amdgpu_dm_connector_ddc_get_modes(connector
, edid
);
4081 amdgpu_dm_connector_add_common_modes(encoder
, connector
);
4083 amdgpu_dm_fbc_init(connector
);
4085 return amdgpu_dm_connector
->num_modes
;
4088 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager
*dm
,
4089 struct amdgpu_dm_connector
*aconnector
,
4091 struct dc_link
*link
,
4094 struct amdgpu_device
*adev
= dm
->ddev
->dev_private
;
4096 aconnector
->connector_id
= link_index
;
4097 aconnector
->dc_link
= link
;
4098 aconnector
->base
.interlace_allowed
= false;
4099 aconnector
->base
.doublescan_allowed
= false;
4100 aconnector
->base
.stereo_allowed
= false;
4101 aconnector
->base
.dpms
= DRM_MODE_DPMS_OFF
;
4102 aconnector
->hpd
.hpd
= AMDGPU_HPD_NONE
; /* not used */
4103 mutex_init(&aconnector
->hpd_lock
);
4106 * configure support HPD hot plug connector_>polled default value is 0
4107 * which means HPD hot plug not supported
4109 switch (connector_type
) {
4110 case DRM_MODE_CONNECTOR_HDMIA
:
4111 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
4112 aconnector
->base
.ycbcr_420_allowed
=
4113 link
->link_enc
->features
.hdmi_ycbcr420_supported
? true : false;
4115 case DRM_MODE_CONNECTOR_DisplayPort
:
4116 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
4117 aconnector
->base
.ycbcr_420_allowed
=
4118 link
->link_enc
->features
.dp_ycbcr420_supported
? true : false;
4120 case DRM_MODE_CONNECTOR_DVID
:
4121 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
4127 drm_object_attach_property(&aconnector
->base
.base
,
4128 dm
->ddev
->mode_config
.scaling_mode_property
,
4129 DRM_MODE_SCALE_NONE
);
4131 drm_object_attach_property(&aconnector
->base
.base
,
4132 adev
->mode_info
.underscan_property
,
4134 drm_object_attach_property(&aconnector
->base
.base
,
4135 adev
->mode_info
.underscan_hborder_property
,
4137 drm_object_attach_property(&aconnector
->base
.base
,
4138 adev
->mode_info
.underscan_vborder_property
,
4140 drm_object_attach_property(&aconnector
->base
.base
,
4141 adev
->mode_info
.max_bpc_property
,
4144 if (connector_type
== DRM_MODE_CONNECTOR_eDP
&&
4145 dc_is_dmcu_initialized(adev
->dm
.dc
)) {
4146 drm_object_attach_property(&aconnector
->base
.base
,
4147 adev
->mode_info
.abm_level_property
, 0);
4150 if (connector_type
== DRM_MODE_CONNECTOR_HDMIA
||
4151 connector_type
== DRM_MODE_CONNECTOR_DisplayPort
) {
4152 drm_connector_attach_vrr_capable_property(
4157 static int amdgpu_dm_i2c_xfer(struct i2c_adapter
*i2c_adap
,
4158 struct i2c_msg
*msgs
, int num
)
4160 struct amdgpu_i2c_adapter
*i2c
= i2c_get_adapdata(i2c_adap
);
4161 struct ddc_service
*ddc_service
= i2c
->ddc_service
;
4162 struct i2c_command cmd
;
4166 cmd
.payloads
= kcalloc(num
, sizeof(struct i2c_payload
), GFP_KERNEL
);
4171 cmd
.number_of_payloads
= num
;
4172 cmd
.engine
= I2C_COMMAND_ENGINE_DEFAULT
;
4175 for (i
= 0; i
< num
; i
++) {
4176 cmd
.payloads
[i
].write
= !(msgs
[i
].flags
& I2C_M_RD
);
4177 cmd
.payloads
[i
].address
= msgs
[i
].addr
;
4178 cmd
.payloads
[i
].length
= msgs
[i
].len
;
4179 cmd
.payloads
[i
].data
= msgs
[i
].buf
;
4183 ddc_service
->ctx
->dc
,
4184 ddc_service
->ddc_pin
->hw_info
.ddc_channel
,
4188 kfree(cmd
.payloads
);
4192 static u32
amdgpu_dm_i2c_func(struct i2c_adapter
*adap
)
4194 return I2C_FUNC_I2C
| I2C_FUNC_SMBUS_EMUL
;
4197 static const struct i2c_algorithm amdgpu_dm_i2c_algo
= {
4198 .master_xfer
= amdgpu_dm_i2c_xfer
,
4199 .functionality
= amdgpu_dm_i2c_func
,
4202 static struct amdgpu_i2c_adapter
*
4203 create_i2c(struct ddc_service
*ddc_service
,
4207 struct amdgpu_device
*adev
= ddc_service
->ctx
->driver_context
;
4208 struct amdgpu_i2c_adapter
*i2c
;
4210 i2c
= kzalloc(sizeof(struct amdgpu_i2c_adapter
), GFP_KERNEL
);
4213 i2c
->base
.owner
= THIS_MODULE
;
4214 i2c
->base
.class = I2C_CLASS_DDC
;
4215 i2c
->base
.dev
.parent
= &adev
->pdev
->dev
;
4216 i2c
->base
.algo
= &amdgpu_dm_i2c_algo
;
4217 snprintf(i2c
->base
.name
, sizeof(i2c
->base
.name
), "AMDGPU DM i2c hw bus %d", link_index
);
4218 i2c_set_adapdata(&i2c
->base
, i2c
);
4219 i2c
->ddc_service
= ddc_service
;
4220 i2c
->ddc_service
->ddc_pin
->hw_info
.ddc_channel
= link_index
;
4227 * Note: this function assumes that dc_link_detect() was called for the
4228 * dc_link which will be represented by this aconnector.
4230 static int amdgpu_dm_connector_init(struct amdgpu_display_manager
*dm
,
4231 struct amdgpu_dm_connector
*aconnector
,
4232 uint32_t link_index
,
4233 struct amdgpu_encoder
*aencoder
)
4237 struct dc
*dc
= dm
->dc
;
4238 struct dc_link
*link
= dc_get_link_at_index(dc
, link_index
);
4239 struct amdgpu_i2c_adapter
*i2c
;
4241 link
->priv
= aconnector
;
4243 DRM_DEBUG_DRIVER("%s()\n", __func__
);
4245 i2c
= create_i2c(link
->ddc
, link
->link_index
, &res
);
4247 DRM_ERROR("Failed to create i2c adapter data\n");
4251 aconnector
->i2c
= i2c
;
4252 res
= i2c_add_adapter(&i2c
->base
);
4255 DRM_ERROR("Failed to register hw i2c %d\n", link
->link_index
);
4259 connector_type
= to_drm_connector_type(link
->connector_signal
);
4261 res
= drm_connector_init(
4264 &amdgpu_dm_connector_funcs
,
4268 DRM_ERROR("connector_init failed\n");
4269 aconnector
->connector_id
= -1;
4273 drm_connector_helper_add(
4275 &amdgpu_dm_connector_helper_funcs
);
4277 if (aconnector
->base
.funcs
->reset
)
4278 aconnector
->base
.funcs
->reset(&aconnector
->base
);
4280 amdgpu_dm_connector_init_helper(
4287 drm_connector_attach_encoder(
4288 &aconnector
->base
, &aencoder
->base
);
4290 drm_connector_register(&aconnector
->base
);
4291 #if defined(CONFIG_DEBUG_FS)
4292 res
= connector_debugfs_init(aconnector
);
4294 DRM_ERROR("Failed to create debugfs for connector");
4299 if (connector_type
== DRM_MODE_CONNECTOR_DisplayPort
4300 || connector_type
== DRM_MODE_CONNECTOR_eDP
)
4301 amdgpu_dm_initialize_dp_connector(dm
, aconnector
);
4306 aconnector
->i2c
= NULL
;
4311 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device
*adev
)
4313 switch (adev
->mode_info
.num_crtc
) {
4330 static int amdgpu_dm_encoder_init(struct drm_device
*dev
,
4331 struct amdgpu_encoder
*aencoder
,
4332 uint32_t link_index
)
4334 struct amdgpu_device
*adev
= dev
->dev_private
;
4336 int res
= drm_encoder_init(dev
,
4338 &amdgpu_dm_encoder_funcs
,
4339 DRM_MODE_ENCODER_TMDS
,
4342 aencoder
->base
.possible_crtcs
= amdgpu_dm_get_encoder_crtc_mask(adev
);
4345 aencoder
->encoder_id
= link_index
;
4347 aencoder
->encoder_id
= -1;
4349 drm_encoder_helper_add(&aencoder
->base
, &amdgpu_dm_encoder_helper_funcs
);
4354 static void manage_dm_interrupts(struct amdgpu_device
*adev
,
4355 struct amdgpu_crtc
*acrtc
,
4359 * this is not correct translation but will work as soon as VBLANK
4360 * constant is the same as PFLIP
4363 amdgpu_display_crtc_idx_to_irq_type(
4368 drm_crtc_vblank_on(&acrtc
->base
);
4371 &adev
->pageflip_irq
,
4377 &adev
->pageflip_irq
,
4379 drm_crtc_vblank_off(&acrtc
->base
);
4384 is_scaling_state_different(const struct dm_connector_state
*dm_state
,
4385 const struct dm_connector_state
*old_dm_state
)
4387 if (dm_state
->scaling
!= old_dm_state
->scaling
)
4389 if (!dm_state
->underscan_enable
&& old_dm_state
->underscan_enable
) {
4390 if (old_dm_state
->underscan_hborder
!= 0 && old_dm_state
->underscan_vborder
!= 0)
4392 } else if (dm_state
->underscan_enable
&& !old_dm_state
->underscan_enable
) {
4393 if (dm_state
->underscan_hborder
!= 0 && dm_state
->underscan_vborder
!= 0)
4395 } else if (dm_state
->underscan_hborder
!= old_dm_state
->underscan_hborder
||
4396 dm_state
->underscan_vborder
!= old_dm_state
->underscan_vborder
)
4401 static void remove_stream(struct amdgpu_device
*adev
,
4402 struct amdgpu_crtc
*acrtc
,
4403 struct dc_stream_state
*stream
)
4405 /* this is the update mode case */
4407 acrtc
->otg_inst
= -1;
4408 acrtc
->enabled
= false;
4411 static int get_cursor_position(struct drm_plane
*plane
, struct drm_crtc
*crtc
,
4412 struct dc_cursor_position
*position
)
4414 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
4416 int xorigin
= 0, yorigin
= 0;
4418 if (!crtc
|| !plane
->state
->fb
) {
4419 position
->enable
= false;
4425 if ((plane
->state
->crtc_w
> amdgpu_crtc
->max_cursor_width
) ||
4426 (plane
->state
->crtc_h
> amdgpu_crtc
->max_cursor_height
)) {
4427 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
4429 plane
->state
->crtc_w
,
4430 plane
->state
->crtc_h
);
4434 x
= plane
->state
->crtc_x
;
4435 y
= plane
->state
->crtc_y
;
4436 /* avivo cursor are offset into the total surface */
4437 x
+= crtc
->primary
->state
->src_x
>> 16;
4438 y
+= crtc
->primary
->state
->src_y
>> 16;
4440 xorigin
= min(-x
, amdgpu_crtc
->max_cursor_width
- 1);
4444 yorigin
= min(-y
, amdgpu_crtc
->max_cursor_height
- 1);
4447 position
->enable
= true;
4450 position
->x_hotspot
= xorigin
;
4451 position
->y_hotspot
= yorigin
;
4456 static void handle_cursor_update(struct drm_plane
*plane
,
4457 struct drm_plane_state
*old_plane_state
)
4459 struct amdgpu_device
*adev
= plane
->dev
->dev_private
;
4460 struct amdgpu_framebuffer
*afb
= to_amdgpu_framebuffer(plane
->state
->fb
);
4461 struct drm_crtc
*crtc
= afb
? plane
->state
->crtc
: old_plane_state
->crtc
;
4462 struct dm_crtc_state
*crtc_state
= crtc
? to_dm_crtc_state(crtc
->state
) : NULL
;
4463 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
4464 uint64_t address
= afb
? afb
->address
: 0;
4465 struct dc_cursor_position position
;
4466 struct dc_cursor_attributes attributes
;
4469 if (!plane
->state
->fb
&& !old_plane_state
->fb
)
4472 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
4474 amdgpu_crtc
->crtc_id
,
4475 plane
->state
->crtc_w
,
4476 plane
->state
->crtc_h
);
4478 ret
= get_cursor_position(plane
, crtc
, &position
);
4482 if (!position
.enable
) {
4483 /* turn off cursor */
4484 if (crtc_state
&& crtc_state
->stream
) {
4485 mutex_lock(&adev
->dm
.dc_lock
);
4486 dc_stream_set_cursor_position(crtc_state
->stream
,
4488 mutex_unlock(&adev
->dm
.dc_lock
);
4493 amdgpu_crtc
->cursor_width
= plane
->state
->crtc_w
;
4494 amdgpu_crtc
->cursor_height
= plane
->state
->crtc_h
;
4496 attributes
.address
.high_part
= upper_32_bits(address
);
4497 attributes
.address
.low_part
= lower_32_bits(address
);
4498 attributes
.width
= plane
->state
->crtc_w
;
4499 attributes
.height
= plane
->state
->crtc_h
;
4500 attributes
.color_format
= CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA
;
4501 attributes
.rotation_angle
= 0;
4502 attributes
.attribute_flags
.value
= 0;
4504 attributes
.pitch
= attributes
.width
;
4506 if (crtc_state
->stream
) {
4507 mutex_lock(&adev
->dm
.dc_lock
);
4508 if (!dc_stream_set_cursor_attributes(crtc_state
->stream
,
4510 DRM_ERROR("DC failed to set cursor attributes\n");
4512 if (!dc_stream_set_cursor_position(crtc_state
->stream
,
4514 DRM_ERROR("DC failed to set cursor position\n");
4515 mutex_unlock(&adev
->dm
.dc_lock
);
4519 static void prepare_flip_isr(struct amdgpu_crtc
*acrtc
)
4522 assert_spin_locked(&acrtc
->base
.dev
->event_lock
);
4523 WARN_ON(acrtc
->event
);
4525 acrtc
->event
= acrtc
->base
.state
->event
;
4527 /* Set the flip status */
4528 acrtc
->pflip_status
= AMDGPU_FLIP_SUBMITTED
;
4530 /* Mark this event as consumed */
4531 acrtc
->base
.state
->event
= NULL
;
4533 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
4537 static void update_freesync_state_on_stream(
4538 struct amdgpu_display_manager
*dm
,
4539 struct dm_crtc_state
*new_crtc_state
,
4540 struct dc_stream_state
*new_stream
,
4541 struct dc_plane_state
*surface
,
4542 u32 flip_timestamp_in_us
)
4544 struct mod_vrr_params vrr_params
= new_crtc_state
->vrr_params
;
4545 struct dc_info_packet vrr_infopacket
= {0};
4546 struct mod_freesync_config config
= new_crtc_state
->freesync_config
;
4552 * TODO: Determine why min/max totals and vrefresh can be 0 here.
4553 * For now it's sufficient to just guard against these conditions.
4556 if (!new_stream
->timing
.h_total
|| !new_stream
->timing
.v_total
)
4559 if (new_crtc_state
->vrr_supported
&&
4560 config
.min_refresh_in_uhz
&&
4561 config
.max_refresh_in_uhz
) {
4562 config
.state
= new_crtc_state
->base
.vrr_enabled
?
4563 VRR_STATE_ACTIVE_VARIABLE
:
4566 config
.state
= VRR_STATE_UNSUPPORTED
;
4569 mod_freesync_build_vrr_params(dm
->freesync_module
,
4571 &config
, &vrr_params
);
4574 mod_freesync_handle_preflip(
4575 dm
->freesync_module
,
4578 flip_timestamp_in_us
,
4582 mod_freesync_build_vrr_infopacket(
4583 dm
->freesync_module
,
4587 TRANSFER_FUNC_UNKNOWN
,
4590 new_crtc_state
->freesync_timing_changed
|=
4591 (memcmp(&new_crtc_state
->vrr_params
.adjust
,
4593 sizeof(vrr_params
.adjust
)) != 0);
4595 new_crtc_state
->freesync_vrr_info_changed
|=
4596 (memcmp(&new_crtc_state
->vrr_infopacket
,
4598 sizeof(vrr_infopacket
)) != 0);
4600 new_crtc_state
->vrr_params
= vrr_params
;
4601 new_crtc_state
->vrr_infopacket
= vrr_infopacket
;
4603 new_stream
->adjust
= new_crtc_state
->vrr_params
.adjust
;
4604 new_stream
->vrr_infopacket
= vrr_infopacket
;
4606 if (new_crtc_state
->freesync_vrr_info_changed
)
4607 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
4608 new_crtc_state
->base
.crtc
->base
.id
,
4609 (int)new_crtc_state
->base
.vrr_enabled
,
4610 (int)vrr_params
.state
);
4613 static void amdgpu_dm_commit_planes(struct drm_atomic_state
*state
,
4614 struct dc_state
*dc_state
,
4615 struct drm_device
*dev
,
4616 struct amdgpu_display_manager
*dm
,
4617 struct drm_crtc
*pcrtc
,
4618 bool *wait_for_vblank
)
4621 uint64_t timestamp_ns
;
4622 struct drm_plane
*plane
;
4623 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
4624 struct amdgpu_crtc
*acrtc_attach
= to_amdgpu_crtc(pcrtc
);
4625 struct drm_crtc_state
*new_pcrtc_state
=
4626 drm_atomic_get_new_crtc_state(state
, pcrtc
);
4627 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(new_pcrtc_state
);
4628 struct dm_crtc_state
*dm_old_crtc_state
=
4629 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state
, pcrtc
));
4630 int flip_count
= 0, planes_count
= 0, vpos
, hpos
;
4631 unsigned long flags
;
4632 struct amdgpu_bo
*abo
;
4633 uint64_t tiling_flags
, dcc_address
;
4634 uint32_t target
, target_vblank
;
4637 struct dc_surface_update surface_updates
[MAX_SURFACES
];
4638 struct dc_flip_addrs flip_addrs
[MAX_SURFACES
];
4639 struct dc_stream_update stream_update
;
4643 struct dc_surface_update surface_updates
[MAX_SURFACES
];
4644 struct dc_plane_info plane_infos
[MAX_SURFACES
];
4645 struct dc_scaling_info scaling_infos
[MAX_SURFACES
];
4646 struct dc_stream_update stream_update
;
4649 flip
= kzalloc(sizeof(*flip
), GFP_KERNEL
);
4650 full
= kzalloc(sizeof(*full
), GFP_KERNEL
);
4652 if (!flip
|| !full
) {
4653 dm_error("Failed to allocate update bundles\n");
4657 /* update planes when needed */
4658 for_each_oldnew_plane_in_state(state
, plane
, old_plane_state
, new_plane_state
, i
) {
4659 struct drm_crtc
*crtc
= new_plane_state
->crtc
;
4660 struct drm_crtc_state
*new_crtc_state
;
4661 struct drm_framebuffer
*fb
= new_plane_state
->fb
;
4662 struct amdgpu_framebuffer
*afb
= to_amdgpu_framebuffer(fb
);
4664 struct dc_plane_state
*dc_plane
;
4665 struct dm_plane_state
*dm_new_plane_state
= to_dm_plane_state(new_plane_state
);
4667 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
) {
4668 handle_cursor_update(plane
, old_plane_state
);
4672 if (!fb
|| !crtc
|| pcrtc
!= crtc
)
4675 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, crtc
);
4676 if (!new_crtc_state
->active
)
4679 pflip_needed
= old_plane_state
->fb
&&
4680 old_plane_state
->fb
!= new_plane_state
->fb
;
4682 dc_plane
= dm_new_plane_state
->dc_state
;
4686 * Assume even ONE crtc with immediate flip means
4687 * entire can't wait for VBLANK
4688 * TODO Check if it's correct
4690 if (new_pcrtc_state
->pageflip_flags
& DRM_MODE_PAGE_FLIP_ASYNC
)
4691 *wait_for_vblank
= false;
4694 * TODO This might fail and hence better not used, wait
4695 * explicitly on fences instead
4696 * and in general should be called for
4697 * blocking commit to as per framework helpers
4699 abo
= gem_to_amdgpu_bo(fb
->obj
[0]);
4700 r
= amdgpu_bo_reserve(abo
, true);
4701 if (unlikely(r
!= 0)) {
4702 DRM_ERROR("failed to reserve buffer before flip\n");
4706 /* Wait for all fences on this FB */
4707 WARN_ON(reservation_object_wait_timeout_rcu(abo
->tbo
.resv
, true, false,
4708 MAX_SCHEDULE_TIMEOUT
) < 0);
4710 amdgpu_bo_get_tiling_flags(abo
, &tiling_flags
);
4712 amdgpu_bo_unreserve(abo
);
4714 flip
->flip_addrs
[flip_count
].address
.grph
.addr
.low_part
= lower_32_bits(afb
->address
);
4715 flip
->flip_addrs
[flip_count
].address
.grph
.addr
.high_part
= upper_32_bits(afb
->address
);
4717 dcc_address
= get_dcc_address(afb
->address
, tiling_flags
);
4718 flip
->flip_addrs
[flip_count
].address
.grph
.meta_addr
.low_part
= lower_32_bits(dcc_address
);
4719 flip
->flip_addrs
[flip_count
].address
.grph
.meta_addr
.high_part
= upper_32_bits(dcc_address
);
4721 flip
->flip_addrs
[flip_count
].flip_immediate
=
4722 (crtc
->state
->pageflip_flags
& DRM_MODE_PAGE_FLIP_ASYNC
) != 0;
4724 timestamp_ns
= ktime_get_ns();
4725 flip
->flip_addrs
[flip_count
].flip_timestamp_in_us
= div_u64(timestamp_ns
, 1000);
4726 flip
->surface_updates
[flip_count
].flip_addr
= &flip
->flip_addrs
[flip_count
];
4727 flip
->surface_updates
[flip_count
].surface
= dc_plane
;
4729 if (!flip
->surface_updates
[flip_count
].surface
) {
4730 DRM_ERROR("No surface for CRTC: id=%d\n",
4731 acrtc_attach
->crtc_id
);
4735 if (plane
== pcrtc
->primary
)
4736 update_freesync_state_on_stream(
4739 acrtc_state
->stream
,
4741 flip
->flip_addrs
[flip_count
].flip_timestamp_in_us
);
4743 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
4745 flip
->flip_addrs
[flip_count
].address
.grph
.addr
.high_part
,
4746 flip
->flip_addrs
[flip_count
].address
.grph
.addr
.low_part
);
4751 full
->surface_updates
[planes_count
].surface
= dc_plane
;
4752 if (new_pcrtc_state
->color_mgmt_changed
) {
4753 full
->surface_updates
[planes_count
].gamma
= dc_plane
->gamma_correction
;
4754 full
->surface_updates
[planes_count
].in_transfer_func
= dc_plane
->in_transfer_func
;
4758 full
->scaling_infos
[planes_count
].scaling_quality
= dc_plane
->scaling_quality
;
4759 full
->scaling_infos
[planes_count
].src_rect
= dc_plane
->src_rect
;
4760 full
->scaling_infos
[planes_count
].dst_rect
= dc_plane
->dst_rect
;
4761 full
->scaling_infos
[planes_count
].clip_rect
= dc_plane
->clip_rect
;
4762 full
->surface_updates
[planes_count
].scaling_info
= &full
->scaling_infos
[planes_count
];
4765 full
->plane_infos
[planes_count
].color_space
= dc_plane
->color_space
;
4766 full
->plane_infos
[planes_count
].format
= dc_plane
->format
;
4767 full
->plane_infos
[planes_count
].plane_size
= dc_plane
->plane_size
;
4768 full
->plane_infos
[planes_count
].rotation
= dc_plane
->rotation
;
4769 full
->plane_infos
[planes_count
].horizontal_mirror
= dc_plane
->horizontal_mirror
;
4770 full
->plane_infos
[planes_count
].stereo_format
= dc_plane
->stereo_format
;
4771 full
->plane_infos
[planes_count
].tiling_info
= dc_plane
->tiling_info
;
4772 full
->plane_infos
[planes_count
].visible
= dc_plane
->visible
;
4773 full
->plane_infos
[planes_count
].per_pixel_alpha
= dc_plane
->per_pixel_alpha
;
4774 full
->plane_infos
[planes_count
].dcc
= dc_plane
->dcc
;
4775 full
->surface_updates
[planes_count
].plane_info
= &full
->plane_infos
[planes_count
];
4782 * TODO: For proper atomic behaviour, we should be calling into DC once with
4783 * all the changes. However, DC refuses to do pageflips and non-pageflip
4784 * changes in the same call. Change DC to respect atomic behaviour,
4785 * hopefully eliminating dc_*_update structs in their entirety.
4788 target
= (uint32_t)drm_crtc_vblank_count(pcrtc
) + *wait_for_vblank
;
4789 /* Prepare wait for target vblank early - before the fence-waits */
4790 target_vblank
= target
- (uint32_t)drm_crtc_vblank_count(pcrtc
) +
4791 amdgpu_get_vblank_counter_kms(pcrtc
->dev
, acrtc_attach
->crtc_id
);
4794 * Wait until we're out of the vertical blank period before the one
4795 * targeted by the flip
4797 while ((acrtc_attach
->enabled
&&
4798 (amdgpu_display_get_crtc_scanoutpos(dm
->ddev
, acrtc_attach
->crtc_id
,
4799 0, &vpos
, &hpos
, NULL
,
4800 NULL
, &pcrtc
->hwmode
)
4801 & (DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_IN_VBLANK
)) ==
4802 (DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_IN_VBLANK
) &&
4803 (int)(target_vblank
-
4804 amdgpu_get_vblank_counter_kms(dm
->ddev
, acrtc_attach
->crtc_id
)) > 0)) {
4805 usleep_range(1000, 1100);
4808 if (acrtc_attach
->base
.state
->event
) {
4809 drm_crtc_vblank_get(pcrtc
);
4811 spin_lock_irqsave(&pcrtc
->dev
->event_lock
, flags
);
4813 WARN_ON(acrtc_attach
->pflip_status
!= AMDGPU_FLIP_NONE
);
4814 prepare_flip_isr(acrtc_attach
);
4816 spin_unlock_irqrestore(&pcrtc
->dev
->event_lock
, flags
);
4819 if (acrtc_state
->stream
) {
4821 if (acrtc_state
->freesync_timing_changed
)
4822 flip
->stream_update
.adjust
=
4823 &acrtc_state
->stream
->adjust
;
4825 if (acrtc_state
->freesync_vrr_info_changed
)
4826 flip
->stream_update
.vrr_infopacket
=
4827 &acrtc_state
->stream
->vrr_infopacket
;
4830 mutex_lock(&dm
->dc_lock
);
4831 dc_commit_updates_for_stream(dm
->dc
,
4832 flip
->surface_updates
,
4834 acrtc_state
->stream
,
4835 &flip
->stream_update
,
4837 mutex_unlock(&dm
->dc_lock
);
4841 if (new_pcrtc_state
->mode_changed
) {
4842 full
->stream_update
.src
= acrtc_state
->stream
->src
;
4843 full
->stream_update
.dst
= acrtc_state
->stream
->dst
;
4846 if (new_pcrtc_state
->color_mgmt_changed
)
4847 full
->stream_update
.out_transfer_func
= acrtc_state
->stream
->out_transfer_func
;
4849 acrtc_state
->stream
->abm_level
= acrtc_state
->abm_level
;
4850 if (acrtc_state
->abm_level
!= dm_old_crtc_state
->abm_level
)
4851 full
->stream_update
.abm_level
= &acrtc_state
->abm_level
;
4853 mutex_lock(&dm
->dc_lock
);
4854 dc_commit_updates_for_stream(dm
->dc
,
4855 full
->surface_updates
,
4857 acrtc_state
->stream
,
4858 &full
->stream_update
,
4860 mutex_unlock(&dm
->dc_lock
);
4869 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
4870 * @crtc_state: the DRM CRTC state
4871 * @stream_state: the DC stream state.
4873 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
4874 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
4876 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state
*crtc_state
,
4877 struct dc_stream_state
*stream_state
)
4879 stream_state
->mode_changed
=
4880 crtc_state
->mode_changed
|| crtc_state
->active_changed
;
4883 static int amdgpu_dm_atomic_commit(struct drm_device
*dev
,
4884 struct drm_atomic_state
*state
,
4887 struct drm_crtc
*crtc
;
4888 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
4889 struct amdgpu_device
*adev
= dev
->dev_private
;
4893 * We evade vblanks and pflips on crtc that
4894 * should be changed. We do it here to flush & disable
4895 * interrupts before drm_swap_state is called in drm_atomic_helper_commit
4896 * it will update crtc->dm_crtc_state->stream pointer which is used in
4899 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
4900 struct dm_crtc_state
*dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
4901 struct dm_crtc_state
*dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4902 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4904 if (drm_atomic_crtc_needs_modeset(new_crtc_state
)
4905 && dm_old_crtc_state
->stream
) {
4907 * If the stream is removed and CRC capture was
4908 * enabled on the CRTC the extra vblank reference
4909 * needs to be dropped since CRC capture will be
4912 if (!dm_new_crtc_state
->stream
4913 && dm_new_crtc_state
->crc_enabled
) {
4914 drm_crtc_vblank_put(crtc
);
4915 dm_new_crtc_state
->crc_enabled
= false;
4918 manage_dm_interrupts(adev
, acrtc
, false);
4922 * Add check here for SoC's that support hardware cursor plane, to
4923 * unset legacy_cursor_update
4926 return drm_atomic_helper_commit(dev
, state
, nonblock
);
4928 /*TODO Handle EINTR, reenable IRQ*/
4932 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
4933 * @state: The atomic state to commit
4935 * This will tell DC to commit the constructed DC state from atomic_check,
4936 * programming the hardware. Any failures here implies a hardware failure, since
4937 * atomic check should have filtered anything non-kosher.
4939 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state
*state
)
4941 struct drm_device
*dev
= state
->dev
;
4942 struct amdgpu_device
*adev
= dev
->dev_private
;
4943 struct amdgpu_display_manager
*dm
= &adev
->dm
;
4944 struct dm_atomic_state
*dm_state
;
4945 struct dc_state
*dc_state
= NULL
, *dc_state_temp
= NULL
;
4947 struct drm_crtc
*crtc
;
4948 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
4949 unsigned long flags
;
4950 bool wait_for_vblank
= true;
4951 struct drm_connector
*connector
;
4952 struct drm_connector_state
*old_con_state
, *new_con_state
;
4953 struct dm_crtc_state
*dm_old_crtc_state
, *dm_new_crtc_state
;
4954 int crtc_disable_count
= 0;
4956 drm_atomic_helper_update_legacy_modeset_state(dev
, state
);
4958 dm_state
= dm_atomic_get_new_state(state
);
4959 if (dm_state
&& dm_state
->context
) {
4960 dc_state
= dm_state
->context
;
4962 /* No state changes, retain current state. */
4963 dc_state_temp
= dc_create_state();
4964 ASSERT(dc_state_temp
);
4965 dc_state
= dc_state_temp
;
4966 dc_resource_state_copy_construct_current(dm
->dc
, dc_state
);
4969 /* update changed items */
4970 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
4971 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4973 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4974 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
4977 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4978 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
4979 "connectors_changed:%d\n",
4981 new_crtc_state
->enable
,
4982 new_crtc_state
->active
,
4983 new_crtc_state
->planes_changed
,
4984 new_crtc_state
->mode_changed
,
4985 new_crtc_state
->active_changed
,
4986 new_crtc_state
->connectors_changed
);
4988 /* Copy all transient state flags into dc state */
4989 if (dm_new_crtc_state
->stream
) {
4990 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state
->base
,
4991 dm_new_crtc_state
->stream
);
4994 /* handles headless hotplug case, updating new_state and
4995 * aconnector as needed
4998 if (modeset_required(new_crtc_state
, dm_new_crtc_state
->stream
, dm_old_crtc_state
->stream
)) {
5000 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc
->crtc_id
, acrtc
);
5002 if (!dm_new_crtc_state
->stream
) {
5004 * this could happen because of issues with
5005 * userspace notifications delivery.
5006 * In this case userspace tries to set mode on
5007 * display which is disconnected in fact.
5008 * dc_sink is NULL in this case on aconnector.
5009 * We expect reset mode will come soon.
5011 * This can also happen when unplug is done
5012 * during resume sequence ended
5014 * In this case, we want to pretend we still
5015 * have a sink to keep the pipe running so that
5016 * hw state is consistent with the sw state
5018 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
5019 __func__
, acrtc
->base
.base
.id
);
5023 if (dm_old_crtc_state
->stream
)
5024 remove_stream(adev
, acrtc
, dm_old_crtc_state
->stream
);
5026 pm_runtime_get_noresume(dev
->dev
);
5028 acrtc
->enabled
= true;
5029 acrtc
->hw_mode
= new_crtc_state
->mode
;
5030 crtc
->hwmode
= new_crtc_state
->mode
;
5031 } else if (modereset_required(new_crtc_state
)) {
5032 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc
->crtc_id
, acrtc
);
5034 /* i.e. reset mode */
5035 if (dm_old_crtc_state
->stream
)
5036 remove_stream(adev
, acrtc
, dm_old_crtc_state
->stream
);
5038 } /* for_each_crtc_in_state() */
5041 dm_enable_per_frame_crtc_master_sync(dc_state
);
5042 mutex_lock(&dm
->dc_lock
);
5043 WARN_ON(!dc_commit_state(dm
->dc
, dc_state
));
5044 mutex_unlock(&dm
->dc_lock
);
5047 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, i
) {
5048 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
5050 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
5052 if (dm_new_crtc_state
->stream
!= NULL
) {
5053 const struct dc_stream_status
*status
=
5054 dc_stream_get_status(dm_new_crtc_state
->stream
);
5057 status
= dc_stream_get_status_from_state(dc_state
,
5058 dm_new_crtc_state
->stream
);
5061 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state
->stream
, acrtc
);
5063 acrtc
->otg_inst
= status
->primary_otg_inst
;
5067 /* Handle connector state changes */
5068 for_each_oldnew_connector_in_state(state
, connector
, old_con_state
, new_con_state
, i
) {
5069 struct dm_connector_state
*dm_new_con_state
= to_dm_connector_state(new_con_state
);
5070 struct dm_connector_state
*dm_old_con_state
= to_dm_connector_state(old_con_state
);
5071 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(dm_new_con_state
->base
.crtc
);
5072 struct dc_surface_update dummy_updates
[MAX_SURFACES
];
5073 struct dc_stream_update stream_update
;
5074 struct dc_stream_status
*status
= NULL
;
5076 memset(&dummy_updates
, 0, sizeof(dummy_updates
));
5077 memset(&stream_update
, 0, sizeof(stream_update
));
5080 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, &acrtc
->base
);
5081 old_crtc_state
= drm_atomic_get_old_crtc_state(state
, &acrtc
->base
);
5084 /* Skip any modesets/resets */
5085 if (!acrtc
|| drm_atomic_crtc_needs_modeset(new_crtc_state
))
5088 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
5089 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
5091 if (!is_scaling_state_different(dm_new_con_state
, dm_old_con_state
) &&
5092 (dm_new_crtc_state
->abm_level
== dm_old_crtc_state
->abm_level
))
5095 if (is_scaling_state_different(dm_new_con_state
, dm_old_con_state
)) {
5096 update_stream_scaling_settings(&dm_new_con_state
->base
.crtc
->mode
,
5097 dm_new_con_state
, (struct dc_stream_state
*)dm_new_crtc_state
->stream
);
5099 stream_update
.src
= dm_new_crtc_state
->stream
->src
;
5100 stream_update
.dst
= dm_new_crtc_state
->stream
->dst
;
5103 if (dm_new_crtc_state
->abm_level
!= dm_old_crtc_state
->abm_level
) {
5104 dm_new_crtc_state
->stream
->abm_level
= dm_new_crtc_state
->abm_level
;
5106 stream_update
.abm_level
= &dm_new_crtc_state
->abm_level
;
5109 status
= dc_stream_get_status(dm_new_crtc_state
->stream
);
5111 WARN_ON(!status
->plane_count
);
5114 * TODO: DC refuses to perform stream updates without a dc_surface_update.
5115 * Here we create an empty update on each plane.
5116 * To fix this, DC should permit updating only stream properties.
5118 for (j
= 0; j
< status
->plane_count
; j
++)
5119 dummy_updates
[j
].surface
= status
->plane_states
[0];
5122 mutex_lock(&dm
->dc_lock
);
5123 dc_commit_updates_for_stream(dm
->dc
,
5125 status
->plane_count
,
5126 dm_new_crtc_state
->stream
,
5129 mutex_unlock(&dm
->dc_lock
);
5132 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
,
5133 new_crtc_state
, i
) {
5135 * loop to enable interrupts on newly arrived crtc
5137 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
5138 bool modeset_needed
;
5140 if (old_crtc_state
->active
&& !new_crtc_state
->active
)
5141 crtc_disable_count
++;
5143 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
5144 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
5145 modeset_needed
= modeset_required(
5147 dm_new_crtc_state
->stream
,
5148 dm_old_crtc_state
->stream
);
5150 if (dm_new_crtc_state
->stream
== NULL
|| !modeset_needed
)
5153 manage_dm_interrupts(adev
, acrtc
, true);
5155 #ifdef CONFIG_DEBUG_FS
5156 /* The stream has changed so CRC capture needs to re-enabled. */
5157 if (dm_new_crtc_state
->crc_enabled
)
5158 amdgpu_dm_crtc_set_crc_source(crtc
, "auto");
5162 /* update planes when needed per crtc*/
5163 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, j
) {
5164 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
5166 if (dm_new_crtc_state
->stream
)
5167 amdgpu_dm_commit_planes(state
, dc_state
, dev
,
5168 dm
, crtc
, &wait_for_vblank
);
5173 * send vblank event on all events not handled in flip and
5174 * mark consumed event for drm_atomic_helper_commit_hw_done
5176 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
5177 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, i
) {
5179 if (new_crtc_state
->event
)
5180 drm_send_event_locked(dev
, &new_crtc_state
->event
->base
);
5182 new_crtc_state
->event
= NULL
;
5184 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
5186 /* Signal HW programming completion */
5187 drm_atomic_helper_commit_hw_done(state
);
5189 if (wait_for_vblank
)
5190 drm_atomic_helper_wait_for_flip_done(dev
, state
);
5192 drm_atomic_helper_cleanup_planes(dev
, state
);
5195 * Finally, drop a runtime PM reference for each newly disabled CRTC,
5196 * so we can put the GPU into runtime suspend if we're not driving any
5199 for (i
= 0; i
< crtc_disable_count
; i
++)
5200 pm_runtime_put_autosuspend(dev
->dev
);
5201 pm_runtime_mark_last_busy(dev
->dev
);
5204 dc_release_state(dc_state_temp
);
5208 static int dm_force_atomic_commit(struct drm_connector
*connector
)
5211 struct drm_device
*ddev
= connector
->dev
;
5212 struct drm_atomic_state
*state
= drm_atomic_state_alloc(ddev
);
5213 struct amdgpu_crtc
*disconnected_acrtc
= to_amdgpu_crtc(connector
->encoder
->crtc
);
5214 struct drm_plane
*plane
= disconnected_acrtc
->base
.primary
;
5215 struct drm_connector_state
*conn_state
;
5216 struct drm_crtc_state
*crtc_state
;
5217 struct drm_plane_state
*plane_state
;
5222 state
->acquire_ctx
= ddev
->mode_config
.acquire_ctx
;
5224 /* Construct an atomic state to restore previous display setting */
5227 * Attach connectors to drm_atomic_state
5229 conn_state
= drm_atomic_get_connector_state(state
, connector
);
5231 ret
= PTR_ERR_OR_ZERO(conn_state
);
5235 /* Attach crtc to drm_atomic_state*/
5236 crtc_state
= drm_atomic_get_crtc_state(state
, &disconnected_acrtc
->base
);
5238 ret
= PTR_ERR_OR_ZERO(crtc_state
);
5242 /* force a restore */
5243 crtc_state
->mode_changed
= true;
5245 /* Attach plane to drm_atomic_state */
5246 plane_state
= drm_atomic_get_plane_state(state
, plane
);
5248 ret
= PTR_ERR_OR_ZERO(plane_state
);
5253 /* Call commit internally with the state we just constructed */
5254 ret
= drm_atomic_commit(state
);
5259 DRM_ERROR("Restoring old state failed with %i\n", ret
);
5260 drm_atomic_state_put(state
);
5266 * This function handles all cases when set mode does not come upon hotplug.
5267 * This includes when a display is unplugged then plugged back into the
5268 * same port and when running without usermode desktop manager support
5270 void dm_restore_drm_connector_state(struct drm_device
*dev
,
5271 struct drm_connector
*connector
)
5273 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
5274 struct amdgpu_crtc
*disconnected_acrtc
;
5275 struct dm_crtc_state
*acrtc_state
;
5277 if (!aconnector
->dc_sink
|| !connector
->state
|| !connector
->encoder
)
5280 disconnected_acrtc
= to_amdgpu_crtc(connector
->encoder
->crtc
);
5281 if (!disconnected_acrtc
)
5284 acrtc_state
= to_dm_crtc_state(disconnected_acrtc
->base
.state
);
5285 if (!acrtc_state
->stream
)
5289 * If the previous sink is not released and different from the current,
5290 * we deduce we are in a state where we can not rely on usermode call
5291 * to turn on the display, so we do it here
5293 if (acrtc_state
->stream
->sink
!= aconnector
->dc_sink
)
5294 dm_force_atomic_commit(&aconnector
->base
);
5298 * Grabs all modesetting locks to serialize against any blocking commits,
5299 * Waits for completion of all non blocking commits.
5301 static int do_aquire_global_lock(struct drm_device
*dev
,
5302 struct drm_atomic_state
*state
)
5304 struct drm_crtc
*crtc
;
5305 struct drm_crtc_commit
*commit
;
5309 * Adding all modeset locks to aquire_ctx will
5310 * ensure that when the framework release it the
5311 * extra locks we are locking here will get released to
5313 ret
= drm_modeset_lock_all_ctx(dev
, state
->acquire_ctx
);
5317 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
5318 spin_lock(&crtc
->commit_lock
);
5319 commit
= list_first_entry_or_null(&crtc
->commit_list
,
5320 struct drm_crtc_commit
, commit_entry
);
5322 drm_crtc_commit_get(commit
);
5323 spin_unlock(&crtc
->commit_lock
);
5329 * Make sure all pending HW programming completed and
5332 ret
= wait_for_completion_interruptible_timeout(&commit
->hw_done
, 10*HZ
);
5335 ret
= wait_for_completion_interruptible_timeout(
5336 &commit
->flip_done
, 10*HZ
);
5339 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
5340 "timed out\n", crtc
->base
.id
, crtc
->name
);
5342 drm_crtc_commit_put(commit
);
5345 return ret
< 0 ? ret
: 0;
5348 static void get_freesync_config_for_crtc(
5349 struct dm_crtc_state
*new_crtc_state
,
5350 struct dm_connector_state
*new_con_state
)
5352 struct mod_freesync_config config
= {0};
5353 struct amdgpu_dm_connector
*aconnector
=
5354 to_amdgpu_dm_connector(new_con_state
->base
.connector
);
5355 struct drm_display_mode
*mode
= &new_crtc_state
->base
.mode
;
5357 new_crtc_state
->vrr_supported
= new_con_state
->freesync_capable
&&
5358 aconnector
->min_vfreq
<= drm_mode_vrefresh(mode
);
5360 if (new_crtc_state
->vrr_supported
) {
5361 new_crtc_state
->stream
->ignore_msa_timing_param
= true;
5362 config
.state
= new_crtc_state
->base
.vrr_enabled
?
5363 VRR_STATE_ACTIVE_VARIABLE
:
5365 config
.min_refresh_in_uhz
=
5366 aconnector
->min_vfreq
* 1000000;
5367 config
.max_refresh_in_uhz
=
5368 aconnector
->max_vfreq
* 1000000;
5369 config
.vsif_supported
= true;
5373 new_crtc_state
->freesync_config
= config
;
5376 static void reset_freesync_config_for_crtc(
5377 struct dm_crtc_state
*new_crtc_state
)
5379 new_crtc_state
->vrr_supported
= false;
5381 memset(&new_crtc_state
->vrr_params
, 0,
5382 sizeof(new_crtc_state
->vrr_params
));
5383 memset(&new_crtc_state
->vrr_infopacket
, 0,
5384 sizeof(new_crtc_state
->vrr_infopacket
));
5387 static int dm_update_crtc_state(struct amdgpu_display_manager
*dm
,
5388 struct drm_atomic_state
*state
,
5389 struct drm_crtc
*crtc
,
5390 struct drm_crtc_state
*old_crtc_state
,
5391 struct drm_crtc_state
*new_crtc_state
,
5393 bool *lock_and_validation_needed
)
5395 struct dm_atomic_state
*dm_state
= NULL
;
5396 struct dm_crtc_state
*dm_old_crtc_state
, *dm_new_crtc_state
;
5397 struct dc_stream_state
*new_stream
;
5401 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
5402 * update changed items
5404 struct amdgpu_crtc
*acrtc
= NULL
;
5405 struct amdgpu_dm_connector
*aconnector
= NULL
;
5406 struct drm_connector_state
*drm_new_conn_state
= NULL
, *drm_old_conn_state
= NULL
;
5407 struct dm_connector_state
*dm_new_conn_state
= NULL
, *dm_old_conn_state
= NULL
;
5408 struct drm_plane_state
*new_plane_state
= NULL
;
5412 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
5413 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
5414 acrtc
= to_amdgpu_crtc(crtc
);
5416 new_plane_state
= drm_atomic_get_new_plane_state(state
, new_crtc_state
->crtc
->primary
);
5418 if (new_crtc_state
->enable
&& new_plane_state
&& !new_plane_state
->fb
) {
5423 aconnector
= amdgpu_dm_find_first_crtc_matching_connector(state
, crtc
);
5425 /* TODO This hack should go away */
5426 if (aconnector
&& enable
) {
5427 /* Make sure fake sink is created in plug-in scenario */
5428 drm_new_conn_state
= drm_atomic_get_new_connector_state(state
,
5430 drm_old_conn_state
= drm_atomic_get_old_connector_state(state
,
5433 if (IS_ERR(drm_new_conn_state
)) {
5434 ret
= PTR_ERR_OR_ZERO(drm_new_conn_state
);
5438 dm_new_conn_state
= to_dm_connector_state(drm_new_conn_state
);
5439 dm_old_conn_state
= to_dm_connector_state(drm_old_conn_state
);
5441 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
))
5444 new_stream
= create_stream_for_sink(aconnector
,
5445 &new_crtc_state
->mode
,
5447 dm_old_crtc_state
->stream
);
5450 * we can have no stream on ACTION_SET if a display
5451 * was disconnected during S3, in this case it is not an
5452 * error, the OS will be updated after detection, and
5453 * will do the right thing on next atomic commit
5457 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
5458 __func__
, acrtc
->base
.base
.id
);
5463 dm_new_crtc_state
->abm_level
= dm_new_conn_state
->abm_level
;
5465 if (dc_is_stream_unchanged(new_stream
, dm_old_crtc_state
->stream
) &&
5466 dc_is_stream_scaling_unchanged(new_stream
, dm_old_crtc_state
->stream
)) {
5467 new_crtc_state
->mode_changed
= false;
5468 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
5469 new_crtc_state
->mode_changed
);
5473 /* mode_changed flag may get updated above, need to check again */
5474 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
))
5478 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
5479 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
5480 "connectors_changed:%d\n",
5482 new_crtc_state
->enable
,
5483 new_crtc_state
->active
,
5484 new_crtc_state
->planes_changed
,
5485 new_crtc_state
->mode_changed
,
5486 new_crtc_state
->active_changed
,
5487 new_crtc_state
->connectors_changed
);
5489 /* Remove stream for any changed/disabled CRTC */
5492 if (!dm_old_crtc_state
->stream
)
5495 ret
= dm_atomic_get_state(state
, &dm_state
);
5499 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
5502 /* i.e. reset mode */
5503 if (dc_remove_stream_from_ctx(
5506 dm_old_crtc_state
->stream
) != DC_OK
) {
5511 dc_stream_release(dm_old_crtc_state
->stream
);
5512 dm_new_crtc_state
->stream
= NULL
;
5514 reset_freesync_config_for_crtc(dm_new_crtc_state
);
5516 *lock_and_validation_needed
= true;
5518 } else {/* Add stream for any updated/enabled CRTC */
5520 * Quick fix to prevent NULL pointer on new_stream when
5521 * added MST connectors not found in existing crtc_state in the chained mode
5522 * TODO: need to dig out the root cause of that
5524 if (!aconnector
|| (!aconnector
->dc_sink
&& aconnector
->mst_port
))
5527 if (modereset_required(new_crtc_state
))
5530 if (modeset_required(new_crtc_state
, new_stream
,
5531 dm_old_crtc_state
->stream
)) {
5533 WARN_ON(dm_new_crtc_state
->stream
);
5535 ret
= dm_atomic_get_state(state
, &dm_state
);
5539 dm_new_crtc_state
->stream
= new_stream
;
5541 dc_stream_retain(new_stream
);
5543 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
5546 if (dc_add_stream_to_ctx(
5549 dm_new_crtc_state
->stream
) != DC_OK
) {
5554 *lock_and_validation_needed
= true;
5559 /* Release extra reference */
5561 dc_stream_release(new_stream
);
5564 * We want to do dc stream updates that do not require a
5565 * full modeset below.
5567 if (!(enable
&& aconnector
&& new_crtc_state
->enable
&&
5568 new_crtc_state
->active
))
5571 * Given above conditions, the dc state cannot be NULL because:
5572 * 1. We're in the process of enabling CRTCs (just been added
5573 * to the dc context, or already is on the context)
5574 * 2. Has a valid connector attached, and
5575 * 3. Is currently active and enabled.
5576 * => The dc stream state currently exists.
5578 BUG_ON(dm_new_crtc_state
->stream
== NULL
);
5580 /* Scaling or underscan settings */
5581 if (is_scaling_state_different(dm_old_conn_state
, dm_new_conn_state
))
5582 update_stream_scaling_settings(
5583 &new_crtc_state
->mode
, dm_new_conn_state
, dm_new_crtc_state
->stream
);
5586 * Color management settings. We also update color properties
5587 * when a modeset is needed, to ensure it gets reprogrammed.
5589 if (dm_new_crtc_state
->base
.color_mgmt_changed
||
5590 drm_atomic_crtc_needs_modeset(new_crtc_state
)) {
5591 ret
= amdgpu_dm_set_regamma_lut(dm_new_crtc_state
);
5594 amdgpu_dm_set_ctm(dm_new_crtc_state
);
5597 /* Update Freesync settings. */
5598 get_freesync_config_for_crtc(dm_new_crtc_state
,
5605 dc_stream_release(new_stream
);
5609 static int dm_update_plane_state(struct dc
*dc
,
5610 struct drm_atomic_state
*state
,
5611 struct drm_plane
*plane
,
5612 struct drm_plane_state
*old_plane_state
,
5613 struct drm_plane_state
*new_plane_state
,
5615 bool *lock_and_validation_needed
)
5618 struct dm_atomic_state
*dm_state
= NULL
;
5619 struct drm_crtc
*new_plane_crtc
, *old_plane_crtc
;
5620 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
5621 struct dm_crtc_state
*dm_new_crtc_state
, *dm_old_crtc_state
;
5622 struct dm_plane_state
*dm_new_plane_state
, *dm_old_plane_state
;
5623 /* TODO return page_flip_needed() function */
5624 bool pflip_needed
= !state
->allow_modeset
;
5628 new_plane_crtc
= new_plane_state
->crtc
;
5629 old_plane_crtc
= old_plane_state
->crtc
;
5630 dm_new_plane_state
= to_dm_plane_state(new_plane_state
);
5631 dm_old_plane_state
= to_dm_plane_state(old_plane_state
);
5633 /*TODO Implement atomic check for cursor plane */
5634 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
5637 /* Remove any changed/removed planes */
5640 plane
->type
!= DRM_PLANE_TYPE_OVERLAY
)
5643 if (!old_plane_crtc
)
5646 old_crtc_state
= drm_atomic_get_old_crtc_state(
5647 state
, old_plane_crtc
);
5648 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
5650 if (!dm_old_crtc_state
->stream
)
5653 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
5654 plane
->base
.id
, old_plane_crtc
->base
.id
);
5656 ret
= dm_atomic_get_state(state
, &dm_state
);
5660 if (!dc_remove_plane_from_context(
5662 dm_old_crtc_state
->stream
,
5663 dm_old_plane_state
->dc_state
,
5664 dm_state
->context
)) {
5671 dc_plane_state_release(dm_old_plane_state
->dc_state
);
5672 dm_new_plane_state
->dc_state
= NULL
;
5674 *lock_and_validation_needed
= true;
5676 } else { /* Add new planes */
5677 struct dc_plane_state
*dc_new_plane_state
;
5679 if (drm_atomic_plane_disabling(plane
->state
, new_plane_state
))
5682 if (!new_plane_crtc
)
5685 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, new_plane_crtc
);
5686 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
5688 if (!dm_new_crtc_state
->stream
)
5691 if (pflip_needed
&& plane
->type
!= DRM_PLANE_TYPE_OVERLAY
)
5694 WARN_ON(dm_new_plane_state
->dc_state
);
5696 dc_new_plane_state
= dc_create_plane_state(dc
);
5697 if (!dc_new_plane_state
)
5700 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
5701 plane
->base
.id
, new_plane_crtc
->base
.id
);
5703 ret
= fill_plane_attributes(
5704 new_plane_crtc
->dev
->dev_private
,
5709 dc_plane_state_release(dc_new_plane_state
);
5713 ret
= dm_atomic_get_state(state
, &dm_state
);
5715 dc_plane_state_release(dc_new_plane_state
);
5720 * Any atomic check errors that occur after this will
5721 * not need a release. The plane state will be attached
5722 * to the stream, and therefore part of the atomic
5723 * state. It'll be released when the atomic state is
5726 if (!dc_add_plane_to_context(
5728 dm_new_crtc_state
->stream
,
5730 dm_state
->context
)) {
5732 dc_plane_state_release(dc_new_plane_state
);
5736 dm_new_plane_state
->dc_state
= dc_new_plane_state
;
5738 /* Tell DC to do a full surface update every time there
5739 * is a plane change. Inefficient, but works for now.
5741 dm_new_plane_state
->dc_state
->update_flags
.bits
.full_update
= 1;
5743 *lock_and_validation_needed
= true;
5751 dm_determine_update_type_for_commit(struct dc
*dc
,
5752 struct drm_atomic_state
*state
,
5753 enum surface_update_type
*out_type
)
5755 struct dm_atomic_state
*dm_state
= NULL
, *old_dm_state
= NULL
;
5756 int i
, j
, num_plane
, ret
= 0;
5757 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
5758 struct dm_plane_state
*new_dm_plane_state
, *old_dm_plane_state
;
5759 struct drm_crtc
*new_plane_crtc
, *old_plane_crtc
;
5760 struct drm_plane
*plane
;
5762 struct drm_crtc
*crtc
;
5763 struct drm_crtc_state
*new_crtc_state
, *old_crtc_state
;
5764 struct dm_crtc_state
*new_dm_crtc_state
, *old_dm_crtc_state
;
5765 struct dc_stream_status
*status
= NULL
;
5767 struct dc_surface_update
*updates
;
5768 struct dc_plane_state
*surface
;
5769 enum surface_update_type update_type
= UPDATE_TYPE_FAST
;
5771 updates
= kcalloc(MAX_SURFACES
, sizeof(*updates
), GFP_KERNEL
);
5772 surface
= kcalloc(MAX_SURFACES
, sizeof(*surface
), GFP_KERNEL
);
5774 if (!updates
|| !surface
) {
5775 DRM_ERROR("Plane or surface update failed to allocate");
5776 /* Set type to FULL to avoid crashing in DC*/
5777 update_type
= UPDATE_TYPE_FULL
;
5781 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
5782 struct dc_stream_update stream_update
= { 0 };
5784 new_dm_crtc_state
= to_dm_crtc_state(new_crtc_state
);
5785 old_dm_crtc_state
= to_dm_crtc_state(old_crtc_state
);
5788 if (!new_dm_crtc_state
->stream
) {
5789 if (!new_dm_crtc_state
->stream
&& old_dm_crtc_state
->stream
) {
5790 update_type
= UPDATE_TYPE_FULL
;
5797 for_each_oldnew_plane_in_state(state
, plane
, old_plane_state
, new_plane_state
, j
) {
5798 new_plane_crtc
= new_plane_state
->crtc
;
5799 old_plane_crtc
= old_plane_state
->crtc
;
5800 new_dm_plane_state
= to_dm_plane_state(new_plane_state
);
5801 old_dm_plane_state
= to_dm_plane_state(old_plane_state
);
5803 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
5806 if (!state
->allow_modeset
)
5809 if (crtc
!= new_plane_crtc
)
5812 updates
[num_plane
].surface
= &surface
[num_plane
];
5814 if (new_crtc_state
->mode_changed
) {
5815 updates
[num_plane
].surface
->src_rect
=
5816 new_dm_plane_state
->dc_state
->src_rect
;
5817 updates
[num_plane
].surface
->dst_rect
=
5818 new_dm_plane_state
->dc_state
->dst_rect
;
5819 updates
[num_plane
].surface
->rotation
=
5820 new_dm_plane_state
->dc_state
->rotation
;
5821 updates
[num_plane
].surface
->in_transfer_func
=
5822 new_dm_plane_state
->dc_state
->in_transfer_func
;
5823 stream_update
.dst
= new_dm_crtc_state
->stream
->dst
;
5824 stream_update
.src
= new_dm_crtc_state
->stream
->src
;
5827 if (new_crtc_state
->color_mgmt_changed
) {
5828 updates
[num_plane
].gamma
=
5829 new_dm_plane_state
->dc_state
->gamma_correction
;
5830 updates
[num_plane
].in_transfer_func
=
5831 new_dm_plane_state
->dc_state
->in_transfer_func
;
5832 stream_update
.gamut_remap
=
5833 &new_dm_crtc_state
->stream
->gamut_remap_matrix
;
5834 stream_update
.out_transfer_func
=
5835 new_dm_crtc_state
->stream
->out_transfer_func
;
5844 ret
= dm_atomic_get_state(state
, &dm_state
);
5848 old_dm_state
= dm_atomic_get_old_state(state
);
5849 if (!old_dm_state
) {
5854 status
= dc_stream_get_status_from_state(old_dm_state
->context
,
5855 new_dm_crtc_state
->stream
);
5857 update_type
= dc_check_update_surfaces_for_stream(dc
, updates
, num_plane
,
5858 &stream_update
, status
);
5860 if (update_type
> UPDATE_TYPE_MED
) {
5861 update_type
= UPDATE_TYPE_FULL
;
5870 *out_type
= update_type
;
5875 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
5876 * @dev: The DRM device
5877 * @state: The atomic state to commit
5879 * Validate that the given atomic state is programmable by DC into hardware.
5880 * This involves constructing a &struct dc_state reflecting the new hardware
5881 * state we wish to commit, then querying DC to see if it is programmable. It's
5882 * important not to modify the existing DC state. Otherwise, atomic_check
5883 * may unexpectedly commit hardware changes.
5885 * When validating the DC state, it's important that the right locks are
5886 * acquired. For full updates case which removes/adds/updates streams on one
5887 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
5888 * that any such full update commit will wait for completion of any outstanding
5889 * flip using DRMs synchronization events. See
5890 * dm_determine_update_type_for_commit()
5892 * Note that DM adds the affected connectors for all CRTCs in state, when that
5893 * might not seem necessary. This is because DC stream creation requires the
5894 * DC sink, which is tied to the DRM connector state. Cleaning this up should
5895 * be possible but non-trivial - a possible TODO item.
5897 * Return: -Error code if validation failed.
5899 static int amdgpu_dm_atomic_check(struct drm_device
*dev
,
5900 struct drm_atomic_state
*state
)
5902 struct amdgpu_device
*adev
= dev
->dev_private
;
5903 struct dm_atomic_state
*dm_state
= NULL
;
5904 struct dc
*dc
= adev
->dm
.dc
;
5905 struct drm_connector
*connector
;
5906 struct drm_connector_state
*old_con_state
, *new_con_state
;
5907 struct drm_crtc
*crtc
;
5908 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
5909 struct drm_plane
*plane
;
5910 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
5911 enum surface_update_type update_type
= UPDATE_TYPE_FAST
;
5912 enum surface_update_type overall_update_type
= UPDATE_TYPE_FAST
;
5917 * This bool will be set for true for any modeset/reset
5918 * or plane update which implies non fast surface update.
5920 bool lock_and_validation_needed
= false;
5922 ret
= drm_atomic_helper_check_modeset(dev
, state
);
5926 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
5927 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
) &&
5928 !new_crtc_state
->color_mgmt_changed
&&
5929 old_crtc_state
->vrr_enabled
== new_crtc_state
->vrr_enabled
)
5932 if (!new_crtc_state
->enable
)
5935 ret
= drm_atomic_add_affected_connectors(state
, crtc
);
5939 ret
= drm_atomic_add_affected_planes(state
, crtc
);
5944 /* Remove exiting planes if they are modified */
5945 for_each_oldnew_plane_in_state_reverse(state
, plane
, old_plane_state
, new_plane_state
, i
) {
5946 ret
= dm_update_plane_state(dc
, state
, plane
,
5950 &lock_and_validation_needed
);
5955 /* Disable all crtcs which require disable */
5956 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
5957 ret
= dm_update_crtc_state(&adev
->dm
, state
, crtc
,
5961 &lock_and_validation_needed
);
5966 /* Enable all crtcs which require enable */
5967 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
5968 ret
= dm_update_crtc_state(&adev
->dm
, state
, crtc
,
5972 &lock_and_validation_needed
);
5977 /* Add new/modified planes */
5978 for_each_oldnew_plane_in_state_reverse(state
, plane
, old_plane_state
, new_plane_state
, i
) {
5979 ret
= dm_update_plane_state(dc
, state
, plane
,
5983 &lock_and_validation_needed
);
5988 /* Run this here since we want to validate the streams we created */
5989 ret
= drm_atomic_helper_check_planes(dev
, state
);
5993 /* Check scaling and underscan changes*/
5994 /* TODO Removed scaling changes validation due to inability to commit
5995 * new stream into context w\o causing full reset. Need to
5996 * decide how to handle.
5998 for_each_oldnew_connector_in_state(state
, connector
, old_con_state
, new_con_state
, i
) {
5999 struct dm_connector_state
*dm_old_con_state
= to_dm_connector_state(old_con_state
);
6000 struct dm_connector_state
*dm_new_con_state
= to_dm_connector_state(new_con_state
);
6001 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(dm_new_con_state
->base
.crtc
);
6003 /* Skip any modesets/resets */
6004 if (!acrtc
|| drm_atomic_crtc_needs_modeset(
6005 drm_atomic_get_new_crtc_state(state
, &acrtc
->base
)))
6008 /* Skip any thing not scale or underscan changes */
6009 if (!is_scaling_state_different(dm_new_con_state
, dm_old_con_state
))
6012 overall_update_type
= UPDATE_TYPE_FULL
;
6013 lock_and_validation_needed
= true;
6016 ret
= dm_determine_update_type_for_commit(dc
, state
, &update_type
);
6020 if (overall_update_type
< update_type
)
6021 overall_update_type
= update_type
;
6024 * lock_and_validation_needed was an old way to determine if we need to set
6025 * the global lock. Leaving it in to check if we broke any corner cases
6026 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
6027 * lock_and_validation_needed false = UPDATE_TYPE_FAST
6029 if (lock_and_validation_needed
&& overall_update_type
<= UPDATE_TYPE_FAST
)
6030 WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
6031 else if (!lock_and_validation_needed
&& overall_update_type
> UPDATE_TYPE_FAST
)
6032 WARN(1, "Global lock should NOT be set, overall_update_type should be UPDATE_TYPE_FAST");
6035 if (overall_update_type
> UPDATE_TYPE_FAST
) {
6036 ret
= dm_atomic_get_state(state
, &dm_state
);
6040 ret
= do_aquire_global_lock(dev
, state
);
6044 if (dc_validate_global_state(dc
, dm_state
->context
) != DC_OK
) {
6048 } else if (state
->legacy_cursor_update
) {
6050 * This is a fast cursor update coming from the plane update
6051 * helper, check if it can be done asynchronously for better
6054 state
->async_update
= !drm_atomic_helper_async_check(dev
, state
);
6057 /* Must be success */
6062 if (ret
== -EDEADLK
)
6063 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
6064 else if (ret
== -EINTR
|| ret
== -EAGAIN
|| ret
== -ERESTARTSYS
)
6065 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
6067 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret
);
6072 static bool is_dp_capable_without_timing_msa(struct dc
*dc
,
6073 struct amdgpu_dm_connector
*amdgpu_dm_connector
)
6076 bool capable
= false;
6078 if (amdgpu_dm_connector
->dc_link
&&
6079 dm_helpers_dp_read_dpcd(
6081 amdgpu_dm_connector
->dc_link
,
6082 DP_DOWN_STREAM_PORT_COUNT
,
6084 sizeof(dpcd_data
))) {
6085 capable
= (dpcd_data
& DP_MSA_TIMING_PAR_IGNORED
) ? true:false;
6090 void amdgpu_dm_update_freesync_caps(struct drm_connector
*connector
,
6094 bool edid_check_required
;
6095 struct detailed_timing
*timing
;
6096 struct detailed_non_pixel
*data
;
6097 struct detailed_data_monitor_range
*range
;
6098 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
6099 to_amdgpu_dm_connector(connector
);
6100 struct dm_connector_state
*dm_con_state
= NULL
;
6102 struct drm_device
*dev
= connector
->dev
;
6103 struct amdgpu_device
*adev
= dev
->dev_private
;
6104 bool freesync_capable
= false;
6106 if (!connector
->state
) {
6107 DRM_ERROR("%s - Connector has no state", __func__
);
6112 dm_con_state
= to_dm_connector_state(connector
->state
);
6114 amdgpu_dm_connector
->min_vfreq
= 0;
6115 amdgpu_dm_connector
->max_vfreq
= 0;
6116 amdgpu_dm_connector
->pixel_clock_mhz
= 0;
6121 dm_con_state
= to_dm_connector_state(connector
->state
);
6123 edid_check_required
= false;
6124 if (!amdgpu_dm_connector
->dc_sink
) {
6125 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
6128 if (!adev
->dm
.freesync_module
)
6131 * if edid non zero restrict freesync only for dp and edp
6134 if (amdgpu_dm_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT
6135 || amdgpu_dm_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_EDP
) {
6136 edid_check_required
= is_dp_capable_without_timing_msa(
6138 amdgpu_dm_connector
);
6141 if (edid_check_required
== true && (edid
->version
> 1 ||
6142 (edid
->version
== 1 && edid
->revision
> 1))) {
6143 for (i
= 0; i
< 4; i
++) {
6145 timing
= &edid
->detailed_timings
[i
];
6146 data
= &timing
->data
.other_data
;
6147 range
= &data
->data
.range
;
6149 * Check if monitor has continuous frequency mode
6151 if (data
->type
!= EDID_DETAIL_MONITOR_RANGE
)
6154 * Check for flag range limits only. If flag == 1 then
6155 * no additional timing information provided.
6156 * Default GTF, GTF Secondary curve and CVT are not
6159 if (range
->flags
!= 1)
6162 amdgpu_dm_connector
->min_vfreq
= range
->min_vfreq
;
6163 amdgpu_dm_connector
->max_vfreq
= range
->max_vfreq
;
6164 amdgpu_dm_connector
->pixel_clock_mhz
=
6165 range
->pixel_clock_mhz
* 10;
6169 if (amdgpu_dm_connector
->max_vfreq
-
6170 amdgpu_dm_connector
->min_vfreq
> 10) {
6172 freesync_capable
= true;
6178 dm_con_state
->freesync_capable
= freesync_capable
;
6180 if (connector
->vrr_capable_property
)
6181 drm_connector_set_vrr_capable_property(connector
,