/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */
#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "dm_services_types.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/firmware.h>

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_edid.h>

#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "ivsrcid/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
76 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
77 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU
);
79 /* basic init/fini API */
80 static int amdgpu_dm_init(struct amdgpu_device
*adev
);
81 static void amdgpu_dm_fini(struct amdgpu_device
*adev
);
84 * initializes drm_device display related structures, based on the information
85 * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
86 * drm_encoder, drm_mode_config
88 * Returns 0 on success
90 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device
*adev
);
91 /* removes and deallocates the drm structures, created by the above function */
92 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager
*dm
);
95 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector
*aconnector
);
97 static int amdgpu_dm_plane_init(struct amdgpu_display_manager
*dm
,
98 struct amdgpu_plane
*aplane
,
99 unsigned long possible_crtcs
);
100 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager
*dm
,
101 struct drm_plane
*plane
,
102 uint32_t link_index
);
103 static int amdgpu_dm_connector_init(struct amdgpu_display_manager
*dm
,
104 struct amdgpu_dm_connector
*amdgpu_dm_connector
,
106 struct amdgpu_encoder
*amdgpu_encoder
);
107 static int amdgpu_dm_encoder_init(struct drm_device
*dev
,
108 struct amdgpu_encoder
*aencoder
,
109 uint32_t link_index
);
111 static int amdgpu_dm_connector_get_modes(struct drm_connector
*connector
);
113 static int amdgpu_dm_atomic_commit(struct drm_device
*dev
,
114 struct drm_atomic_state
*state
,
117 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state
*state
);
119 static int amdgpu_dm_atomic_check(struct drm_device
*dev
,
120 struct drm_atomic_state
*state
);
125 static const enum drm_plane_type dm_plane_type_default
[AMDGPU_MAX_PLANES
] = {
126 DRM_PLANE_TYPE_PRIMARY
,
127 DRM_PLANE_TYPE_PRIMARY
,
128 DRM_PLANE_TYPE_PRIMARY
,
129 DRM_PLANE_TYPE_PRIMARY
,
130 DRM_PLANE_TYPE_PRIMARY
,
131 DRM_PLANE_TYPE_PRIMARY
,
134 static const enum drm_plane_type dm_plane_type_carizzo
[AMDGPU_MAX_PLANES
] = {
135 DRM_PLANE_TYPE_PRIMARY
,
136 DRM_PLANE_TYPE_PRIMARY
,
137 DRM_PLANE_TYPE_PRIMARY
,
138 DRM_PLANE_TYPE_OVERLAY
,/* YUV Capable Underlay */
141 static const enum drm_plane_type dm_plane_type_stoney
[AMDGPU_MAX_PLANES
] = {
142 DRM_PLANE_TYPE_PRIMARY
,
143 DRM_PLANE_TYPE_PRIMARY
,
144 DRM_PLANE_TYPE_OVERLAY
, /* YUV Capable Underlay */
148 * dm_vblank_get_counter
151 * Get counter for number of vertical blanks
154 * struct amdgpu_device *adev - [in] desired amdgpu device
155 * int disp_idx - [in] which CRTC to get the counter from
158 * Counter for vertical blanks
160 static u32
dm_vblank_get_counter(struct amdgpu_device
*adev
, int crtc
)
162 if (crtc
>= adev
->mode_info
.num_crtc
)
165 struct amdgpu_crtc
*acrtc
= adev
->mode_info
.crtcs
[crtc
];
166 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(
170 if (acrtc_state
->stream
== NULL
) {
171 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
176 return dc_stream_get_vblank_counter(acrtc_state
->stream
);
180 static int dm_crtc_get_scanoutpos(struct amdgpu_device
*adev
, int crtc
,
181 u32
*vbl
, u32
*position
)
183 uint32_t v_blank_start
, v_blank_end
, h_position
, v_position
;
185 if ((crtc
< 0) || (crtc
>= adev
->mode_info
.num_crtc
))
188 struct amdgpu_crtc
*acrtc
= adev
->mode_info
.crtcs
[crtc
];
189 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(
192 if (acrtc_state
->stream
== NULL
) {
193 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
199 * TODO rework base driver to use values directly.
200 * for now parse it back into reg-format
202 dc_stream_get_scanoutpos(acrtc_state
->stream
,
208 *position
= v_position
| (h_position
<< 16);
209 *vbl
= v_blank_start
| (v_blank_end
<< 16);
/* amd_ip_funcs stub: DM has no meaningful idle state to report yet. */
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}
/* amd_ip_funcs stub: nothing to wait for, always succeeds. */
static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}
/* amd_ip_funcs stub: DM never requests a soft reset. */
static bool dm_check_soft_reset(void *handle)
{
	return false;
}
/* amd_ip_funcs stub: soft reset is a no-op for DM. */
static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
238 static struct amdgpu_crtc
*
239 get_crtc_by_otg_inst(struct amdgpu_device
*adev
,
242 struct drm_device
*dev
= adev
->ddev
;
243 struct drm_crtc
*crtc
;
244 struct amdgpu_crtc
*amdgpu_crtc
;
246 if (otg_inst
== -1) {
248 return adev
->mode_info
.crtcs
[0];
251 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
252 amdgpu_crtc
= to_amdgpu_crtc(crtc
);
254 if (amdgpu_crtc
->otg_inst
== otg_inst
)
261 static void dm_pflip_high_irq(void *interrupt_params
)
263 struct amdgpu_crtc
*amdgpu_crtc
;
264 struct common_irq_params
*irq_params
= interrupt_params
;
265 struct amdgpu_device
*adev
= irq_params
->adev
;
268 amdgpu_crtc
= get_crtc_by_otg_inst(adev
, irq_params
->irq_src
- IRQ_TYPE_PFLIP
);
270 /* IRQ could occur when in initial stage */
271 /* TODO work and BO cleanup */
272 if (amdgpu_crtc
== NULL
) {
273 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
277 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
279 if (amdgpu_crtc
->pflip_status
!= AMDGPU_FLIP_SUBMITTED
){
280 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
281 amdgpu_crtc
->pflip_status
,
282 AMDGPU_FLIP_SUBMITTED
,
283 amdgpu_crtc
->crtc_id
,
285 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
290 /* wake up userspace */
291 if (amdgpu_crtc
->event
) {
292 /* Update to correct count(s) if racing with vblank irq */
293 drm_crtc_accurate_vblank_count(&amdgpu_crtc
->base
);
295 drm_crtc_send_vblank_event(&amdgpu_crtc
->base
, amdgpu_crtc
->event
);
297 /* page flip completed. clean up */
298 amdgpu_crtc
->event
= NULL
;
303 amdgpu_crtc
->pflip_status
= AMDGPU_FLIP_NONE
;
304 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
306 DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
307 __func__
, amdgpu_crtc
->crtc_id
, amdgpu_crtc
);
309 drm_crtc_vblank_put(&amdgpu_crtc
->base
);
312 static void dm_crtc_high_irq(void *interrupt_params
)
314 struct common_irq_params
*irq_params
= interrupt_params
;
315 struct amdgpu_device
*adev
= irq_params
->adev
;
316 struct amdgpu_crtc
*acrtc
;
318 acrtc
= get_crtc_by_otg_inst(adev
, irq_params
->irq_src
- IRQ_TYPE_VBLANK
);
321 drm_crtc_handle_vblank(&acrtc
->base
);
322 amdgpu_dm_crtc_handle_crc_irq(&acrtc
->base
);
326 static int dm_set_clockgating_state(void *handle
,
327 enum amd_clockgating_state state
)
332 static int dm_set_powergating_state(void *handle
,
333 enum amd_powergating_state state
)
/* Prototypes of private functions */
static int dm_early_init(void *handle);
341 /* Allocate memory for FBC compressed data */
342 static void amdgpu_dm_fbc_init(struct drm_connector
*connector
)
344 struct drm_device
*dev
= connector
->dev
;
345 struct amdgpu_device
*adev
= dev
->dev_private
;
346 struct dm_comressor_info
*compressor
= &adev
->dm
.compressor
;
347 struct amdgpu_dm_connector
*aconn
= to_amdgpu_dm_connector(connector
);
348 struct drm_display_mode
*mode
;
349 unsigned long max_size
= 0;
351 if (adev
->dm
.dc
->fbc_compressor
== NULL
)
354 if (aconn
->dc_link
->connector_signal
!= SIGNAL_TYPE_EDP
)
357 if (compressor
->bo_ptr
)
361 list_for_each_entry(mode
, &connector
->modes
, head
) {
362 if (max_size
< mode
->htotal
* mode
->vtotal
)
363 max_size
= mode
->htotal
* mode
->vtotal
;
367 int r
= amdgpu_bo_create_kernel(adev
, max_size
* 4, PAGE_SIZE
,
368 AMDGPU_GEM_DOMAIN_GTT
, &compressor
->bo_ptr
,
369 &compressor
->gpu_addr
, &compressor
->cpu_addr
);
372 DRM_ERROR("DM: Failed to initialize FBC\n");
374 adev
->dm
.dc
->ctx
->fbc_gpu_addr
= compressor
->gpu_addr
;
375 DRM_INFO("DM: FBC alloc %lu\n", max_size
*4);
385 * Returns 0 on success
387 static int amdgpu_dm_init(struct amdgpu_device
*adev
)
389 struct dc_init_data init_data
;
390 adev
->dm
.ddev
= adev
->ddev
;
391 adev
->dm
.adev
= adev
;
393 /* Zero all the fields */
394 memset(&init_data
, 0, sizeof(init_data
));
396 if(amdgpu_dm_irq_init(adev
)) {
397 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
401 init_data
.asic_id
.chip_family
= adev
->family
;
403 init_data
.asic_id
.pci_revision_id
= adev
->rev_id
;
404 init_data
.asic_id
.hw_internal_rev
= adev
->external_rev_id
;
406 init_data
.asic_id
.vram_width
= adev
->gmc
.vram_width
;
407 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
408 init_data
.asic_id
.atombios_base_address
=
409 adev
->mode_info
.atom_context
->bios
;
411 init_data
.driver
= adev
;
413 adev
->dm
.cgs_device
= amdgpu_cgs_create_device(adev
);
415 if (!adev
->dm
.cgs_device
) {
416 DRM_ERROR("amdgpu: failed to create cgs device.\n");
420 init_data
.cgs_device
= adev
->dm
.cgs_device
;
422 init_data
.dce_environment
= DCE_ENV_PRODUCTION_DRV
;
425 * TODO debug why this doesn't work on Raven
427 if (adev
->flags
& AMD_IS_APU
&&
428 adev
->asic_type
>= CHIP_CARRIZO
&&
429 adev
->asic_type
< CHIP_RAVEN
)
430 init_data
.flags
.gpu_vm_support
= true;
432 if (amdgpu_dc_feature_mask
& DC_FBC_MASK
)
433 init_data
.flags
.fbc_support
= true;
435 /* Display Core create. */
436 adev
->dm
.dc
= dc_create(&init_data
);
439 DRM_INFO("Display Core initialized with v%s!\n", DC_VER
);
441 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER
);
445 adev
->dm
.freesync_module
= mod_freesync_create(adev
->dm
.dc
);
446 if (!adev
->dm
.freesync_module
) {
448 "amdgpu: failed to initialize freesync_module.\n");
450 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
451 adev
->dm
.freesync_module
);
453 amdgpu_dm_init_color_mod();
455 if (amdgpu_dm_initialize_drm_device(adev
)) {
457 "amdgpu: failed to initialize sw for display support.\n");
461 /* Update the actual used number of crtc */
462 adev
->mode_info
.num_crtc
= adev
->dm
.display_indexes_num
;
464 /* TODO: Add_display_info? */
466 /* TODO use dynamic cursor width */
467 adev
->ddev
->mode_config
.cursor_width
= adev
->dm
.dc
->caps
.max_cursor_size
;
468 adev
->ddev
->mode_config
.cursor_height
= adev
->dm
.dc
->caps
.max_cursor_size
;
470 if (drm_vblank_init(adev
->ddev
, adev
->dm
.display_indexes_num
)) {
472 "amdgpu: failed to initialize sw for display support.\n");
476 #if defined(CONFIG_DEBUG_FS)
477 if (dtn_debugfs_init(adev
))
478 DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
481 DRM_DEBUG_DRIVER("KMS initialized.\n");
485 amdgpu_dm_fini(adev
);
490 static void amdgpu_dm_fini(struct amdgpu_device
*adev
)
492 amdgpu_dm_destroy_drm_device(&adev
->dm
);
494 * TODO: pageflip, vlank interrupt
496 * amdgpu_dm_irq_fini(adev);
499 if (adev
->dm
.cgs_device
) {
500 amdgpu_cgs_destroy_device(adev
->dm
.cgs_device
);
501 adev
->dm
.cgs_device
= NULL
;
503 if (adev
->dm
.freesync_module
) {
504 mod_freesync_destroy(adev
->dm
.freesync_module
);
505 adev
->dm
.freesync_module
= NULL
;
507 /* DC Destroy TODO: Replace destroy DAL */
509 dc_destroy(&adev
->dm
.dc
);
513 static int load_dmcu_fw(struct amdgpu_device
*adev
)
515 const char *fw_name_dmcu
;
517 const struct dmcu_firmware_header_v1_0
*hdr
;
519 switch(adev
->asic_type
) {
538 fw_name_dmcu
= FIRMWARE_RAVEN_DMCU
;
541 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev
->asic_type
);
545 if (adev
->firmware
.load_type
!= AMDGPU_FW_LOAD_PSP
) {
546 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
550 r
= request_firmware_direct(&adev
->dm
.fw_dmcu
, fw_name_dmcu
, adev
->dev
);
552 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
553 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
554 adev
->dm
.fw_dmcu
= NULL
;
558 dev_err(adev
->dev
, "amdgpu_dm: Can't load firmware \"%s\"\n",
563 r
= amdgpu_ucode_validate(adev
->dm
.fw_dmcu
);
565 dev_err(adev
->dev
, "amdgpu_dm: Can't validate firmware \"%s\"\n",
567 release_firmware(adev
->dm
.fw_dmcu
);
568 adev
->dm
.fw_dmcu
= NULL
;
572 hdr
= (const struct dmcu_firmware_header_v1_0
*)adev
->dm
.fw_dmcu
->data
;
573 adev
->firmware
.ucode
[AMDGPU_UCODE_ID_DMCU_ERAM
].ucode_id
= AMDGPU_UCODE_ID_DMCU_ERAM
;
574 adev
->firmware
.ucode
[AMDGPU_UCODE_ID_DMCU_ERAM
].fw
= adev
->dm
.fw_dmcu
;
575 adev
->firmware
.fw_size
+=
576 ALIGN(le32_to_cpu(hdr
->header
.ucode_size_bytes
) - le32_to_cpu(hdr
->intv_size_bytes
), PAGE_SIZE
);
578 adev
->firmware
.ucode
[AMDGPU_UCODE_ID_DMCU_INTV
].ucode_id
= AMDGPU_UCODE_ID_DMCU_INTV
;
579 adev
->firmware
.ucode
[AMDGPU_UCODE_ID_DMCU_INTV
].fw
= adev
->dm
.fw_dmcu
;
580 adev
->firmware
.fw_size
+=
581 ALIGN(le32_to_cpu(hdr
->intv_size_bytes
), PAGE_SIZE
);
583 adev
->dm
.dmcu_fw_version
= le32_to_cpu(hdr
->header
.ucode_version
);
585 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
/* amd_ip_funcs.sw_init: only the DMCU firmware needs loading here. */
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return load_dmcu_fw(adev);
}
597 static int dm_sw_fini(void *handle
)
599 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
601 if(adev
->dm
.fw_dmcu
) {
602 release_firmware(adev
->dm
.fw_dmcu
);
603 adev
->dm
.fw_dmcu
= NULL
;
609 static int detect_mst_link_for_all_connectors(struct drm_device
*dev
)
611 struct amdgpu_dm_connector
*aconnector
;
612 struct drm_connector
*connector
;
615 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
617 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
, head
) {
618 aconnector
= to_amdgpu_dm_connector(connector
);
619 if (aconnector
->dc_link
->type
== dc_connection_mst_branch
&&
620 aconnector
->mst_mgr
.aux
) {
621 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
622 aconnector
, aconnector
->base
.base
.id
);
624 ret
= drm_dp_mst_topology_mgr_set_mst(&aconnector
->mst_mgr
, true);
626 DRM_ERROR("DM_MST: Failed to start MST\n");
627 ((struct dc_link
*)aconnector
->dc_link
)->type
= dc_connection_single
;
633 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
637 static int dm_late_init(void *handle
)
639 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
641 return detect_mst_link_for_all_connectors(adev
->ddev
);
644 static void s3_handle_mst(struct drm_device
*dev
, bool suspend
)
646 struct amdgpu_dm_connector
*aconnector
;
647 struct drm_connector
*connector
;
649 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
651 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
, head
) {
652 aconnector
= to_amdgpu_dm_connector(connector
);
653 if (aconnector
->dc_link
->type
== dc_connection_mst_branch
&&
654 !aconnector
->mst_port
) {
657 drm_dp_mst_topology_mgr_suspend(&aconnector
->mst_mgr
);
659 drm_dp_mst_topology_mgr_resume(&aconnector
->mst_mgr
);
663 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
/* amd_ip_funcs.hw_init: create the display manager and enable HPD. */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}
/* amd_ip_funcs.hw_fini: reverse of dm_hw_init(). */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}
687 static int dm_suspend(void *handle
)
689 struct amdgpu_device
*adev
= handle
;
690 struct amdgpu_display_manager
*dm
= &adev
->dm
;
693 s3_handle_mst(adev
->ddev
, true);
695 amdgpu_dm_irq_suspend(adev
);
697 WARN_ON(adev
->dm
.cached_state
);
698 adev
->dm
.cached_state
= drm_atomic_helper_suspend(adev
->ddev
);
700 dc_set_power_state(dm
->dc
, DC_ACPI_CM_POWER_STATE_D3
);
705 static struct amdgpu_dm_connector
*
706 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state
*state
,
707 struct drm_crtc
*crtc
)
710 struct drm_connector_state
*new_con_state
;
711 struct drm_connector
*connector
;
712 struct drm_crtc
*crtc_from_state
;
714 for_each_new_connector_in_state(state
, connector
, new_con_state
, i
) {
715 crtc_from_state
= new_con_state
->crtc
;
717 if (crtc_from_state
== crtc
)
718 return to_amdgpu_dm_connector(connector
);
724 static void emulated_link_detect(struct dc_link
*link
)
726 struct dc_sink_init_data sink_init_data
= { 0 };
727 struct display_sink_capability sink_caps
= { 0 };
728 enum dc_edid_status edid_status
;
729 struct dc_context
*dc_ctx
= link
->ctx
;
730 struct dc_sink
*sink
= NULL
;
731 struct dc_sink
*prev_sink
= NULL
;
733 link
->type
= dc_connection_none
;
734 prev_sink
= link
->local_sink
;
736 if (prev_sink
!= NULL
)
737 dc_sink_retain(prev_sink
);
739 switch (link
->connector_signal
) {
740 case SIGNAL_TYPE_HDMI_TYPE_A
: {
741 sink_caps
.transaction_type
= DDC_TRANSACTION_TYPE_I2C
;
742 sink_caps
.signal
= SIGNAL_TYPE_HDMI_TYPE_A
;
746 case SIGNAL_TYPE_DVI_SINGLE_LINK
: {
747 sink_caps
.transaction_type
= DDC_TRANSACTION_TYPE_I2C
;
748 sink_caps
.signal
= SIGNAL_TYPE_DVI_SINGLE_LINK
;
752 case SIGNAL_TYPE_DVI_DUAL_LINK
: {
753 sink_caps
.transaction_type
= DDC_TRANSACTION_TYPE_I2C
;
754 sink_caps
.signal
= SIGNAL_TYPE_DVI_DUAL_LINK
;
758 case SIGNAL_TYPE_LVDS
: {
759 sink_caps
.transaction_type
= DDC_TRANSACTION_TYPE_I2C
;
760 sink_caps
.signal
= SIGNAL_TYPE_LVDS
;
764 case SIGNAL_TYPE_EDP
: {
765 sink_caps
.transaction_type
=
766 DDC_TRANSACTION_TYPE_I2C_OVER_AUX
;
767 sink_caps
.signal
= SIGNAL_TYPE_EDP
;
771 case SIGNAL_TYPE_DISPLAY_PORT
: {
772 sink_caps
.transaction_type
=
773 DDC_TRANSACTION_TYPE_I2C_OVER_AUX
;
774 sink_caps
.signal
= SIGNAL_TYPE_VIRTUAL
;
779 DC_ERROR("Invalid connector type! signal:%d\n",
780 link
->connector_signal
);
784 sink_init_data
.link
= link
;
785 sink_init_data
.sink_signal
= sink_caps
.signal
;
787 sink
= dc_sink_create(&sink_init_data
);
789 DC_ERROR("Failed to create sink!\n");
793 link
->local_sink
= sink
;
795 edid_status
= dm_helpers_read_local_edid(
800 if (edid_status
!= EDID_OK
)
801 DC_ERROR("Failed to read EDID");
805 static int dm_resume(void *handle
)
807 struct amdgpu_device
*adev
= handle
;
808 struct drm_device
*ddev
= adev
->ddev
;
809 struct amdgpu_display_manager
*dm
= &adev
->dm
;
810 struct amdgpu_dm_connector
*aconnector
;
811 struct drm_connector
*connector
;
812 struct drm_crtc
*crtc
;
813 struct drm_crtc_state
*new_crtc_state
;
814 struct dm_crtc_state
*dm_new_crtc_state
;
815 struct drm_plane
*plane
;
816 struct drm_plane_state
*new_plane_state
;
817 struct dm_plane_state
*dm_new_plane_state
;
818 enum dc_connection_type new_connection_type
= dc_connection_none
;
822 /* power on hardware */
823 dc_set_power_state(dm
->dc
, DC_ACPI_CM_POWER_STATE_D0
);
825 /* program HPD filter */
828 /* On resume we need to rewrite the MSTM control bits to enamble MST*/
829 s3_handle_mst(ddev
, false);
832 * early enable HPD Rx IRQ, should be done before set mode as short
833 * pulse interrupts are used for MST
835 amdgpu_dm_irq_resume_early(adev
);
838 list_for_each_entry(connector
, &ddev
->mode_config
.connector_list
, head
) {
839 aconnector
= to_amdgpu_dm_connector(connector
);
842 * this is the case when traversing through already created
843 * MST connectors, should be skipped
845 if (aconnector
->mst_port
)
848 mutex_lock(&aconnector
->hpd_lock
);
849 if (!dc_link_detect_sink(aconnector
->dc_link
, &new_connection_type
))
850 DRM_ERROR("KMS: Failed to detect connector\n");
852 if (aconnector
->base
.force
&& new_connection_type
== dc_connection_none
)
853 emulated_link_detect(aconnector
->dc_link
);
855 dc_link_detect(aconnector
->dc_link
, DETECT_REASON_HPD
);
857 if (aconnector
->fake_enable
&& aconnector
->dc_link
->local_sink
)
858 aconnector
->fake_enable
= false;
860 aconnector
->dc_sink
= NULL
;
861 amdgpu_dm_update_connector_after_detect(aconnector
);
862 mutex_unlock(&aconnector
->hpd_lock
);
865 /* Force mode set in atomic commit */
866 for_each_new_crtc_in_state(dm
->cached_state
, crtc
, new_crtc_state
, i
)
867 new_crtc_state
->active_changed
= true;
870 * atomic_check is expected to create the dc states. We need to release
871 * them here, since they were duplicated as part of the suspend
874 for_each_new_crtc_in_state(dm
->cached_state
, crtc
, new_crtc_state
, i
) {
875 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
876 if (dm_new_crtc_state
->stream
) {
877 WARN_ON(kref_read(&dm_new_crtc_state
->stream
->refcount
) > 1);
878 dc_stream_release(dm_new_crtc_state
->stream
);
879 dm_new_crtc_state
->stream
= NULL
;
883 for_each_new_plane_in_state(dm
->cached_state
, plane
, new_plane_state
, i
) {
884 dm_new_plane_state
= to_dm_plane_state(new_plane_state
);
885 if (dm_new_plane_state
->dc_state
) {
886 WARN_ON(kref_read(&dm_new_plane_state
->dc_state
->refcount
) > 1);
887 dc_plane_state_release(dm_new_plane_state
->dc_state
);
888 dm_new_plane_state
->dc_state
= NULL
;
892 ret
= drm_atomic_helper_resume(ddev
, dm
->cached_state
);
894 dm
->cached_state
= NULL
;
896 amdgpu_dm_irq_resume_late(adev
);
901 static const struct amd_ip_funcs amdgpu_dm_funcs
= {
903 .early_init
= dm_early_init
,
904 .late_init
= dm_late_init
,
905 .sw_init
= dm_sw_init
,
906 .sw_fini
= dm_sw_fini
,
907 .hw_init
= dm_hw_init
,
908 .hw_fini
= dm_hw_fini
,
909 .suspend
= dm_suspend
,
911 .is_idle
= dm_is_idle
,
912 .wait_for_idle
= dm_wait_for_idle
,
913 .check_soft_reset
= dm_check_soft_reset
,
914 .soft_reset
= dm_soft_reset
,
915 .set_clockgating_state
= dm_set_clockgating_state
,
916 .set_powergating_state
= dm_set_powergating_state
,
919 const struct amdgpu_ip_block_version dm_ip_block
=
921 .type
= AMD_IP_BLOCK_TYPE_DCE
,
925 .funcs
= &amdgpu_dm_funcs
,
929 static struct drm_atomic_state
*
930 dm_atomic_state_alloc(struct drm_device
*dev
)
932 struct dm_atomic_state
*state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
937 if (drm_atomic_state_init(dev
, &state
->base
) < 0)
948 dm_atomic_state_clear(struct drm_atomic_state
*state
)
950 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
952 if (dm_state
->context
) {
953 dc_release_state(dm_state
->context
);
954 dm_state
->context
= NULL
;
957 drm_atomic_state_default_clear(state
);
/* Free a DM atomic state allocated by dm_atomic_state_alloc(). */
static void
dm_atomic_state_alloc_free(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
	drm_atomic_state_default_release(state);
	kfree(dm_state);
}
968 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs
= {
969 .fb_create
= amdgpu_display_user_framebuffer_create
,
970 .output_poll_changed
= drm_fb_helper_output_poll_changed
,
971 .atomic_check
= amdgpu_dm_atomic_check
,
972 .atomic_commit
= amdgpu_dm_atomic_commit
,
973 .atomic_state_alloc
= dm_atomic_state_alloc
,
974 .atomic_state_clear
= dm_atomic_state_clear
,
975 .atomic_state_free
= dm_atomic_state_alloc_free
978 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs
= {
979 .atomic_commit_tail
= amdgpu_dm_atomic_commit_tail
983 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector
*aconnector
)
985 struct drm_connector
*connector
= &aconnector
->base
;
986 struct drm_device
*dev
= connector
->dev
;
987 struct dc_sink
*sink
;
989 /* MST handled by drm_mst framework */
990 if (aconnector
->mst_mgr
.mst_state
== true)
994 sink
= aconnector
->dc_link
->local_sink
;
997 * Edid mgmt connector gets first update only in mode_valid hook and then
998 * the connector sink is set to either fake or physical sink depends on link status.
999 * Skip if already done during boot.
1001 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
1002 && aconnector
->dc_em_sink
) {
1005 * For S3 resume with headless use eml_sink to fake stream
1006 * because on resume connector->sink is set to NULL
1008 mutex_lock(&dev
->mode_config
.mutex
);
1011 if (aconnector
->dc_sink
) {
1012 amdgpu_dm_update_freesync_caps(connector
, NULL
);
1014 * retain and release below are used to
1015 * bump up refcount for sink because the link doesn't point
1016 * to it anymore after disconnect, so on next crtc to connector
1017 * reshuffle by UMD we will get into unwanted dc_sink release
1019 if (aconnector
->dc_sink
!= aconnector
->dc_em_sink
)
1020 dc_sink_release(aconnector
->dc_sink
);
1022 aconnector
->dc_sink
= sink
;
1023 amdgpu_dm_update_freesync_caps(connector
,
1026 amdgpu_dm_update_freesync_caps(connector
, NULL
);
1027 if (!aconnector
->dc_sink
)
1028 aconnector
->dc_sink
= aconnector
->dc_em_sink
;
1029 else if (aconnector
->dc_sink
!= aconnector
->dc_em_sink
)
1030 dc_sink_retain(aconnector
->dc_sink
);
1033 mutex_unlock(&dev
->mode_config
.mutex
);
1038 * TODO: temporary guard to look for proper fix
1039 * if this sink is MST sink, we should not do anything
1041 if (sink
&& sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT_MST
)
1044 if (aconnector
->dc_sink
== sink
) {
1046 * We got a DP short pulse (Link Loss, DP CTS, etc...).
1049 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
1050 aconnector
->connector_id
);
1054 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
1055 aconnector
->connector_id
, aconnector
->dc_sink
, sink
);
1057 mutex_lock(&dev
->mode_config
.mutex
);
1060 * 1. Update status of the drm connector
1061 * 2. Send an event and let userspace tell us what to do
1065 * TODO: check if we still need the S3 mode update workaround.
1066 * If yes, put it here.
1068 if (aconnector
->dc_sink
)
1069 amdgpu_dm_update_freesync_caps(connector
, NULL
);
1071 aconnector
->dc_sink
= sink
;
1072 if (sink
->dc_edid
.length
== 0) {
1073 aconnector
->edid
= NULL
;
1074 drm_dp_cec_unset_edid(&aconnector
->dm_dp_aux
.aux
);
1077 (struct edid
*) sink
->dc_edid
.raw_edid
;
1080 drm_connector_update_edid_property(connector
,
1082 drm_dp_cec_set_edid(&aconnector
->dm_dp_aux
.aux
,
1085 amdgpu_dm_update_freesync_caps(connector
, aconnector
->edid
);
1088 drm_dp_cec_unset_edid(&aconnector
->dm_dp_aux
.aux
);
1089 amdgpu_dm_update_freesync_caps(connector
, NULL
);
1090 drm_connector_update_edid_property(connector
, NULL
);
1091 aconnector
->num_modes
= 0;
1092 aconnector
->dc_sink
= NULL
;
1093 aconnector
->edid
= NULL
;
1096 mutex_unlock(&dev
->mode_config
.mutex
);
1099 static void handle_hpd_irq(void *param
)
1101 struct amdgpu_dm_connector
*aconnector
= (struct amdgpu_dm_connector
*)param
;
1102 struct drm_connector
*connector
= &aconnector
->base
;
1103 struct drm_device
*dev
= connector
->dev
;
1104 enum dc_connection_type new_connection_type
= dc_connection_none
;
1107 * In case of failure or MST no need to update connector status or notify the OS
1108 * since (for MST case) MST does this in its own context.
1110 mutex_lock(&aconnector
->hpd_lock
);
1112 if (aconnector
->fake_enable
)
1113 aconnector
->fake_enable
= false;
1115 if (!dc_link_detect_sink(aconnector
->dc_link
, &new_connection_type
))
1116 DRM_ERROR("KMS: Failed to detect connector\n");
1118 if (aconnector
->base
.force
&& new_connection_type
== dc_connection_none
) {
1119 emulated_link_detect(aconnector
->dc_link
);
1122 drm_modeset_lock_all(dev
);
1123 dm_restore_drm_connector_state(dev
, connector
);
1124 drm_modeset_unlock_all(dev
);
1126 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
)
1127 drm_kms_helper_hotplug_event(dev
);
1129 } else if (dc_link_detect(aconnector
->dc_link
, DETECT_REASON_HPD
)) {
1130 amdgpu_dm_update_connector_after_detect(aconnector
);
1133 drm_modeset_lock_all(dev
);
1134 dm_restore_drm_connector_state(dev
, connector
);
1135 drm_modeset_unlock_all(dev
);
1137 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
)
1138 drm_kms_helper_hotplug_event(dev
);
1140 mutex_unlock(&aconnector
->hpd_lock
);
1144 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector
*aconnector
)
1146 uint8_t esi
[DP_PSR_ERROR_STATUS
- DP_SINK_COUNT_ESI
] = { 0 };
1148 bool new_irq_handled
= false;
1150 int dpcd_bytes_to_read
;
1152 const int max_process_count
= 30;
1153 int process_count
= 0;
1155 const struct dc_link_status
*link_status
= dc_link_get_status(aconnector
->dc_link
);
1157 if (link_status
->dpcd_caps
->dpcd_rev
.raw
< 0x12) {
1158 dpcd_bytes_to_read
= DP_LANE0_1_STATUS
- DP_SINK_COUNT
;
1159 /* DPCD 0x200 - 0x201 for downstream IRQ */
1160 dpcd_addr
= DP_SINK_COUNT
;
1162 dpcd_bytes_to_read
= DP_PSR_ERROR_STATUS
- DP_SINK_COUNT_ESI
;
1163 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
1164 dpcd_addr
= DP_SINK_COUNT_ESI
;
1167 dret
= drm_dp_dpcd_read(
1168 &aconnector
->dm_dp_aux
.aux
,
1171 dpcd_bytes_to_read
);
1173 while (dret
== dpcd_bytes_to_read
&&
1174 process_count
< max_process_count
) {
1180 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi
[0], esi
[1], esi
[2]);
1181 /* handle HPD short pulse irq */
1182 if (aconnector
->mst_mgr
.mst_state
)
1184 &aconnector
->mst_mgr
,
1188 if (new_irq_handled
) {
1189 /* ACK at DPCD to notify down stream */
1190 const int ack_dpcd_bytes_to_write
=
1191 dpcd_bytes_to_read
- 1;
1193 for (retry
= 0; retry
< 3; retry
++) {
1196 wret
= drm_dp_dpcd_write(
1197 &aconnector
->dm_dp_aux
.aux
,
1200 ack_dpcd_bytes_to_write
);
1201 if (wret
== ack_dpcd_bytes_to_write
)
1205 /* check if there is new irq to be handled */
1206 dret
= drm_dp_dpcd_read(
1207 &aconnector
->dm_dp_aux
.aux
,
1210 dpcd_bytes_to_read
);
1212 new_irq_handled
= false;
1218 if (process_count
== max_process_count
)
1219 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
1222 static void handle_hpd_rx_irq(void *param
)
1224 struct amdgpu_dm_connector
*aconnector
= (struct amdgpu_dm_connector
*)param
;
1225 struct drm_connector
*connector
= &aconnector
->base
;
1226 struct drm_device
*dev
= connector
->dev
;
1227 struct dc_link
*dc_link
= aconnector
->dc_link
;
1228 bool is_mst_root_connector
= aconnector
->mst_mgr
.mst_state
;
1229 enum dc_connection_type new_connection_type
= dc_connection_none
;
1232 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
1233 * conflict, after implement i2c helper, this mutex should be
1236 if (dc_link
->type
!= dc_connection_mst_branch
)
1237 mutex_lock(&aconnector
->hpd_lock
);
1239 if (dc_link_handle_hpd_rx_irq(dc_link
, NULL
, NULL
) &&
1240 !is_mst_root_connector
) {
1241 /* Downstream Port status changed. */
1242 if (!dc_link_detect_sink(dc_link
, &new_connection_type
))
1243 DRM_ERROR("KMS: Failed to detect connector\n");
1245 if (aconnector
->base
.force
&& new_connection_type
== dc_connection_none
) {
1246 emulated_link_detect(dc_link
);
1248 if (aconnector
->fake_enable
)
1249 aconnector
->fake_enable
= false;
1251 amdgpu_dm_update_connector_after_detect(aconnector
);
1254 drm_modeset_lock_all(dev
);
1255 dm_restore_drm_connector_state(dev
, connector
);
1256 drm_modeset_unlock_all(dev
);
1258 drm_kms_helper_hotplug_event(dev
);
1259 } else if (dc_link_detect(dc_link
, DETECT_REASON_HPDRX
)) {
1261 if (aconnector
->fake_enable
)
1262 aconnector
->fake_enable
= false;
1264 amdgpu_dm_update_connector_after_detect(aconnector
);
1267 drm_modeset_lock_all(dev
);
1268 dm_restore_drm_connector_state(dev
, connector
);
1269 drm_modeset_unlock_all(dev
);
1271 drm_kms_helper_hotplug_event(dev
);
1274 if ((dc_link
->cur_link_settings
.lane_count
!= LANE_COUNT_UNKNOWN
) ||
1275 (dc_link
->type
== dc_connection_mst_branch
))
1276 dm_handle_hpd_rx_irq(aconnector
);
1278 if (dc_link
->type
!= dc_connection_mst_branch
) {
1279 drm_dp_cec_irq(&aconnector
->dm_dp_aux
.aux
);
1280 mutex_unlock(&aconnector
->hpd_lock
);
1284 static void register_hpd_handlers(struct amdgpu_device
*adev
)
1286 struct drm_device
*dev
= adev
->ddev
;
1287 struct drm_connector
*connector
;
1288 struct amdgpu_dm_connector
*aconnector
;
1289 const struct dc_link
*dc_link
;
1290 struct dc_interrupt_params int_params
= {0};
1292 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1293 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1295 list_for_each_entry(connector
,
1296 &dev
->mode_config
.connector_list
, head
) {
1298 aconnector
= to_amdgpu_dm_connector(connector
);
1299 dc_link
= aconnector
->dc_link
;
1301 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd
) {
1302 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
1303 int_params
.irq_source
= dc_link
->irq_source_hpd
;
1305 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1307 (void *) aconnector
);
1310 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd_rx
) {
1312 /* Also register for DP short pulse (hpd_rx). */
1313 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
1314 int_params
.irq_source
= dc_link
->irq_source_hpd_rx
;
1316 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1318 (void *) aconnector
);
1323 /* Register IRQ sources and initialize IRQ callbacks */
1324 static int dce110_register_irq_handlers(struct amdgpu_device
*adev
)
1326 struct dc
*dc
= adev
->dm
.dc
;
1327 struct common_irq_params
*c_irq_params
;
1328 struct dc_interrupt_params int_params
= {0};
1331 unsigned client_id
= AMDGPU_IRQ_CLIENTID_LEGACY
;
1333 if (adev
->asic_type
== CHIP_VEGA10
||
1334 adev
->asic_type
== CHIP_VEGA12
||
1335 adev
->asic_type
== CHIP_VEGA20
||
1336 adev
->asic_type
== CHIP_RAVEN
)
1337 client_id
= SOC15_IH_CLIENTID_DCE
;
1339 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1340 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1343 * Actions of amdgpu_irq_add_id():
1344 * 1. Register a set() function with base driver.
1345 * Base driver will call set() function to enable/disable an
1346 * interrupt in DC hardware.
1347 * 2. Register amdgpu_dm_irq_handler().
1348 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1349 * coming from DC hardware.
1350 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1351 * for acknowledging and handling. */
1353 /* Use VBLANK interrupt */
1354 for (i
= VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0
; i
<= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0
; i
++) {
1355 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->crtc_irq
);
1357 DRM_ERROR("Failed to add crtc irq id!\n");
1361 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1362 int_params
.irq_source
=
1363 dc_interrupt_to_irq_source(dc
, i
, 0);
1365 c_irq_params
= &adev
->dm
.vblank_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VBLANK1
];
1367 c_irq_params
->adev
= adev
;
1368 c_irq_params
->irq_src
= int_params
.irq_source
;
1370 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1371 dm_crtc_high_irq
, c_irq_params
);
1374 /* Use GRPH_PFLIP interrupt */
1375 for (i
= VISLANDS30_IV_SRCID_D1_GRPH_PFLIP
;
1376 i
<= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP
; i
+= 2) {
1377 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->pageflip_irq
);
1379 DRM_ERROR("Failed to add page flip irq id!\n");
1383 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1384 int_params
.irq_source
=
1385 dc_interrupt_to_irq_source(dc
, i
, 0);
1387 c_irq_params
= &adev
->dm
.pflip_params
[int_params
.irq_source
- DC_IRQ_SOURCE_PFLIP_FIRST
];
1389 c_irq_params
->adev
= adev
;
1390 c_irq_params
->irq_src
= int_params
.irq_source
;
1392 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1393 dm_pflip_high_irq
, c_irq_params
);
1398 r
= amdgpu_irq_add_id(adev
, client_id
,
1399 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A
, &adev
->hpd_irq
);
1401 DRM_ERROR("Failed to add hpd irq id!\n");
1405 register_hpd_handlers(adev
);
1410 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1411 /* Register IRQ sources and initialize IRQ callbacks */
1412 static int dcn10_register_irq_handlers(struct amdgpu_device
*adev
)
1414 struct dc
*dc
= adev
->dm
.dc
;
1415 struct common_irq_params
*c_irq_params
;
1416 struct dc_interrupt_params int_params
= {0};
1420 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1421 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
1424 * Actions of amdgpu_irq_add_id():
1425 * 1. Register a set() function with base driver.
1426 * Base driver will call set() function to enable/disable an
1427 * interrupt in DC hardware.
1428 * 2. Register amdgpu_dm_irq_handler().
1429 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1430 * coming from DC hardware.
1431 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1432 * for acknowledging and handling.
1435 /* Use VSTARTUP interrupt */
1436 for (i
= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP
;
1437 i
<= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP
+ adev
->mode_info
.num_crtc
- 1;
1439 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_DCE
, i
, &adev
->crtc_irq
);
1442 DRM_ERROR("Failed to add crtc irq id!\n");
1446 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1447 int_params
.irq_source
=
1448 dc_interrupt_to_irq_source(dc
, i
, 0);
1450 c_irq_params
= &adev
->dm
.vblank_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VBLANK1
];
1452 c_irq_params
->adev
= adev
;
1453 c_irq_params
->irq_src
= int_params
.irq_source
;
1455 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1456 dm_crtc_high_irq
, c_irq_params
);
1459 /* Use GRPH_PFLIP interrupt */
1460 for (i
= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT
;
1461 i
<= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT
+ adev
->mode_info
.num_crtc
- 1;
1463 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_DCE
, i
, &adev
->pageflip_irq
);
1465 DRM_ERROR("Failed to add page flip irq id!\n");
1469 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
1470 int_params
.irq_source
=
1471 dc_interrupt_to_irq_source(dc
, i
, 0);
1473 c_irq_params
= &adev
->dm
.pflip_params
[int_params
.irq_source
- DC_IRQ_SOURCE_PFLIP_FIRST
];
1475 c_irq_params
->adev
= adev
;
1476 c_irq_params
->irq_src
= int_params
.irq_source
;
1478 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
1479 dm_pflip_high_irq
, c_irq_params
);
1484 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_DCE
, DCN_1_0__SRCID__DC_HPD1_INT
,
1487 DRM_ERROR("Failed to add hpd irq id!\n");
1491 register_hpd_handlers(adev
);
1497 static int amdgpu_dm_mode_config_init(struct amdgpu_device
*adev
)
1501 adev
->mode_info
.mode_config_initialized
= true;
1503 adev
->ddev
->mode_config
.funcs
= (void *)&amdgpu_dm_mode_funcs
;
1504 adev
->ddev
->mode_config
.helper_private
= &amdgpu_dm_mode_config_helperfuncs
;
1506 adev
->ddev
->mode_config
.max_width
= 16384;
1507 adev
->ddev
->mode_config
.max_height
= 16384;
1509 adev
->ddev
->mode_config
.preferred_depth
= 24;
1510 adev
->ddev
->mode_config
.prefer_shadow
= 1;
1511 /* indicates support for immediate flip */
1512 adev
->ddev
->mode_config
.async_page_flip
= true;
1514 adev
->ddev
->mode_config
.fb_base
= adev
->gmc
.aper_base
;
1516 r
= amdgpu_display_modeset_create_props(adev
);
1523 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1524 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1526 static int amdgpu_dm_backlight_update_status(struct backlight_device
*bd
)
1528 struct amdgpu_display_manager
*dm
= bl_get_data(bd
);
1530 if (dc_link_set_backlight_level(dm
->backlight_link
,
1531 bd
->props
.brightness
, 0, 0))
1537 static int amdgpu_dm_backlight_get_brightness(struct backlight_device
*bd
)
1539 struct amdgpu_display_manager
*dm
= bl_get_data(bd
);
1540 int ret
= dc_link_get_backlight_level(dm
->backlight_link
);
1542 if (ret
== DC_ERROR_UNEXPECTED
)
1543 return bd
->props
.brightness
;
1547 static const struct backlight_ops amdgpu_dm_backlight_ops
= {
1548 .get_brightness
= amdgpu_dm_backlight_get_brightness
,
1549 .update_status
= amdgpu_dm_backlight_update_status
,
1553 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager
*dm
)
1556 struct backlight_properties props
= { 0 };
1558 props
.max_brightness
= AMDGPU_MAX_BL_LEVEL
;
1559 props
.brightness
= AMDGPU_MAX_BL_LEVEL
;
1560 props
.type
= BACKLIGHT_RAW
;
1562 snprintf(bl_name
, sizeof(bl_name
), "amdgpu_bl%d",
1563 dm
->adev
->ddev
->primary
->index
);
1565 dm
->backlight_dev
= backlight_device_register(bl_name
,
1566 dm
->adev
->ddev
->dev
,
1568 &amdgpu_dm_backlight_ops
,
1571 if (IS_ERR(dm
->backlight_dev
))
1572 DRM_ERROR("DM: Backlight registration failed!\n");
1574 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name
);
1579 static int initialize_plane(struct amdgpu_display_manager
*dm
,
1580 struct amdgpu_mode_info
*mode_info
,
1583 struct amdgpu_plane
*plane
;
1584 unsigned long possible_crtcs
;
1587 plane
= kzalloc(sizeof(struct amdgpu_plane
), GFP_KERNEL
);
1588 mode_info
->planes
[plane_id
] = plane
;
1591 DRM_ERROR("KMS: Failed to allocate plane\n");
1594 plane
->base
.type
= mode_info
->plane_type
[plane_id
];
1597 * HACK: IGT tests expect that each plane can only have
1598 * one possible CRTC. For now, set one CRTC for each
1599 * plane that is not an underlay, but still allow multiple
1600 * CRTCs for underlay planes.
1602 possible_crtcs
= 1 << plane_id
;
1603 if (plane_id
>= dm
->dc
->caps
.max_streams
)
1604 possible_crtcs
= 0xff;
1606 ret
= amdgpu_dm_plane_init(dm
, mode_info
->planes
[plane_id
], possible_crtcs
);
1609 DRM_ERROR("KMS: Failed to initialize plane\n");
1617 static void register_backlight_device(struct amdgpu_display_manager
*dm
,
1618 struct dc_link
*link
)
1620 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1621 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1623 if ((link
->connector_signal
& (SIGNAL_TYPE_EDP
| SIGNAL_TYPE_LVDS
)) &&
1624 link
->type
!= dc_connection_none
) {
1626 * Event if registration failed, we should continue with
1627 * DM initialization because not having a backlight control
1628 * is better then a black screen.
1630 amdgpu_dm_register_backlight_device(dm
);
1632 if (dm
->backlight_dev
)
1633 dm
->backlight_link
= link
;
1640 * In this architecture, the association
1641 * connector -> encoder -> crtc
1642 * id not really requried. The crtc and connector will hold the
1643 * display_index as an abstraction to use with DAL component
1645 * Returns 0 on success
1647 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device
*adev
)
1649 struct amdgpu_display_manager
*dm
= &adev
->dm
;
1651 struct amdgpu_dm_connector
*aconnector
= NULL
;
1652 struct amdgpu_encoder
*aencoder
= NULL
;
1653 struct amdgpu_mode_info
*mode_info
= &adev
->mode_info
;
1655 int32_t total_overlay_planes
, total_primary_planes
;
1656 enum dc_connection_type new_connection_type
= dc_connection_none
;
1658 link_cnt
= dm
->dc
->caps
.max_links
;
1659 if (amdgpu_dm_mode_config_init(dm
->adev
)) {
1660 DRM_ERROR("DM: Failed to initialize mode config\n");
1664 /* Identify the number of planes to be initialized */
1665 total_overlay_planes
= dm
->dc
->caps
.max_slave_planes
;
1666 total_primary_planes
= dm
->dc
->caps
.max_planes
- dm
->dc
->caps
.max_slave_planes
;
1668 /* First initialize overlay planes, index starting after primary planes */
1669 for (i
= (total_overlay_planes
- 1); i
>= 0; i
--) {
1670 if (initialize_plane(dm
, mode_info
, (total_primary_planes
+ i
))) {
1671 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
1676 /* Initialize primary planes */
1677 for (i
= (total_primary_planes
- 1); i
>= 0; i
--) {
1678 if (initialize_plane(dm
, mode_info
, i
)) {
1679 DRM_ERROR("KMS: Failed to initialize primary plane\n");
1684 for (i
= 0; i
< dm
->dc
->caps
.max_streams
; i
++)
1685 if (amdgpu_dm_crtc_init(dm
, &mode_info
->planes
[i
]->base
, i
)) {
1686 DRM_ERROR("KMS: Failed to initialize crtc\n");
1690 dm
->display_indexes_num
= dm
->dc
->caps
.max_streams
;
1692 /* loops over all connectors on the board */
1693 for (i
= 0; i
< link_cnt
; i
++) {
1694 struct dc_link
*link
= NULL
;
1696 if (i
> AMDGPU_DM_MAX_DISPLAY_INDEX
) {
1698 "KMS: Cannot support more than %d display indexes\n",
1699 AMDGPU_DM_MAX_DISPLAY_INDEX
);
1703 aconnector
= kzalloc(sizeof(*aconnector
), GFP_KERNEL
);
1707 aencoder
= kzalloc(sizeof(*aencoder
), GFP_KERNEL
);
1711 if (amdgpu_dm_encoder_init(dm
->ddev
, aencoder
, i
)) {
1712 DRM_ERROR("KMS: Failed to initialize encoder\n");
1716 if (amdgpu_dm_connector_init(dm
, aconnector
, i
, aencoder
)) {
1717 DRM_ERROR("KMS: Failed to initialize connector\n");
1721 link
= dc_get_link_at_index(dm
->dc
, i
);
1723 if (!dc_link_detect_sink(link
, &new_connection_type
))
1724 DRM_ERROR("KMS: Failed to detect connector\n");
1726 if (aconnector
->base
.force
&& new_connection_type
== dc_connection_none
) {
1727 emulated_link_detect(link
);
1728 amdgpu_dm_update_connector_after_detect(aconnector
);
1730 } else if (dc_link_detect(link
, DETECT_REASON_BOOT
)) {
1731 amdgpu_dm_update_connector_after_detect(aconnector
);
1732 register_backlight_device(dm
, link
);
1738 /* Software is initialized. Now we can register interrupt handlers. */
1739 switch (adev
->asic_type
) {
1749 case CHIP_POLARIS11
:
1750 case CHIP_POLARIS10
:
1751 case CHIP_POLARIS12
:
1756 if (dce110_register_irq_handlers(dm
->adev
)) {
1757 DRM_ERROR("DM: Failed to initialize IRQ\n");
1761 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1763 if (dcn10_register_irq_handlers(dm
->adev
)) {
1764 DRM_ERROR("DM: Failed to initialize IRQ\n");
1770 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev
->asic_type
);
1774 if (adev
->asic_type
!= CHIP_CARRIZO
&& adev
->asic_type
!= CHIP_STONEY
)
1775 dm
->dc
->debug
.disable_stutter
= amdgpu_pp_feature_mask
& PP_STUTTER_MODE
? false : true;
1781 for (i
= 0; i
< dm
->dc
->caps
.max_planes
; i
++)
1782 kfree(mode_info
->planes
[i
]);
1786 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager
*dm
)
1788 drm_mode_config_cleanup(dm
->ddev
);
1792 /******************************************************************************
1793 * amdgpu_display_funcs functions
1794 *****************************************************************************/
1797 * dm_bandwidth_update - program display watermarks
1799 * @adev: amdgpu_device pointer
1801 * Calculate and program the display watermarks and line buffer allocation.
1803 static void dm_bandwidth_update(struct amdgpu_device
*adev
)
1805 /* TODO: implement later */
1808 static int amdgpu_notify_freesync(struct drm_device
*dev
, void *data
,
1809 struct drm_file
*filp
)
1811 struct drm_atomic_state
*state
;
1812 struct drm_modeset_acquire_ctx ctx
;
1813 struct drm_crtc
*crtc
;
1814 struct drm_connector
*connector
;
1815 struct drm_connector_state
*old_con_state
, *new_con_state
;
1818 bool enable
= false;
1820 drm_modeset_acquire_init(&ctx
, 0);
1822 state
= drm_atomic_state_alloc(dev
);
1827 state
->acquire_ctx
= &ctx
;
1830 drm_for_each_crtc(crtc
, dev
) {
1831 ret
= drm_atomic_add_affected_connectors(state
, crtc
);
1835 /* TODO rework amdgpu_dm_commit_planes so we don't need this */
1836 ret
= drm_atomic_add_affected_planes(state
, crtc
);
1841 for_each_oldnew_connector_in_state(state
, connector
, old_con_state
, new_con_state
, i
) {
1842 struct dm_connector_state
*dm_new_con_state
= to_dm_connector_state(new_con_state
);
1843 struct drm_crtc_state
*new_crtc_state
;
1844 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(dm_new_con_state
->base
.crtc
);
1845 struct dm_crtc_state
*dm_new_crtc_state
;
1852 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, &acrtc
->base
);
1853 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
1855 dm_new_crtc_state
->freesync_enabled
= enable
;
1858 ret
= drm_atomic_commit(state
);
1861 if (ret
== -EDEADLK
) {
1862 drm_atomic_state_clear(state
);
1863 drm_modeset_backoff(&ctx
);
1867 drm_atomic_state_put(state
);
1870 drm_modeset_drop_locks(&ctx
);
1871 drm_modeset_acquire_fini(&ctx
);
1875 static const struct amdgpu_display_funcs dm_display_funcs
= {
1876 .bandwidth_update
= dm_bandwidth_update
, /* called unconditionally */
1877 .vblank_get_counter
= dm_vblank_get_counter
,/* called unconditionally */
1878 .backlight_set_level
= NULL
, /* never called for DC */
1879 .backlight_get_level
= NULL
, /* never called for DC */
1880 .hpd_sense
= NULL
,/* called unconditionally */
1881 .hpd_set_polarity
= NULL
, /* called unconditionally */
1882 .hpd_get_gpio_reg
= NULL
, /* VBIOS parsing. DAL does it. */
1883 .page_flip_get_scanoutpos
=
1884 dm_crtc_get_scanoutpos
,/* called unconditionally */
1885 .add_encoder
= NULL
, /* VBIOS parsing. DAL does it. */
1886 .add_connector
= NULL
, /* VBIOS parsing. DAL does it. */
1887 .notify_freesync
= amdgpu_notify_freesync
,
1891 #if defined(CONFIG_DEBUG_KERNEL_DC)
1893 static ssize_t
s3_debug_store(struct device
*device
,
1894 struct device_attribute
*attr
,
1900 struct pci_dev
*pdev
= to_pci_dev(device
);
1901 struct drm_device
*drm_dev
= pci_get_drvdata(pdev
);
1902 struct amdgpu_device
*adev
= drm_dev
->dev_private
;
1904 ret
= kstrtoint(buf
, 0, &s3_state
);
1909 drm_kms_helper_hotplug_event(adev
->ddev
);
1914 return ret
== 0 ? count
: 0;
1917 DEVICE_ATTR_WO(s3_debug
);
1921 static int dm_early_init(void *handle
)
1923 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
1925 switch (adev
->asic_type
) {
1928 adev
->mode_info
.num_crtc
= 6;
1929 adev
->mode_info
.num_hpd
= 6;
1930 adev
->mode_info
.num_dig
= 6;
1931 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1934 adev
->mode_info
.num_crtc
= 4;
1935 adev
->mode_info
.num_hpd
= 6;
1936 adev
->mode_info
.num_dig
= 7;
1937 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1941 adev
->mode_info
.num_crtc
= 2;
1942 adev
->mode_info
.num_hpd
= 6;
1943 adev
->mode_info
.num_dig
= 6;
1944 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1948 adev
->mode_info
.num_crtc
= 6;
1949 adev
->mode_info
.num_hpd
= 6;
1950 adev
->mode_info
.num_dig
= 7;
1951 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1954 adev
->mode_info
.num_crtc
= 3;
1955 adev
->mode_info
.num_hpd
= 6;
1956 adev
->mode_info
.num_dig
= 9;
1957 adev
->mode_info
.plane_type
= dm_plane_type_carizzo
;
1960 adev
->mode_info
.num_crtc
= 2;
1961 adev
->mode_info
.num_hpd
= 6;
1962 adev
->mode_info
.num_dig
= 9;
1963 adev
->mode_info
.plane_type
= dm_plane_type_stoney
;
1965 case CHIP_POLARIS11
:
1966 case CHIP_POLARIS12
:
1967 adev
->mode_info
.num_crtc
= 5;
1968 adev
->mode_info
.num_hpd
= 5;
1969 adev
->mode_info
.num_dig
= 5;
1970 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1972 case CHIP_POLARIS10
:
1974 adev
->mode_info
.num_crtc
= 6;
1975 adev
->mode_info
.num_hpd
= 6;
1976 adev
->mode_info
.num_dig
= 6;
1977 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1982 adev
->mode_info
.num_crtc
= 6;
1983 adev
->mode_info
.num_hpd
= 6;
1984 adev
->mode_info
.num_dig
= 6;
1985 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1987 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1989 adev
->mode_info
.num_crtc
= 4;
1990 adev
->mode_info
.num_hpd
= 4;
1991 adev
->mode_info
.num_dig
= 4;
1992 adev
->mode_info
.plane_type
= dm_plane_type_default
;
1996 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev
->asic_type
);
2000 amdgpu_dm_set_irq_funcs(adev
);
2002 if (adev
->mode_info
.funcs
== NULL
)
2003 adev
->mode_info
.funcs
= &dm_display_funcs
;
2006 * Note: Do NOT change adev->audio_endpt_rreg and
2007 * adev->audio_endpt_wreg because they are initialised in
2008 * amdgpu_device_init()
2010 #if defined(CONFIG_DEBUG_KERNEL_DC)
2013 &dev_attr_s3_debug
);
2019 static bool modeset_required(struct drm_crtc_state
*crtc_state
,
2020 struct dc_stream_state
*new_stream
,
2021 struct dc_stream_state
*old_stream
)
2023 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
2026 if (!crtc_state
->enable
)
2029 return crtc_state
->active
;
2032 static bool modereset_required(struct drm_crtc_state
*crtc_state
)
2034 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
2037 return !crtc_state
->enable
|| !crtc_state
->active
;
2040 static void amdgpu_dm_encoder_destroy(struct drm_encoder
*encoder
)
2042 drm_encoder_cleanup(encoder
);
2046 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs
= {
2047 .destroy
= amdgpu_dm_encoder_destroy
,
2050 static bool fill_rects_from_plane_state(const struct drm_plane_state
*state
,
2051 struct dc_plane_state
*plane_state
)
2053 plane_state
->src_rect
.x
= state
->src_x
>> 16;
2054 plane_state
->src_rect
.y
= state
->src_y
>> 16;
2055 /* we ignore the mantissa for now and do not deal with floating pixels :( */
2056 plane_state
->src_rect
.width
= state
->src_w
>> 16;
2058 if (plane_state
->src_rect
.width
== 0)
2061 plane_state
->src_rect
.height
= state
->src_h
>> 16;
2062 if (plane_state
->src_rect
.height
== 0)
2065 plane_state
->dst_rect
.x
= state
->crtc_x
;
2066 plane_state
->dst_rect
.y
= state
->crtc_y
;
2068 if (state
->crtc_w
== 0)
2071 plane_state
->dst_rect
.width
= state
->crtc_w
;
2073 if (state
->crtc_h
== 0)
2076 plane_state
->dst_rect
.height
= state
->crtc_h
;
2078 plane_state
->clip_rect
= plane_state
->dst_rect
;
2080 switch (state
->rotation
& DRM_MODE_ROTATE_MASK
) {
2081 case DRM_MODE_ROTATE_0
:
2082 plane_state
->rotation
= ROTATION_ANGLE_0
;
2084 case DRM_MODE_ROTATE_90
:
2085 plane_state
->rotation
= ROTATION_ANGLE_90
;
2087 case DRM_MODE_ROTATE_180
:
2088 plane_state
->rotation
= ROTATION_ANGLE_180
;
2090 case DRM_MODE_ROTATE_270
:
2091 plane_state
->rotation
= ROTATION_ANGLE_270
;
2094 plane_state
->rotation
= ROTATION_ANGLE_0
;
2100 static int get_fb_info(const struct amdgpu_framebuffer
*amdgpu_fb
,
2101 uint64_t *tiling_flags
)
2103 struct amdgpu_bo
*rbo
= gem_to_amdgpu_bo(amdgpu_fb
->base
.obj
[0]);
2104 int r
= amdgpu_bo_reserve(rbo
, false);
2107 /* Don't show error message when returning -ERESTARTSYS */
2108 if (r
!= -ERESTARTSYS
)
2109 DRM_ERROR("Unable to reserve buffer: %d\n", r
);
2114 amdgpu_bo_get_tiling_flags(rbo
, tiling_flags
);
2116 amdgpu_bo_unreserve(rbo
);
2121 static int fill_plane_attributes_from_fb(struct amdgpu_device
*adev
,
2122 struct dc_plane_state
*plane_state
,
2123 const struct amdgpu_framebuffer
*amdgpu_fb
)
2125 uint64_t tiling_flags
;
2126 unsigned int awidth
;
2127 const struct drm_framebuffer
*fb
= &amdgpu_fb
->base
;
2129 struct drm_format_name_buf format_name
;
2138 switch (fb
->format
->format
) {
2140 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS
;
2142 case DRM_FORMAT_RGB565
:
2143 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_RGB565
;
2145 case DRM_FORMAT_XRGB8888
:
2146 case DRM_FORMAT_ARGB8888
:
2147 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888
;
2149 case DRM_FORMAT_XRGB2101010
:
2150 case DRM_FORMAT_ARGB2101010
:
2151 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010
;
2153 case DRM_FORMAT_XBGR2101010
:
2154 case DRM_FORMAT_ABGR2101010
:
2155 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
;
2157 case DRM_FORMAT_XBGR8888
:
2158 case DRM_FORMAT_ABGR8888
:
2159 plane_state
->format
= SURFACE_PIXEL_FORMAT_GRPH_ABGR8888
;
2161 case DRM_FORMAT_NV21
:
2162 plane_state
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr
;
2164 case DRM_FORMAT_NV12
:
2165 plane_state
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb
;
2168 DRM_ERROR("Unsupported screen format %s\n",
2169 drm_get_format_name(fb
->format
->format
, &format_name
));
2173 if (plane_state
->format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
2174 plane_state
->address
.type
= PLN_ADDR_TYPE_GRAPHICS
;
2175 plane_state
->plane_size
.grph
.surface_size
.x
= 0;
2176 plane_state
->plane_size
.grph
.surface_size
.y
= 0;
2177 plane_state
->plane_size
.grph
.surface_size
.width
= fb
->width
;
2178 plane_state
->plane_size
.grph
.surface_size
.height
= fb
->height
;
2179 plane_state
->plane_size
.grph
.surface_pitch
=
2180 fb
->pitches
[0] / fb
->format
->cpp
[0];
2181 /* TODO: unhardcode */
2182 plane_state
->color_space
= COLOR_SPACE_SRGB
;
2185 awidth
= ALIGN(fb
->width
, 64);
2186 plane_state
->address
.type
= PLN_ADDR_TYPE_VIDEO_PROGRESSIVE
;
2187 plane_state
->plane_size
.video
.luma_size
.x
= 0;
2188 plane_state
->plane_size
.video
.luma_size
.y
= 0;
2189 plane_state
->plane_size
.video
.luma_size
.width
= awidth
;
2190 plane_state
->plane_size
.video
.luma_size
.height
= fb
->height
;
2191 /* TODO: unhardcode */
2192 plane_state
->plane_size
.video
.luma_pitch
= awidth
;
2194 plane_state
->plane_size
.video
.chroma_size
.x
= 0;
2195 plane_state
->plane_size
.video
.chroma_size
.y
= 0;
2196 plane_state
->plane_size
.video
.chroma_size
.width
= awidth
;
2197 plane_state
->plane_size
.video
.chroma_size
.height
= fb
->height
;
2198 plane_state
->plane_size
.video
.chroma_pitch
= awidth
/ 2;
2200 /* TODO: unhardcode */
2201 plane_state
->color_space
= COLOR_SPACE_YCBCR709
;
2204 memset(&plane_state
->tiling_info
, 0, sizeof(plane_state
->tiling_info
));
2206 /* Fill GFX8 params */
2207 if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
) == DC_ARRAY_2D_TILED_THIN1
) {
2208 unsigned int bankw
, bankh
, mtaspect
, tile_split
, num_banks
;
2210 bankw
= AMDGPU_TILING_GET(tiling_flags
, BANK_WIDTH
);
2211 bankh
= AMDGPU_TILING_GET(tiling_flags
, BANK_HEIGHT
);
2212 mtaspect
= AMDGPU_TILING_GET(tiling_flags
, MACRO_TILE_ASPECT
);
2213 tile_split
= AMDGPU_TILING_GET(tiling_flags
, TILE_SPLIT
);
2214 num_banks
= AMDGPU_TILING_GET(tiling_flags
, NUM_BANKS
);
2216 /* XXX fix me for VI */
2217 plane_state
->tiling_info
.gfx8
.num_banks
= num_banks
;
2218 plane_state
->tiling_info
.gfx8
.array_mode
=
2219 DC_ARRAY_2D_TILED_THIN1
;
2220 plane_state
->tiling_info
.gfx8
.tile_split
= tile_split
;
2221 plane_state
->tiling_info
.gfx8
.bank_width
= bankw
;
2222 plane_state
->tiling_info
.gfx8
.bank_height
= bankh
;
2223 plane_state
->tiling_info
.gfx8
.tile_aspect
= mtaspect
;
2224 plane_state
->tiling_info
.gfx8
.tile_mode
=
2225 DC_ADDR_SURF_MICRO_TILING_DISPLAY
;
2226 } else if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
)
2227 == DC_ARRAY_1D_TILED_THIN1
) {
2228 plane_state
->tiling_info
.gfx8
.array_mode
= DC_ARRAY_1D_TILED_THIN1
;
2231 plane_state
->tiling_info
.gfx8
.pipe_config
=
2232 AMDGPU_TILING_GET(tiling_flags
, PIPE_CONFIG
);
2234 if (adev
->asic_type
== CHIP_VEGA10
||
2235 adev
->asic_type
== CHIP_VEGA12
||
2236 adev
->asic_type
== CHIP_VEGA20
||
2237 adev
->asic_type
== CHIP_RAVEN
) {
2238 /* Fill GFX9 params */
2239 plane_state
->tiling_info
.gfx9
.num_pipes
=
2240 adev
->gfx
.config
.gb_addr_config_fields
.num_pipes
;
2241 plane_state
->tiling_info
.gfx9
.num_banks
=
2242 adev
->gfx
.config
.gb_addr_config_fields
.num_banks
;
2243 plane_state
->tiling_info
.gfx9
.pipe_interleave
=
2244 adev
->gfx
.config
.gb_addr_config_fields
.pipe_interleave_size
;
2245 plane_state
->tiling_info
.gfx9
.num_shader_engines
=
2246 adev
->gfx
.config
.gb_addr_config_fields
.num_se
;
2247 plane_state
->tiling_info
.gfx9
.max_compressed_frags
=
2248 adev
->gfx
.config
.gb_addr_config_fields
.max_compress_frags
;
2249 plane_state
->tiling_info
.gfx9
.num_rb_per_se
=
2250 adev
->gfx
.config
.gb_addr_config_fields
.num_rb_per_se
;
2251 plane_state
->tiling_info
.gfx9
.swizzle
=
2252 AMDGPU_TILING_GET(tiling_flags
, SWIZZLE_MODE
);
2253 plane_state
->tiling_info
.gfx9
.shaderEnable
= 1;
2256 plane_state
->visible
= true;
2257 plane_state
->scaling_quality
.h_taps_c
= 0;
2258 plane_state
->scaling_quality
.v_taps_c
= 0;
2260 /* is this needed? is plane_state zeroed at allocation? */
2261 plane_state
->scaling_quality
.h_taps
= 0;
2262 plane_state
->scaling_quality
.v_taps
= 0;
2263 plane_state
->stereo_format
= PLANE_STEREO_FORMAT_NONE
;
2269 static int fill_plane_attributes(struct amdgpu_device
*adev
,
2270 struct dc_plane_state
*dc_plane_state
,
2271 struct drm_plane_state
*plane_state
,
2272 struct drm_crtc_state
*crtc_state
)
2274 const struct amdgpu_framebuffer
*amdgpu_fb
=
2275 to_amdgpu_framebuffer(plane_state
->fb
);
2276 const struct drm_crtc
*crtc
= plane_state
->crtc
;
2279 if (!fill_rects_from_plane_state(plane_state
, dc_plane_state
))
2282 ret
= fill_plane_attributes_from_fb(
2283 crtc
->dev
->dev_private
,
2291 * Always set input transfer function, since plane state is refreshed
2294 ret
= amdgpu_dm_set_degamma_lut(crtc_state
, dc_plane_state
);
2296 dc_transfer_func_release(dc_plane_state
->in_transfer_func
);
2297 dc_plane_state
->in_transfer_func
= NULL
;
2303 static void update_stream_scaling_settings(const struct drm_display_mode
*mode
,
2304 const struct dm_connector_state
*dm_state
,
2305 struct dc_stream_state
*stream
)
2307 enum amdgpu_rmx_type rmx_type
;
2309 struct rect src
= { 0 }; /* viewport in composition space*/
2310 struct rect dst
= { 0 }; /* stream addressable area */
2312 /* no mode. nothing to be done */
2316 /* Full screen scaling by default */
2317 src
.width
= mode
->hdisplay
;
2318 src
.height
= mode
->vdisplay
;
2319 dst
.width
= stream
->timing
.h_addressable
;
2320 dst
.height
= stream
->timing
.v_addressable
;
2323 rmx_type
= dm_state
->scaling
;
2324 if (rmx_type
== RMX_ASPECT
|| rmx_type
== RMX_OFF
) {
2325 if (src
.width
* dst
.height
<
2326 src
.height
* dst
.width
) {
2327 /* height needs less upscaling/more downscaling */
2328 dst
.width
= src
.width
*
2329 dst
.height
/ src
.height
;
2331 /* width needs less upscaling/more downscaling */
2332 dst
.height
= src
.height
*
2333 dst
.width
/ src
.width
;
2335 } else if (rmx_type
== RMX_CENTER
) {
2339 dst
.x
= (stream
->timing
.h_addressable
- dst
.width
) / 2;
2340 dst
.y
= (stream
->timing
.v_addressable
- dst
.height
) / 2;
2342 if (dm_state
->underscan_enable
) {
2343 dst
.x
+= dm_state
->underscan_hborder
/ 2;
2344 dst
.y
+= dm_state
->underscan_vborder
/ 2;
2345 dst
.width
-= dm_state
->underscan_hborder
;
2346 dst
.height
-= dm_state
->underscan_vborder
;
2353 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
2354 dst
.x
, dst
.y
, dst
.width
, dst
.height
);
2358 static enum dc_color_depth
2359 convert_color_depth_from_display_info(const struct drm_connector
*connector
)
2361 struct dm_connector_state
*dm_conn_state
=
2362 to_dm_connector_state(connector
->state
);
2363 uint32_t bpc
= connector
->display_info
.bpc
;
2365 /* TODO: Remove this when there's support for max_bpc in drm */
2366 if (dm_conn_state
&& bpc
> dm_conn_state
->max_bpc
)
2367 /* Round down to nearest even number. */
2368 bpc
= dm_conn_state
->max_bpc
- (dm_conn_state
->max_bpc
& 1);
2373 * Temporary Work around, DRM doesn't parse color depth for
2374 * EDID revision before 1.4
2375 * TODO: Fix edid parsing
2377 return COLOR_DEPTH_888
;
2379 return COLOR_DEPTH_666
;
2381 return COLOR_DEPTH_888
;
2383 return COLOR_DEPTH_101010
;
2385 return COLOR_DEPTH_121212
;
2387 return COLOR_DEPTH_141414
;
2389 return COLOR_DEPTH_161616
;
2391 return COLOR_DEPTH_UNDEFINED
;
2395 static enum dc_aspect_ratio
2396 get_aspect_ratio(const struct drm_display_mode
*mode_in
)
2398 /* 1-1 mapping, since both enums follow the HDMI spec. */
2399 return (enum dc_aspect_ratio
) mode_in
->picture_aspect_ratio
;
2402 static enum dc_color_space
2403 get_output_color_space(const struct dc_crtc_timing
*dc_crtc_timing
)
2405 enum dc_color_space color_space
= COLOR_SPACE_SRGB
;
2407 switch (dc_crtc_timing
->pixel_encoding
) {
2408 case PIXEL_ENCODING_YCBCR422
:
2409 case PIXEL_ENCODING_YCBCR444
:
2410 case PIXEL_ENCODING_YCBCR420
:
2413 * 27030khz is the separation point between HDTV and SDTV
2414 * according to HDMI spec, we use YCbCr709 and YCbCr601
2417 if (dc_crtc_timing
->pix_clk_khz
> 27030) {
2418 if (dc_crtc_timing
->flags
.Y_ONLY
)
2420 COLOR_SPACE_YCBCR709_LIMITED
;
2422 color_space
= COLOR_SPACE_YCBCR709
;
2424 if (dc_crtc_timing
->flags
.Y_ONLY
)
2426 COLOR_SPACE_YCBCR601_LIMITED
;
2428 color_space
= COLOR_SPACE_YCBCR601
;
2433 case PIXEL_ENCODING_RGB
:
2434 color_space
= COLOR_SPACE_SRGB
;
2445 static void reduce_mode_colour_depth(struct dc_crtc_timing
*timing_out
)
2447 if (timing_out
->display_color_depth
<= COLOR_DEPTH_888
)
2450 timing_out
->display_color_depth
--;
2453 static void adjust_colour_depth_from_display_info(struct dc_crtc_timing
*timing_out
,
2454 const struct drm_display_info
*info
)
2457 if (timing_out
->display_color_depth
<= COLOR_DEPTH_888
)
2460 normalized_clk
= timing_out
->pix_clk_khz
;
2461 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
2462 if (timing_out
->pixel_encoding
== PIXEL_ENCODING_YCBCR420
)
2463 normalized_clk
/= 2;
2464 /* Adjusting pix clock following on HDMI spec based on colour depth */
2465 switch (timing_out
->display_color_depth
) {
2466 case COLOR_DEPTH_101010
:
2467 normalized_clk
= (normalized_clk
* 30) / 24;
2469 case COLOR_DEPTH_121212
:
2470 normalized_clk
= (normalized_clk
* 36) / 24;
2472 case COLOR_DEPTH_161616
:
2473 normalized_clk
= (normalized_clk
* 48) / 24;
2478 if (normalized_clk
<= info
->max_tmds_clock
)
2480 reduce_mode_colour_depth(timing_out
);
2482 } while (timing_out
->display_color_depth
> COLOR_DEPTH_888
);
2487 fill_stream_properties_from_drm_display_mode(struct dc_stream_state
*stream
,
2488 const struct drm_display_mode
*mode_in
,
2489 const struct drm_connector
*connector
)
2491 struct dc_crtc_timing
*timing_out
= &stream
->timing
;
2492 const struct drm_display_info
*info
= &connector
->display_info
;
2494 memset(timing_out
, 0, sizeof(struct dc_crtc_timing
));
2496 timing_out
->h_border_left
= 0;
2497 timing_out
->h_border_right
= 0;
2498 timing_out
->v_border_top
= 0;
2499 timing_out
->v_border_bottom
= 0;
2500 /* TODO: un-hardcode */
2501 if (drm_mode_is_420_only(info
, mode_in
)
2502 && stream
->sink
->sink_signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
2503 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR420
;
2504 else if ((connector
->display_info
.color_formats
& DRM_COLOR_FORMAT_YCRCB444
)
2505 && stream
->sink
->sink_signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
2506 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR444
;
2508 timing_out
->pixel_encoding
= PIXEL_ENCODING_RGB
;
2510 timing_out
->timing_3d_format
= TIMING_3D_FORMAT_NONE
;
2511 timing_out
->display_color_depth
= convert_color_depth_from_display_info(
2513 timing_out
->scan_type
= SCANNING_TYPE_NODATA
;
2514 timing_out
->hdmi_vic
= 0;
2515 timing_out
->vic
= drm_match_cea_mode(mode_in
);
2517 timing_out
->h_addressable
= mode_in
->crtc_hdisplay
;
2518 timing_out
->h_total
= mode_in
->crtc_htotal
;
2519 timing_out
->h_sync_width
=
2520 mode_in
->crtc_hsync_end
- mode_in
->crtc_hsync_start
;
2521 timing_out
->h_front_porch
=
2522 mode_in
->crtc_hsync_start
- mode_in
->crtc_hdisplay
;
2523 timing_out
->v_total
= mode_in
->crtc_vtotal
;
2524 timing_out
->v_addressable
= mode_in
->crtc_vdisplay
;
2525 timing_out
->v_front_porch
=
2526 mode_in
->crtc_vsync_start
- mode_in
->crtc_vdisplay
;
2527 timing_out
->v_sync_width
=
2528 mode_in
->crtc_vsync_end
- mode_in
->crtc_vsync_start
;
2529 timing_out
->pix_clk_khz
= mode_in
->crtc_clock
;
2530 timing_out
->aspect_ratio
= get_aspect_ratio(mode_in
);
2531 if (mode_in
->flags
& DRM_MODE_FLAG_PHSYNC
)
2532 timing_out
->flags
.HSYNC_POSITIVE_POLARITY
= 1;
2533 if (mode_in
->flags
& DRM_MODE_FLAG_PVSYNC
)
2534 timing_out
->flags
.VSYNC_POSITIVE_POLARITY
= 1;
2536 stream
->output_color_space
= get_output_color_space(timing_out
);
2538 stream
->out_transfer_func
->type
= TF_TYPE_PREDEFINED
;
2539 stream
->out_transfer_func
->tf
= TRANSFER_FUNCTION_SRGB
;
2540 if (stream
->sink
->sink_signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
2541 adjust_colour_depth_from_display_info(timing_out
, info
);
2544 static void fill_audio_info(struct audio_info
*audio_info
,
2545 const struct drm_connector
*drm_connector
,
2546 const struct dc_sink
*dc_sink
)
2549 int cea_revision
= 0;
2550 const struct dc_edid_caps
*edid_caps
= &dc_sink
->edid_caps
;
2552 audio_info
->manufacture_id
= edid_caps
->manufacturer_id
;
2553 audio_info
->product_id
= edid_caps
->product_id
;
2555 cea_revision
= drm_connector
->display_info
.cea_rev
;
2557 strncpy(audio_info
->display_name
,
2558 edid_caps
->display_name
,
2559 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS
- 1);
2561 if (cea_revision
>= 3) {
2562 audio_info
->mode_count
= edid_caps
->audio_mode_count
;
2564 for (i
= 0; i
< audio_info
->mode_count
; ++i
) {
2565 audio_info
->modes
[i
].format_code
=
2566 (enum audio_format_code
)
2567 (edid_caps
->audio_modes
[i
].format_code
);
2568 audio_info
->modes
[i
].channel_count
=
2569 edid_caps
->audio_modes
[i
].channel_count
;
2570 audio_info
->modes
[i
].sample_rates
.all
=
2571 edid_caps
->audio_modes
[i
].sample_rate
;
2572 audio_info
->modes
[i
].sample_size
=
2573 edid_caps
->audio_modes
[i
].sample_size
;
2577 audio_info
->flags
.all
= edid_caps
->speaker_flags
;
2579 /* TODO: We only check for the progressive mode, check for interlace mode too */
2580 if (drm_connector
->latency_present
[0]) {
2581 audio_info
->video_latency
= drm_connector
->video_latency
[0];
2582 audio_info
->audio_latency
= drm_connector
->audio_latency
[0];
2585 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
2590 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode
*src_mode
,
2591 struct drm_display_mode
*dst_mode
)
2593 dst_mode
->crtc_hdisplay
= src_mode
->crtc_hdisplay
;
2594 dst_mode
->crtc_vdisplay
= src_mode
->crtc_vdisplay
;
2595 dst_mode
->crtc_clock
= src_mode
->crtc_clock
;
2596 dst_mode
->crtc_hblank_start
= src_mode
->crtc_hblank_start
;
2597 dst_mode
->crtc_hblank_end
= src_mode
->crtc_hblank_end
;
2598 dst_mode
->crtc_hsync_start
= src_mode
->crtc_hsync_start
;
2599 dst_mode
->crtc_hsync_end
= src_mode
->crtc_hsync_end
;
2600 dst_mode
->crtc_htotal
= src_mode
->crtc_htotal
;
2601 dst_mode
->crtc_hskew
= src_mode
->crtc_hskew
;
2602 dst_mode
->crtc_vblank_start
= src_mode
->crtc_vblank_start
;
2603 dst_mode
->crtc_vblank_end
= src_mode
->crtc_vblank_end
;
2604 dst_mode
->crtc_vsync_start
= src_mode
->crtc_vsync_start
;
2605 dst_mode
->crtc_vsync_end
= src_mode
->crtc_vsync_end
;
2606 dst_mode
->crtc_vtotal
= src_mode
->crtc_vtotal
;
2610 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode
*drm_mode
,
2611 const struct drm_display_mode
*native_mode
,
2614 if (scale_enabled
) {
2615 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
2616 } else if (native_mode
->clock
== drm_mode
->clock
&&
2617 native_mode
->htotal
== drm_mode
->htotal
&&
2618 native_mode
->vtotal
== drm_mode
->vtotal
) {
2619 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
2621 /* no scaling nor amdgpu inserted, no need to patch */
2625 static struct dc_sink
*
2626 create_fake_sink(struct amdgpu_dm_connector
*aconnector
)
2628 struct dc_sink_init_data sink_init_data
= { 0 };
2629 struct dc_sink
*sink
= NULL
;
2630 sink_init_data
.link
= aconnector
->dc_link
;
2631 sink_init_data
.sink_signal
= aconnector
->dc_link
->connector_signal
;
2633 sink
= dc_sink_create(&sink_init_data
);
2635 DRM_ERROR("Failed to create sink!\n");
2638 sink
->sink_signal
= SIGNAL_TYPE_VIRTUAL
;
2643 static void set_multisync_trigger_params(
2644 struct dc_stream_state
*stream
)
2646 if (stream
->triggered_crtc_reset
.enabled
) {
2647 stream
->triggered_crtc_reset
.event
= CRTC_EVENT_VSYNC_RISING
;
2648 stream
->triggered_crtc_reset
.delay
= TRIGGER_DELAY_NEXT_LINE
;
2652 static void set_master_stream(struct dc_stream_state
*stream_set
[],
2655 int j
, highest_rfr
= 0, master_stream
= 0;
2657 for (j
= 0; j
< stream_count
; j
++) {
2658 if (stream_set
[j
] && stream_set
[j
]->triggered_crtc_reset
.enabled
) {
2659 int refresh_rate
= 0;
2661 refresh_rate
= (stream_set
[j
]->timing
.pix_clk_khz
*1000)/
2662 (stream_set
[j
]->timing
.h_total
*stream_set
[j
]->timing
.v_total
);
2663 if (refresh_rate
> highest_rfr
) {
2664 highest_rfr
= refresh_rate
;
2669 for (j
= 0; j
< stream_count
; j
++) {
2671 stream_set
[j
]->triggered_crtc_reset
.event_source
= stream_set
[master_stream
];
2675 static void dm_enable_per_frame_crtc_master_sync(struct dc_state
*context
)
2679 if (context
->stream_count
< 2)
2681 for (i
= 0; i
< context
->stream_count
; i
++) {
2682 if (!context
->streams
[i
])
2685 * TODO: add a function to read AMD VSDB bits and set
2686 * crtc_sync_master.multi_sync_enabled flag
2687 * For now it's set to false
2689 set_multisync_trigger_params(context
->streams
[i
]);
2691 set_master_stream(context
->streams
, context
->stream_count
);
2694 static struct dc_stream_state
*
2695 create_stream_for_sink(struct amdgpu_dm_connector
*aconnector
,
2696 const struct drm_display_mode
*drm_mode
,
2697 const struct dm_connector_state
*dm_state
)
2699 struct drm_display_mode
*preferred_mode
= NULL
;
2700 struct drm_connector
*drm_connector
;
2701 struct dc_stream_state
*stream
= NULL
;
2702 struct drm_display_mode mode
= *drm_mode
;
2703 bool native_mode_found
= false;
2704 struct dc_sink
*sink
= NULL
;
2705 if (aconnector
== NULL
) {
2706 DRM_ERROR("aconnector is NULL!\n");
2710 drm_connector
= &aconnector
->base
;
2712 if (!aconnector
->dc_sink
) {
2713 if (!aconnector
->mst_port
) {
2714 sink
= create_fake_sink(aconnector
);
2719 sink
= aconnector
->dc_sink
;
2722 stream
= dc_create_stream_for_sink(sink
);
2724 if (stream
== NULL
) {
2725 DRM_ERROR("Failed to create stream for sink!\n");
2729 list_for_each_entry(preferred_mode
, &aconnector
->base
.modes
, head
) {
2730 /* Search for preferred mode */
2731 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
) {
2732 native_mode_found
= true;
2736 if (!native_mode_found
)
2737 preferred_mode
= list_first_entry_or_null(
2738 &aconnector
->base
.modes
,
2739 struct drm_display_mode
,
2742 if (preferred_mode
== NULL
) {
2744 * This may not be an error, the use case is when we have no
2745 * usermode calls to reset and set mode upon hotplug. In this
2746 * case, we call set mode ourselves to restore the previous mode
2747 * and the modelist may not be filled in in time.
2749 DRM_DEBUG_DRIVER("No preferred mode found\n");
2751 decide_crtc_timing_for_drm_display_mode(
2752 &mode
, preferred_mode
,
2753 dm_state
? (dm_state
->scaling
!= RMX_OFF
) : false);
2757 drm_mode_set_crtcinfo(&mode
, 0);
2759 fill_stream_properties_from_drm_display_mode(stream
,
2760 &mode
, &aconnector
->base
);
2761 update_stream_scaling_settings(&mode
, dm_state
, stream
);
2764 &stream
->audio_info
,
2768 update_stream_signal(stream
);
2770 if (dm_state
&& dm_state
->freesync_capable
)
2771 stream
->ignore_msa_timing_param
= true;
2773 if (sink
&& sink
->sink_signal
== SIGNAL_TYPE_VIRTUAL
&& aconnector
->base
.force
!= DRM_FORCE_ON
)
2774 dc_sink_release(sink
);
/* DRM .destroy callback: tear down and free the CRTC. */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
2785 static void dm_crtc_destroy_state(struct drm_crtc
*crtc
,
2786 struct drm_crtc_state
*state
)
2788 struct dm_crtc_state
*cur
= to_dm_crtc_state(state
);
2790 /* TODO Destroy dc_stream objects are stream object is flattened */
2792 dc_stream_release(cur
->stream
);
2795 __drm_atomic_helper_crtc_destroy_state(state
);
2801 static void dm_crtc_reset_state(struct drm_crtc
*crtc
)
2803 struct dm_crtc_state
*state
;
2806 dm_crtc_destroy_state(crtc
, crtc
->state
);
2808 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
2809 if (WARN_ON(!state
))
2812 crtc
->state
= &state
->base
;
2813 crtc
->state
->crtc
= crtc
;
2817 static struct drm_crtc_state
*
2818 dm_crtc_duplicate_state(struct drm_crtc
*crtc
)
2820 struct dm_crtc_state
*state
, *cur
;
2822 cur
= to_dm_crtc_state(crtc
->state
);
2824 if (WARN_ON(!crtc
->state
))
2827 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
2831 __drm_atomic_helper_crtc_duplicate_state(crtc
, &state
->base
);
2834 state
->stream
= cur
->stream
;
2835 dc_stream_retain(state
->stream
);
2838 state
->adjust
= cur
->adjust
;
2839 state
->vrr_infopacket
= cur
->vrr_infopacket
;
2840 state
->freesync_enabled
= cur
->freesync_enabled
;
2842 /* TODO Duplicate dc_stream after objects are stream object is flattened */
2844 return &state
->base
;
2848 static inline int dm_set_vblank(struct drm_crtc
*crtc
, bool enable
)
2850 enum dc_irq_source irq_source
;
2851 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
2852 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
2854 irq_source
= IRQ_TYPE_VBLANK
+ acrtc
->otg_inst
;
2855 return dc_interrupt_set(adev
->dm
.dc
, irq_source
, enable
) ? 0 : -EBUSY
;
2858 static int dm_enable_vblank(struct drm_crtc
*crtc
)
2860 return dm_set_vblank(crtc
, true);
2863 static void dm_disable_vblank(struct drm_crtc
*crtc
)
2865 dm_set_vblank(crtc
, false);
2868 /* Implemented only the options currently availible for the driver */
2869 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs
= {
2870 .reset
= dm_crtc_reset_state
,
2871 .destroy
= amdgpu_dm_crtc_destroy
,
2872 .gamma_set
= drm_atomic_helper_legacy_gamma_set
,
2873 .set_config
= drm_atomic_helper_set_config
,
2874 .page_flip
= drm_atomic_helper_page_flip
,
2875 .atomic_duplicate_state
= dm_crtc_duplicate_state
,
2876 .atomic_destroy_state
= dm_crtc_destroy_state
,
2877 .set_crc_source
= amdgpu_dm_crtc_set_crc_source
,
2878 .verify_crc_source
= amdgpu_dm_crtc_verify_crc_source
,
2879 .enable_vblank
= dm_enable_vblank
,
2880 .disable_vblank
= dm_disable_vblank
,
2883 static enum drm_connector_status
2884 amdgpu_dm_connector_detect(struct drm_connector
*connector
, bool force
)
2887 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
2891 * 1. This interface is NOT called in context of HPD irq.
2892 * 2. This interface *is called* in context of user-mode ioctl. Which
2893 * makes it a bad place for *any* MST-related activity.
2896 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
&&
2897 !aconnector
->fake_enable
)
2898 connected
= (aconnector
->dc_sink
!= NULL
);
2900 connected
= (aconnector
->base
.force
== DRM_FORCE_ON
);
2902 return (connected
? connector_status_connected
:
2903 connector_status_disconnected
);
2906 int amdgpu_dm_connector_atomic_set_property(struct drm_connector
*connector
,
2907 struct drm_connector_state
*connector_state
,
2908 struct drm_property
*property
,
2911 struct drm_device
*dev
= connector
->dev
;
2912 struct amdgpu_device
*adev
= dev
->dev_private
;
2913 struct dm_connector_state
*dm_old_state
=
2914 to_dm_connector_state(connector
->state
);
2915 struct dm_connector_state
*dm_new_state
=
2916 to_dm_connector_state(connector_state
);
2920 if (property
== dev
->mode_config
.scaling_mode_property
) {
2921 enum amdgpu_rmx_type rmx_type
;
2924 case DRM_MODE_SCALE_CENTER
:
2925 rmx_type
= RMX_CENTER
;
2927 case DRM_MODE_SCALE_ASPECT
:
2928 rmx_type
= RMX_ASPECT
;
2930 case DRM_MODE_SCALE_FULLSCREEN
:
2931 rmx_type
= RMX_FULL
;
2933 case DRM_MODE_SCALE_NONE
:
2939 if (dm_old_state
->scaling
== rmx_type
)
2942 dm_new_state
->scaling
= rmx_type
;
2944 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
2945 dm_new_state
->underscan_hborder
= val
;
2947 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
2948 dm_new_state
->underscan_vborder
= val
;
2950 } else if (property
== adev
->mode_info
.underscan_property
) {
2951 dm_new_state
->underscan_enable
= val
;
2953 } else if (property
== adev
->mode_info
.max_bpc_property
) {
2954 dm_new_state
->max_bpc
= val
;
2961 int amdgpu_dm_connector_atomic_get_property(struct drm_connector
*connector
,
2962 const struct drm_connector_state
*state
,
2963 struct drm_property
*property
,
2966 struct drm_device
*dev
= connector
->dev
;
2967 struct amdgpu_device
*adev
= dev
->dev_private
;
2968 struct dm_connector_state
*dm_state
=
2969 to_dm_connector_state(state
);
2972 if (property
== dev
->mode_config
.scaling_mode_property
) {
2973 switch (dm_state
->scaling
) {
2975 *val
= DRM_MODE_SCALE_CENTER
;
2978 *val
= DRM_MODE_SCALE_ASPECT
;
2981 *val
= DRM_MODE_SCALE_FULLSCREEN
;
2985 *val
= DRM_MODE_SCALE_NONE
;
2989 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
2990 *val
= dm_state
->underscan_hborder
;
2992 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
2993 *val
= dm_state
->underscan_vborder
;
2995 } else if (property
== adev
->mode_info
.underscan_property
) {
2996 *val
= dm_state
->underscan_enable
;
2998 } else if (property
== adev
->mode_info
.max_bpc_property
) {
2999 *val
= dm_state
->max_bpc
;
3005 static void amdgpu_dm_connector_destroy(struct drm_connector
*connector
)
3007 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
3008 const struct dc_link
*link
= aconnector
->dc_link
;
3009 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
3010 struct amdgpu_display_manager
*dm
= &adev
->dm
;
3012 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3013 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3015 if ((link
->connector_signal
& (SIGNAL_TYPE_EDP
| SIGNAL_TYPE_LVDS
)) &&
3016 link
->type
!= dc_connection_none
&&
3017 dm
->backlight_dev
) {
3018 backlight_device_unregister(dm
->backlight_dev
);
3019 dm
->backlight_dev
= NULL
;
3022 drm_dp_cec_unregister_connector(&aconnector
->dm_dp_aux
.aux
);
3023 drm_connector_unregister(connector
);
3024 drm_connector_cleanup(connector
);
3028 void amdgpu_dm_connector_funcs_reset(struct drm_connector
*connector
)
3030 struct dm_connector_state
*state
=
3031 to_dm_connector_state(connector
->state
);
3033 if (connector
->state
)
3034 __drm_atomic_helper_connector_destroy_state(connector
->state
);
3038 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
3041 state
->scaling
= RMX_OFF
;
3042 state
->underscan_enable
= false;
3043 state
->underscan_hborder
= 0;
3044 state
->underscan_vborder
= 0;
3046 __drm_atomic_helper_connector_reset(connector
, &state
->base
);
3050 struct drm_connector_state
*
3051 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector
*connector
)
3053 struct dm_connector_state
*state
=
3054 to_dm_connector_state(connector
->state
);
3056 struct dm_connector_state
*new_state
=
3057 kmemdup(state
, sizeof(*state
), GFP_KERNEL
);
3062 __drm_atomic_helper_connector_duplicate_state(connector
, &new_state
->base
);
3064 new_state
->freesync_capable
= state
->freesync_capable
;
3065 new_state
->freesync_enable
= state
->freesync_enable
;
3067 return &new_state
->base
;
3070 static const struct drm_connector_funcs amdgpu_dm_connector_funcs
= {
3071 .reset
= amdgpu_dm_connector_funcs_reset
,
3072 .detect
= amdgpu_dm_connector_detect
,
3073 .fill_modes
= drm_helper_probe_single_connector_modes
,
3074 .destroy
= amdgpu_dm_connector_destroy
,
3075 .atomic_duplicate_state
= amdgpu_dm_connector_atomic_duplicate_state
,
3076 .atomic_destroy_state
= drm_atomic_helper_connector_destroy_state
,
3077 .atomic_set_property
= amdgpu_dm_connector_atomic_set_property
,
3078 .atomic_get_property
= amdgpu_dm_connector_atomic_get_property
/* Thin wrapper so the helper table can point at the dm implementation. */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
3086 static void create_eml_sink(struct amdgpu_dm_connector
*aconnector
)
3088 struct dc_sink_init_data init_params
= {
3089 .link
= aconnector
->dc_link
,
3090 .sink_signal
= SIGNAL_TYPE_VIRTUAL
3094 if (!aconnector
->base
.edid_blob_ptr
) {
3095 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
3096 aconnector
->base
.name
);
3098 aconnector
->base
.force
= DRM_FORCE_OFF
;
3099 aconnector
->base
.override_edid
= false;
3103 edid
= (struct edid
*) aconnector
->base
.edid_blob_ptr
->data
;
3105 aconnector
->edid
= edid
;
3107 aconnector
->dc_em_sink
= dc_link_add_remote_sink(
3108 aconnector
->dc_link
,
3110 (edid
->extensions
+ 1) * EDID_LENGTH
,
3113 if (aconnector
->base
.force
== DRM_FORCE_ON
)
3114 aconnector
->dc_sink
= aconnector
->dc_link
->local_sink
?
3115 aconnector
->dc_link
->local_sink
:
3116 aconnector
->dc_em_sink
;
3119 static void handle_edid_mgmt(struct amdgpu_dm_connector
*aconnector
)
3121 struct dc_link
*link
= (struct dc_link
*)aconnector
->dc_link
;
3124 * In case of headless boot with force on for DP managed connector
3125 * Those settings have to be != 0 to get initial modeset
3127 if (link
->connector_signal
== SIGNAL_TYPE_DISPLAY_PORT
) {
3128 link
->verified_link_cap
.lane_count
= LANE_COUNT_FOUR
;
3129 link
->verified_link_cap
.link_rate
= LINK_RATE_HIGH2
;
3133 aconnector
->base
.override_edid
= true;
3134 create_eml_sink(aconnector
);
3137 enum drm_mode_status
amdgpu_dm_connector_mode_valid(struct drm_connector
*connector
,
3138 struct drm_display_mode
*mode
)
3140 int result
= MODE_ERROR
;
3141 struct dc_sink
*dc_sink
;
3142 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
3143 /* TODO: Unhardcode stream count */
3144 struct dc_stream_state
*stream
;
3145 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
3146 enum dc_status dc_result
= DC_OK
;
3148 if ((mode
->flags
& DRM_MODE_FLAG_INTERLACE
) ||
3149 (mode
->flags
& DRM_MODE_FLAG_DBLSCAN
))
3153 * Only run this the first time mode_valid is called to initilialize
3156 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
&&
3157 !aconnector
->dc_em_sink
)
3158 handle_edid_mgmt(aconnector
);
3160 dc_sink
= to_amdgpu_dm_connector(connector
)->dc_sink
;
3162 if (dc_sink
== NULL
) {
3163 DRM_ERROR("dc_sink is NULL!\n");
3167 stream
= create_stream_for_sink(aconnector
, mode
, NULL
);
3168 if (stream
== NULL
) {
3169 DRM_ERROR("Failed to create stream for sink!\n");
3173 dc_result
= dc_validate_stream(adev
->dm
.dc
, stream
);
3175 if (dc_result
== DC_OK
)
3178 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
3184 dc_stream_release(stream
);
3187 /* TODO: error handling*/
3191 static const struct drm_connector_helper_funcs
3192 amdgpu_dm_connector_helper_funcs
= {
3194 * If hotplugging a second bigger display in FB Con mode, bigger resolution
3195 * modes will be filtered by drm_mode_validate_size(), and those modes
3196 * are missing after user start lightdm. So we need to renew modes list.
3197 * in get_modes call back, not just return the modes count
3199 .get_modes
= get_modes
,
3200 .mode_valid
= amdgpu_dm_connector_mode_valid
,
3201 .best_encoder
= drm_atomic_helper_best_encoder
/* Intentionally empty: CRTC disable is handled through the atomic commit path. */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
3208 static int dm_crtc_helper_atomic_check(struct drm_crtc
*crtc
,
3209 struct drm_crtc_state
*state
)
3211 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
3212 struct dc
*dc
= adev
->dm
.dc
;
3213 struct dm_crtc_state
*dm_crtc_state
= to_dm_crtc_state(state
);
3216 if (unlikely(!dm_crtc_state
->stream
&&
3217 modeset_required(state
, NULL
, dm_crtc_state
->stream
))) {
3222 /* In some use cases, like reset, no stream is attached */
3223 if (!dm_crtc_state
->stream
)
3226 if (dc_validate_stream(dc
, dm_crtc_state
->stream
) == DC_OK
)
3232 static bool dm_crtc_helper_mode_fixup(struct drm_crtc
*crtc
,
3233 const struct drm_display_mode
*mode
,
3234 struct drm_display_mode
*adjusted_mode
)
3239 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs
= {
3240 .disable
= dm_crtc_helper_disable
,
3241 .atomic_check
= dm_crtc_helper_atomic_check
,
3242 .mode_fixup
= dm_crtc_helper_mode_fixup
/* Intentionally empty: encoder disable is handled through the atomic commit path. */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}
/* No encoder-level validation required; always succeed. */
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	return 0;
}
3257 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs
= {
3258 .disable
= dm_encoder_helper_disable
,
3259 .atomic_check
= dm_encoder_helper_atomic_check
3262 static void dm_drm_plane_reset(struct drm_plane
*plane
)
3264 struct dm_plane_state
*amdgpu_state
= NULL
;
3267 plane
->funcs
->atomic_destroy_state(plane
, plane
->state
);
3269 amdgpu_state
= kzalloc(sizeof(*amdgpu_state
), GFP_KERNEL
);
3270 WARN_ON(amdgpu_state
== NULL
);
3273 plane
->state
= &amdgpu_state
->base
;
3274 plane
->state
->plane
= plane
;
3275 plane
->state
->rotation
= DRM_MODE_ROTATE_0
;
3279 static struct drm_plane_state
*
3280 dm_drm_plane_duplicate_state(struct drm_plane
*plane
)
3282 struct dm_plane_state
*dm_plane_state
, *old_dm_plane_state
;
3284 old_dm_plane_state
= to_dm_plane_state(plane
->state
);
3285 dm_plane_state
= kzalloc(sizeof(*dm_plane_state
), GFP_KERNEL
);
3286 if (!dm_plane_state
)
3289 __drm_atomic_helper_plane_duplicate_state(plane
, &dm_plane_state
->base
);
3291 if (old_dm_plane_state
->dc_state
) {
3292 dm_plane_state
->dc_state
= old_dm_plane_state
->dc_state
;
3293 dc_plane_state_retain(dm_plane_state
->dc_state
);
3296 return &dm_plane_state
->base
;
3299 void dm_drm_plane_destroy_state(struct drm_plane
*plane
,
3300 struct drm_plane_state
*state
)
3302 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(state
);
3304 if (dm_plane_state
->dc_state
)
3305 dc_plane_state_release(dm_plane_state
->dc_state
);
3307 drm_atomic_helper_plane_destroy_state(plane
, state
);
3310 static const struct drm_plane_funcs dm_plane_funcs
= {
3311 .update_plane
= drm_atomic_helper_update_plane
,
3312 .disable_plane
= drm_atomic_helper_disable_plane
,
3313 .destroy
= drm_primary_helper_destroy
,
3314 .reset
= dm_drm_plane_reset
,
3315 .atomic_duplicate_state
= dm_drm_plane_duplicate_state
,
3316 .atomic_destroy_state
= dm_drm_plane_destroy_state
,
3319 static int dm_plane_helper_prepare_fb(struct drm_plane
*plane
,
3320 struct drm_plane_state
*new_state
)
3322 struct amdgpu_framebuffer
*afb
;
3323 struct drm_gem_object
*obj
;
3324 struct amdgpu_device
*adev
;
3325 struct amdgpu_bo
*rbo
;
3326 uint64_t chroma_addr
= 0;
3327 struct dm_plane_state
*dm_plane_state_new
, *dm_plane_state_old
;
3328 unsigned int awidth
;
3332 dm_plane_state_old
= to_dm_plane_state(plane
->state
);
3333 dm_plane_state_new
= to_dm_plane_state(new_state
);
3335 if (!new_state
->fb
) {
3336 DRM_DEBUG_DRIVER("No FB bound\n");
3340 afb
= to_amdgpu_framebuffer(new_state
->fb
);
3341 obj
= new_state
->fb
->obj
[0];
3342 rbo
= gem_to_amdgpu_bo(obj
);
3343 adev
= amdgpu_ttm_adev(rbo
->tbo
.bdev
);
3344 r
= amdgpu_bo_reserve(rbo
, false);
3345 if (unlikely(r
!= 0))
3348 if (plane
->type
!= DRM_PLANE_TYPE_CURSOR
)
3349 domain
= amdgpu_display_supported_domains(adev
);
3351 domain
= AMDGPU_GEM_DOMAIN_VRAM
;
3353 r
= amdgpu_bo_pin(rbo
, domain
);
3354 if (unlikely(r
!= 0)) {
3355 if (r
!= -ERESTARTSYS
)
3356 DRM_ERROR("Failed to pin framebuffer with error %d\n", r
);
3357 amdgpu_bo_unreserve(rbo
);
3361 r
= amdgpu_ttm_alloc_gart(&rbo
->tbo
);
3362 if (unlikely(r
!= 0)) {
3363 amdgpu_bo_unpin(rbo
);
3364 amdgpu_bo_unreserve(rbo
);
3365 DRM_ERROR("%p bind failed\n", rbo
);
3368 amdgpu_bo_unreserve(rbo
);
3370 afb
->address
= amdgpu_bo_gpu_offset(rbo
);
3374 if (dm_plane_state_new
->dc_state
&&
3375 dm_plane_state_old
->dc_state
!= dm_plane_state_new
->dc_state
) {
3376 struct dc_plane_state
*plane_state
= dm_plane_state_new
->dc_state
;
3378 if (plane_state
->format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
3379 plane_state
->address
.grph
.addr
.low_part
= lower_32_bits(afb
->address
);
3380 plane_state
->address
.grph
.addr
.high_part
= upper_32_bits(afb
->address
);
3382 awidth
= ALIGN(new_state
->fb
->width
, 64);
3383 plane_state
->address
.type
= PLN_ADDR_TYPE_VIDEO_PROGRESSIVE
;
3384 plane_state
->address
.video_progressive
.luma_addr
.low_part
3385 = lower_32_bits(afb
->address
);
3386 plane_state
->address
.video_progressive
.luma_addr
.high_part
3387 = upper_32_bits(afb
->address
);
3388 chroma_addr
= afb
->address
+ (u64
)awidth
* new_state
->fb
->height
;
3389 plane_state
->address
.video_progressive
.chroma_addr
.low_part
3390 = lower_32_bits(chroma_addr
);
3391 plane_state
->address
.video_progressive
.chroma_addr
.high_part
3392 = upper_32_bits(chroma_addr
);
3399 static void dm_plane_helper_cleanup_fb(struct drm_plane
*plane
,
3400 struct drm_plane_state
*old_state
)
3402 struct amdgpu_bo
*rbo
;
3408 rbo
= gem_to_amdgpu_bo(old_state
->fb
->obj
[0]);
3409 r
= amdgpu_bo_reserve(rbo
, false);
3411 DRM_ERROR("failed to reserve rbo before unpin\n");
3415 amdgpu_bo_unpin(rbo
);
3416 amdgpu_bo_unreserve(rbo
);
3417 amdgpu_bo_unref(&rbo
);
3420 static int dm_plane_atomic_check(struct drm_plane
*plane
,
3421 struct drm_plane_state
*state
)
3423 struct amdgpu_device
*adev
= plane
->dev
->dev_private
;
3424 struct dc
*dc
= adev
->dm
.dc
;
3425 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(state
);
3427 if (!dm_plane_state
->dc_state
)
3430 if (!fill_rects_from_plane_state(state
, dm_plane_state
->dc_state
))
3433 if (dc_validate_plane(dc
, dm_plane_state
->dc_state
) == DC_OK
)
3439 static const struct drm_plane_helper_funcs dm_plane_helper_funcs
= {
3440 .prepare_fb
= dm_plane_helper_prepare_fb
,
3441 .cleanup_fb
= dm_plane_helper_cleanup_fb
,
3442 .atomic_check
= dm_plane_atomic_check
,
3446 * TODO: these are currently initialized to rgb formats only.
3447 * For future use cases we should either initialize them dynamically based on
3448 * plane capabilities, or initialize this array to all formats, so internal drm
3449 * check will succeed, and let DC implement proper check
3451 static const uint32_t rgb_formats
[] = {
3453 DRM_FORMAT_XRGB8888
,
3454 DRM_FORMAT_ARGB8888
,
3455 DRM_FORMAT_RGBA8888
,
3456 DRM_FORMAT_XRGB2101010
,
3457 DRM_FORMAT_XBGR2101010
,
3458 DRM_FORMAT_ARGB2101010
,
3459 DRM_FORMAT_ABGR2101010
,
3460 DRM_FORMAT_XBGR8888
,
3461 DRM_FORMAT_ABGR8888
,
3464 static const uint32_t yuv_formats
[] = {
3469 static const u32 cursor_formats
[] = {
3473 static int amdgpu_dm_plane_init(struct amdgpu_display_manager
*dm
,
3474 struct amdgpu_plane
*aplane
,
3475 unsigned long possible_crtcs
)
3479 switch (aplane
->base
.type
) {
3480 case DRM_PLANE_TYPE_PRIMARY
:
3481 res
= drm_universal_plane_init(
3487 ARRAY_SIZE(rgb_formats
),
3488 NULL
, aplane
->base
.type
, NULL
);
3490 case DRM_PLANE_TYPE_OVERLAY
:
3491 res
= drm_universal_plane_init(
3497 ARRAY_SIZE(yuv_formats
),
3498 NULL
, aplane
->base
.type
, NULL
);
3500 case DRM_PLANE_TYPE_CURSOR
:
3501 res
= drm_universal_plane_init(
3507 ARRAY_SIZE(cursor_formats
),
3508 NULL
, aplane
->base
.type
, NULL
);
3512 drm_plane_helper_add(&aplane
->base
, &dm_plane_helper_funcs
);
3514 /* Create (reset) the plane state */
3515 if (aplane
->base
.funcs
->reset
)
3516 aplane
->base
.funcs
->reset(&aplane
->base
);
3522 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager
*dm
,
3523 struct drm_plane
*plane
,
3524 uint32_t crtc_index
)
3526 struct amdgpu_crtc
*acrtc
= NULL
;
3527 struct amdgpu_plane
*cursor_plane
;
3531 cursor_plane
= kzalloc(sizeof(*cursor_plane
), GFP_KERNEL
);
3535 cursor_plane
->base
.type
= DRM_PLANE_TYPE_CURSOR
;
3536 res
= amdgpu_dm_plane_init(dm
, cursor_plane
, 0);
3538 acrtc
= kzalloc(sizeof(struct amdgpu_crtc
), GFP_KERNEL
);
3542 res
= drm_crtc_init_with_planes(
3546 &cursor_plane
->base
,
3547 &amdgpu_dm_crtc_funcs
, NULL
);
3552 drm_crtc_helper_add(&acrtc
->base
, &amdgpu_dm_crtc_helper_funcs
);
3554 /* Create (reset) the plane state */
3555 if (acrtc
->base
.funcs
->reset
)
3556 acrtc
->base
.funcs
->reset(&acrtc
->base
);
3558 acrtc
->max_cursor_width
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
3559 acrtc
->max_cursor_height
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
3561 acrtc
->crtc_id
= crtc_index
;
3562 acrtc
->base
.enabled
= false;
3563 acrtc
->otg_inst
= -1;
3565 dm
->adev
->mode_info
.crtcs
[crtc_index
] = acrtc
;
3566 drm_crtc_enable_color_mgmt(&acrtc
->base
, MAX_COLOR_LUT_ENTRIES
,
3567 true, MAX_COLOR_LUT_ENTRIES
);
3568 drm_mode_crtc_set_gamma_size(&acrtc
->base
, MAX_COLOR_LEGACY_LUT_ENTRIES
);
3574 kfree(cursor_plane
);
/*
 * to_drm_connector_type() - map a DC signal_type onto the DRM connector
 * type constant reported to userspace.
 *
 * Both DisplayPort signal variants (SST and MST) collapse to
 * DRM_MODE_CONNECTOR_DisplayPort, and both DVI link variants collapse to
 * DRM_MODE_CONNECTOR_DVID; any unhandled signal falls through to
 * DRM_MODE_CONNECTOR_Unknown.
 *
 * NOTE(review): the source text here is fragmented by extraction (stale
 * line numbers embedded in the content, statements split across physical
 * lines, some interior lines missing); code bytes are preserved unchanged.
 */
3579 static int to_drm_connector_type(enum signal_type st
)
3582 case SIGNAL_TYPE_HDMI_TYPE_A
:
3583 return DRM_MODE_CONNECTOR_HDMIA
;
3584 case SIGNAL_TYPE_EDP
:
3585 return DRM_MODE_CONNECTOR_eDP
;
3586 case SIGNAL_TYPE_LVDS
:
3587 return DRM_MODE_CONNECTOR_LVDS
;
3588 case SIGNAL_TYPE_RGB
:
3589 return DRM_MODE_CONNECTOR_VGA
;
3590 case SIGNAL_TYPE_DISPLAY_PORT
:
3591 case SIGNAL_TYPE_DISPLAY_PORT_MST
:
3592 return DRM_MODE_CONNECTOR_DisplayPort
;
3593 case SIGNAL_TYPE_DVI_DUAL_LINK
:
3594 case SIGNAL_TYPE_DVI_SINGLE_LINK
:
3595 return DRM_MODE_CONNECTOR_DVID
;
3596 case SIGNAL_TYPE_VIRTUAL
:
3597 return DRM_MODE_CONNECTOR_VIRTUAL
;
3600 return DRM_MODE_CONNECTOR_Unknown
;
/*
 * amdgpu_dm_get_native_mode() - cache the connector's preferred probed
 * mode in the encoder's native_mode field.
 *
 * Looks up the best encoder via the connector helper vtable, zeroes the
 * cached native_mode clock, then scans connector->probed_modes for a mode
 * flagged DRM_MODE_TYPE_PREFERRED and copies it into
 * amdgpu_encoder->native_mode. If no encoder is found the function bails
 * out early (the early-return body sits on a line missing from this
 * extraction).
 *
 * NOTE(review): fragmented extraction — code bytes preserved unchanged.
 */
3604 static void amdgpu_dm_get_native_mode(struct drm_connector
*connector
)
3606 const struct drm_connector_helper_funcs
*helper
=
3607 connector
->helper_private
;
3608 struct drm_encoder
*encoder
;
3609 struct amdgpu_encoder
*amdgpu_encoder
;
3611 encoder
= helper
->best_encoder(connector
);
3613 if (encoder
== NULL
)
3616 amdgpu_encoder
= to_amdgpu_encoder(encoder
);
/* Invalidate any previously cached native mode before rescanning. */
3618 amdgpu_encoder
->native_mode
.clock
= 0;
3620 if (!list_empty(&connector
->probed_modes
)) {
3621 struct drm_display_mode
*preferred_mode
= NULL
;
3623 list_for_each_entry(preferred_mode
,
3624 &connector
->probed_modes
,
3626 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
)
3627 amdgpu_encoder
->native_mode
= *preferred_mode
;
/*
 * amdgpu_dm_create_common_mode() - derive a new display mode from the
 * encoder's cached native mode.
 *
 * Duplicates native_mode on @encoder's device, overrides hdisplay and
 * vdisplay with the requested size, clears the PREFERRED flag, and stamps
 * the mode name. Returns the new mode (NULL-check of the duplicate sits on
 * a line missing from this extraction).
 *
 * NOTE(review): strncpy() does not guarantee NUL-termination when the
 * source string fills DRM_DISPLAY_MODE_LEN — mode->name can be left
 * unterminated; confirm callers only pass bounded literal names.
 *
 * NOTE(review): fragmented extraction — code bytes preserved unchanged.
 */
3635 static struct drm_display_mode
*
3636 amdgpu_dm_create_common_mode(struct drm_encoder
*encoder
,
3638 int hdisplay
, int vdisplay
)
3640 struct drm_device
*dev
= encoder
->dev
;
3641 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
3642 struct drm_display_mode
*mode
= NULL
;
3643 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
3645 mode
= drm_mode_duplicate(dev
, native_mode
);
3650 mode
->hdisplay
= hdisplay
;
3651 mode
->vdisplay
= vdisplay
;
/* The derived mode must not shadow the panel's real preferred mode. */
3652 mode
->type
&= ~DRM_MODE_TYPE_PREFERRED
;
3653 strncpy(mode
->name
, name
, DRM_DISPLAY_MODE_LEN
);
3659 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder
*encoder
,
3660 struct drm_connector
*connector
)
3662 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
3663 struct drm_display_mode
*mode
= NULL
;
3664 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
3665 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
3666 to_amdgpu_dm_connector(connector
);
3670 char name
[DRM_DISPLAY_MODE_LEN
];
3673 } common_modes
[] = {
3674 { "640x480", 640, 480},
3675 { "800x600", 800, 600},
3676 { "1024x768", 1024, 768},
3677 { "1280x720", 1280, 720},
3678 { "1280x800", 1280, 800},
3679 {"1280x1024", 1280, 1024},
3680 { "1440x900", 1440, 900},
3681 {"1680x1050", 1680, 1050},
3682 {"1600x1200", 1600, 1200},
3683 {"1920x1080", 1920, 1080},
3684 {"1920x1200", 1920, 1200}
3687 n
= ARRAY_SIZE(common_modes
);
3689 for (i
= 0; i
< n
; i
++) {
3690 struct drm_display_mode
*curmode
= NULL
;
3691 bool mode_existed
= false;
3693 if (common_modes
[i
].w
> native_mode
->hdisplay
||
3694 common_modes
[i
].h
> native_mode
->vdisplay
||
3695 (common_modes
[i
].w
== native_mode
->hdisplay
&&
3696 common_modes
[i
].h
== native_mode
->vdisplay
))
3699 list_for_each_entry(curmode
, &connector
->probed_modes
, head
) {
3700 if (common_modes
[i
].w
== curmode
->hdisplay
&&
3701 common_modes
[i
].h
== curmode
->vdisplay
) {
3702 mode_existed
= true;
3710 mode
= amdgpu_dm_create_common_mode(encoder
,
3711 common_modes
[i
].name
, common_modes
[i
].w
,
3713 drm_mode_probed_add(connector
, mode
);
3714 amdgpu_dm_connector
->num_modes
++;
3718 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector
*connector
,
3721 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
3722 to_amdgpu_dm_connector(connector
);
3725 /* empty probed_modes */
3726 INIT_LIST_HEAD(&connector
->probed_modes
);
3727 amdgpu_dm_connector
->num_modes
=
3728 drm_add_edid_modes(connector
, edid
);
3730 amdgpu_dm_get_native_mode(connector
);
3732 amdgpu_dm_connector
->num_modes
= 0;
3736 static int amdgpu_dm_connector_get_modes(struct drm_connector
*connector
)
3738 const struct drm_connector_helper_funcs
*helper
=
3739 connector
->helper_private
;
3740 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
3741 to_amdgpu_dm_connector(connector
);
3742 struct drm_encoder
*encoder
;
3743 struct edid
*edid
= amdgpu_dm_connector
->edid
;
3745 encoder
= helper
->best_encoder(connector
);
3747 if (!edid
|| !drm_edid_is_valid(edid
)) {
3748 amdgpu_dm_connector
->num_modes
=
3749 drm_add_modes_noedid(connector
, 640, 480);
3751 amdgpu_dm_connector_ddc_get_modes(connector
, edid
);
3752 amdgpu_dm_connector_add_common_modes(encoder
, connector
);
3754 amdgpu_dm_fbc_init(connector
);
3756 return amdgpu_dm_connector
->num_modes
;
3759 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager
*dm
,
3760 struct amdgpu_dm_connector
*aconnector
,
3762 struct dc_link
*link
,
3765 struct amdgpu_device
*adev
= dm
->ddev
->dev_private
;
3767 aconnector
->connector_id
= link_index
;
3768 aconnector
->dc_link
= link
;
3769 aconnector
->base
.interlace_allowed
= false;
3770 aconnector
->base
.doublescan_allowed
= false;
3771 aconnector
->base
.stereo_allowed
= false;
3772 aconnector
->base
.dpms
= DRM_MODE_DPMS_OFF
;
3773 aconnector
->hpd
.hpd
= AMDGPU_HPD_NONE
; /* not used */
3774 mutex_init(&aconnector
->hpd_lock
);
3777 * configure support HPD hot plug connector->polled default value is 0
3778 * which means HPD hot plug not supported
3780 switch (connector_type
) {
3781 case DRM_MODE_CONNECTOR_HDMIA
:
3782 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
3783 aconnector
->base
.ycbcr_420_allowed
=
3784 link
->link_enc
->features
.ycbcr420_supported
? true : false;
3786 case DRM_MODE_CONNECTOR_DisplayPort
:
3787 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
3788 aconnector
->base
.ycbcr_420_allowed
=
3789 link
->link_enc
->features
.ycbcr420_supported
? true : false;
3791 case DRM_MODE_CONNECTOR_DVID
:
3792 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
3798 drm_object_attach_property(&aconnector
->base
.base
,
3799 dm
->ddev
->mode_config
.scaling_mode_property
,
3800 DRM_MODE_SCALE_NONE
);
3802 drm_object_attach_property(&aconnector
->base
.base
,
3803 adev
->mode_info
.underscan_property
,
3805 drm_object_attach_property(&aconnector
->base
.base
,
3806 adev
->mode_info
.underscan_hborder_property
,
3808 drm_object_attach_property(&aconnector
->base
.base
,
3809 adev
->mode_info
.underscan_vborder_property
,
3811 drm_object_attach_property(&aconnector
->base
.base
,
3812 adev
->mode_info
.max_bpc_property
,
3817 static int amdgpu_dm_i2c_xfer(struct i2c_adapter
*i2c_adap
,
3818 struct i2c_msg
*msgs
, int num
)
3820 struct amdgpu_i2c_adapter
*i2c
= i2c_get_adapdata(i2c_adap
);
3821 struct ddc_service
*ddc_service
= i2c
->ddc_service
;
3822 struct i2c_command cmd
;
3826 cmd
.payloads
= kcalloc(num
, sizeof(struct i2c_payload
), GFP_KERNEL
);
3831 cmd
.number_of_payloads
= num
;
3832 cmd
.engine
= I2C_COMMAND_ENGINE_DEFAULT
;
3835 for (i
= 0; i
< num
; i
++) {
3836 cmd
.payloads
[i
].write
= !(msgs
[i
].flags
& I2C_M_RD
);
3837 cmd
.payloads
[i
].address
= msgs
[i
].addr
;
3838 cmd
.payloads
[i
].length
= msgs
[i
].len
;
3839 cmd
.payloads
[i
].data
= msgs
[i
].buf
;
3843 ddc_service
->ctx
->dc
,
3844 ddc_service
->ddc_pin
->hw_info
.ddc_channel
,
3848 kfree(cmd
.payloads
);
/*
 * amdgpu_dm_i2c_func() - report the adapter's capabilities: plain I2C
 * transfers plus emulated SMBus commands.
 */
3852 static u32
amdgpu_dm_i2c_func(struct i2c_adapter
*adap
)
3854 return I2C_FUNC_I2C
| I2C_FUNC_SMBUS_EMUL
;
/*
 * I2C algorithm vtable for the DM-owned DDC adapters: transfers are
 * routed through amdgpu_dm_i2c_xfer, capabilities come from
 * amdgpu_dm_i2c_func.
 */
3857 static const struct i2c_algorithm amdgpu_dm_i2c_algo
= {
3858 .master_xfer
= amdgpu_dm_i2c_xfer
,
3859 .functionality
= amdgpu_dm_i2c_func
,
3862 static struct amdgpu_i2c_adapter
*
3863 create_i2c(struct ddc_service
*ddc_service
,
3867 struct amdgpu_device
*adev
= ddc_service
->ctx
->driver_context
;
3868 struct amdgpu_i2c_adapter
*i2c
;
3870 i2c
= kzalloc(sizeof(struct amdgpu_i2c_adapter
), GFP_KERNEL
);
3873 i2c
->base
.owner
= THIS_MODULE
;
3874 i2c
->base
.class = I2C_CLASS_DDC
;
3875 i2c
->base
.dev
.parent
= &adev
->pdev
->dev
;
3876 i2c
->base
.algo
= &amdgpu_dm_i2c_algo
;
3877 snprintf(i2c
->base
.name
, sizeof(i2c
->base
.name
), "AMDGPU DM i2c hw bus %d", link_index
);
3878 i2c_set_adapdata(&i2c
->base
, i2c
);
3879 i2c
->ddc_service
= ddc_service
;
3880 i2c
->ddc_service
->ddc_pin
->hw_info
.ddc_channel
= link_index
;
3887 * Note: this function assumes that dc_link_detect() was called for the
3888 * dc_link which will be represented by this aconnector.
3890 static int amdgpu_dm_connector_init(struct amdgpu_display_manager
*dm
,
3891 struct amdgpu_dm_connector
*aconnector
,
3892 uint32_t link_index
,
3893 struct amdgpu_encoder
*aencoder
)
3897 struct dc
*dc
= dm
->dc
;
3898 struct dc_link
*link
= dc_get_link_at_index(dc
, link_index
);
3899 struct amdgpu_i2c_adapter
*i2c
;
3901 link
->priv
= aconnector
;
3903 DRM_DEBUG_DRIVER("%s()\n", __func__
);
3905 i2c
= create_i2c(link
->ddc
, link
->link_index
, &res
);
3907 DRM_ERROR("Failed to create i2c adapter data\n");
3911 aconnector
->i2c
= i2c
;
3912 res
= i2c_add_adapter(&i2c
->base
);
3915 DRM_ERROR("Failed to register hw i2c %d\n", link
->link_index
);
3919 connector_type
= to_drm_connector_type(link
->connector_signal
);
3921 res
= drm_connector_init(
3924 &amdgpu_dm_connector_funcs
,
3928 DRM_ERROR("connector_init failed\n");
3929 aconnector
->connector_id
= -1;
3933 drm_connector_helper_add(
3935 &amdgpu_dm_connector_helper_funcs
);
3937 if (aconnector
->base
.funcs
->reset
)
3938 aconnector
->base
.funcs
->reset(&aconnector
->base
);
3940 amdgpu_dm_connector_init_helper(
3947 drm_connector_attach_encoder(
3948 &aconnector
->base
, &aencoder
->base
);
3950 drm_connector_register(&aconnector
->base
);
3951 #if defined(CONFIG_DEBUG_FS)
3952 res
= connector_debugfs_init(aconnector
);
3954 DRM_ERROR("Failed to create debugfs for connector");
3959 if (connector_type
== DRM_MODE_CONNECTOR_DisplayPort
3960 || connector_type
== DRM_MODE_CONNECTOR_eDP
)
3961 amdgpu_dm_initialize_dp_connector(dm
, aconnector
);
3966 aconnector
->i2c
= NULL
;
3971 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device
*adev
)
3973 switch (adev
->mode_info
.num_crtc
) {
3990 static int amdgpu_dm_encoder_init(struct drm_device
*dev
,
3991 struct amdgpu_encoder
*aencoder
,
3992 uint32_t link_index
)
3994 struct amdgpu_device
*adev
= dev
->dev_private
;
3996 int res
= drm_encoder_init(dev
,
3998 &amdgpu_dm_encoder_funcs
,
3999 DRM_MODE_ENCODER_TMDS
,
4002 aencoder
->base
.possible_crtcs
= amdgpu_dm_get_encoder_crtc_mask(adev
);
4005 aencoder
->encoder_id
= link_index
;
4007 aencoder
->encoder_id
= -1;
4009 drm_encoder_helper_add(&aencoder
->base
, &amdgpu_dm_encoder_helper_funcs
);
4014 static void manage_dm_interrupts(struct amdgpu_device
*adev
,
4015 struct amdgpu_crtc
*acrtc
,
4019 * this is not correct translation but will work as soon as VBLANK
4020 * constant is the same as PFLIP
4023 amdgpu_display_crtc_idx_to_irq_type(
4028 drm_crtc_vblank_on(&acrtc
->base
);
4031 &adev
->pageflip_irq
,
4037 &adev
->pageflip_irq
,
4039 drm_crtc_vblank_off(&acrtc
->base
);
/*
 * is_scaling_state_different() - detect whether the scaling/underscan
 * configuration changed between two connector states.
 *
 * Change is signalled when: the scaling mode differs; underscan was just
 * disabled while the old state had non-zero borders; underscan was just
 * enabled with non-zero borders; or either border width changed while
 * underscan stayed on. (The `static bool` specifier and the actual
 * `return true`/`return false` statements sit on lines missing from this
 * extraction.)
 *
 * NOTE(review): fragmented extraction — code bytes preserved unchanged.
 */
4044 is_scaling_state_different(const struct dm_connector_state
*dm_state
,
4045 const struct dm_connector_state
*old_dm_state
)
4047 if (dm_state
->scaling
!= old_dm_state
->scaling
)
4049 if (!dm_state
->underscan_enable
&& old_dm_state
->underscan_enable
) {
4050 if (old_dm_state
->underscan_hborder
!= 0 && old_dm_state
->underscan_vborder
!= 0)
4052 } else if (dm_state
->underscan_enable
&& !old_dm_state
->underscan_enable
) {
4053 if (dm_state
->underscan_hborder
!= 0 && dm_state
->underscan_vborder
!= 0)
4055 } else if (dm_state
->underscan_hborder
!= old_dm_state
->underscan_hborder
||
4056 dm_state
->underscan_vborder
!= old_dm_state
->underscan_vborder
)
/*
 * remove_stream() - clear the CRTC's bookkeeping after its DC stream is
 * torn down on the update-mode path: invalidate the cached OTG instance
 * and mark the CRTC disabled.
 *
 * NOTE(review): fragmented extraction — code bytes preserved unchanged.
 */
4061 static void remove_stream(struct amdgpu_device
*adev
,
4062 struct amdgpu_crtc
*acrtc
,
4063 struct dc_stream_state
*stream
)
4065 /* this is the update mode case */
4067 acrtc
->otg_inst
= -1;
4068 acrtc
->enabled
= false;
4071 static int get_cursor_position(struct drm_plane
*plane
, struct drm_crtc
*crtc
,
4072 struct dc_cursor_position
*position
)
4074 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
4076 int xorigin
= 0, yorigin
= 0;
4078 if (!crtc
|| !plane
->state
->fb
) {
4079 position
->enable
= false;
4085 if ((plane
->state
->crtc_w
> amdgpu_crtc
->max_cursor_width
) ||
4086 (plane
->state
->crtc_h
> amdgpu_crtc
->max_cursor_height
)) {
4087 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
4089 plane
->state
->crtc_w
,
4090 plane
->state
->crtc_h
);
4094 x
= plane
->state
->crtc_x
;
4095 y
= plane
->state
->crtc_y
;
4096 /* avivo cursor are offset into the total surface */
4097 x
+= crtc
->primary
->state
->src_x
>> 16;
4098 y
+= crtc
->primary
->state
->src_y
>> 16;
4100 xorigin
= min(-x
, amdgpu_crtc
->max_cursor_width
- 1);
4104 yorigin
= min(-y
, amdgpu_crtc
->max_cursor_height
- 1);
4107 position
->enable
= true;
4110 position
->x_hotspot
= xorigin
;
4111 position
->y_hotspot
= yorigin
;
4116 static void handle_cursor_update(struct drm_plane
*plane
,
4117 struct drm_plane_state
*old_plane_state
)
4119 struct amdgpu_framebuffer
*afb
= to_amdgpu_framebuffer(plane
->state
->fb
);
4120 struct drm_crtc
*crtc
= afb
? plane
->state
->crtc
: old_plane_state
->crtc
;
4121 struct dm_crtc_state
*crtc_state
= crtc
? to_dm_crtc_state(crtc
->state
) : NULL
;
4122 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
4123 uint64_t address
= afb
? afb
->address
: 0;
4124 struct dc_cursor_position position
;
4125 struct dc_cursor_attributes attributes
;
4128 if (!plane
->state
->fb
&& !old_plane_state
->fb
)
4131 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
4133 amdgpu_crtc
->crtc_id
,
4134 plane
->state
->crtc_w
,
4135 plane
->state
->crtc_h
);
4137 ret
= get_cursor_position(plane
, crtc
, &position
);
4141 if (!position
.enable
) {
4142 /* turn off cursor */
4143 if (crtc_state
&& crtc_state
->stream
)
4144 dc_stream_set_cursor_position(crtc_state
->stream
,
4149 amdgpu_crtc
->cursor_width
= plane
->state
->crtc_w
;
4150 amdgpu_crtc
->cursor_height
= plane
->state
->crtc_h
;
4152 attributes
.address
.high_part
= upper_32_bits(address
);
4153 attributes
.address
.low_part
= lower_32_bits(address
);
4154 attributes
.width
= plane
->state
->crtc_w
;
4155 attributes
.height
= plane
->state
->crtc_h
;
4156 attributes
.color_format
= CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA
;
4157 attributes
.rotation_angle
= 0;
4158 attributes
.attribute_flags
.value
= 0;
4160 attributes
.pitch
= attributes
.width
;
4162 if (crtc_state
->stream
) {
4163 if (!dc_stream_set_cursor_attributes(crtc_state
->stream
,
4165 DRM_ERROR("DC failed to set cursor attributes\n");
4167 if (!dc_stream_set_cursor_position(crtc_state
->stream
,
4169 DRM_ERROR("DC failed to set cursor position\n");
/*
 * prepare_flip_isr() - hand the CRTC's pending pageflip event over to the
 * flip interrupt handler.
 *
 * Caller must hold dev->event_lock (enforced by assert_spin_locked).
 * Moves the event out of the CRTC state into acrtc->event, marks the flip
 * AMDGPU_FLIP_SUBMITTED, and NULLs the state's event pointer so it is not
 * delivered twice. WARN_ON fires if an event is already queued.
 *
 * NOTE(review): fragmented extraction — code bytes preserved unchanged.
 */
4173 static void prepare_flip_isr(struct amdgpu_crtc
*acrtc
)
4176 assert_spin_locked(&acrtc
->base
.dev
->event_lock
);
4177 WARN_ON(acrtc
->event
);
4179 acrtc
->event
= acrtc
->base
.state
->event
;
4181 /* Set the flip status */
4182 acrtc
->pflip_status
= AMDGPU_FLIP_SUBMITTED
;
4184 /* Mark this event as consumed */
4185 acrtc
->base
.state
->event
= NULL
;
4187 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
4187 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
4194 * Waits on all BO's fences and for proper vblank count
4196 static void amdgpu_dm_do_flip(struct drm_crtc
*crtc
,
4197 struct drm_framebuffer
*fb
,
4199 struct dc_state
*state
)
4201 unsigned long flags
;
4202 uint32_t target_vblank
;
4204 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4205 struct amdgpu_framebuffer
*afb
= to_amdgpu_framebuffer(fb
);
4206 struct amdgpu_bo
*abo
= gem_to_amdgpu_bo(fb
->obj
[0]);
4207 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
4208 bool async_flip
= (crtc
->state
->pageflip_flags
& DRM_MODE_PAGE_FLIP_ASYNC
) != 0;
4209 struct dc_flip_addrs addr
= { {0} };
4210 /* TODO eliminate or rename surface_update */
4211 struct dc_surface_update surface_updates
[1] = { {0} };
4212 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(crtc
->state
);
4213 struct dc_stream_status
*stream_status
;
4216 /* Prepare wait for target vblank early - before the fence-waits */
4217 target_vblank
= target
- (uint32_t)drm_crtc_vblank_count(crtc
) +
4218 amdgpu_get_vblank_counter_kms(crtc
->dev
, acrtc
->crtc_id
);
4221 * TODO This might fail and hence better not used, wait
4222 * explicitly on fences instead
4223 * and in general should be called for
4224 * blocking commit to as per framework helpers
4226 r
= amdgpu_bo_reserve(abo
, true);
4227 if (unlikely(r
!= 0)) {
4228 DRM_ERROR("failed to reserve buffer before flip\n");
4232 /* Wait for all fences on this FB */
4233 WARN_ON(reservation_object_wait_timeout_rcu(abo
->tbo
.resv
, true, false,
4234 MAX_SCHEDULE_TIMEOUT
) < 0);
4236 amdgpu_bo_unreserve(abo
);
4239 * Wait until we're out of the vertical blank period before the one
4240 * targeted by the flip
4242 while ((acrtc
->enabled
&&
4243 (amdgpu_display_get_crtc_scanoutpos(adev
->ddev
, acrtc
->crtc_id
,
4244 0, &vpos
, &hpos
, NULL
,
4245 NULL
, &crtc
->hwmode
)
4246 & (DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_IN_VBLANK
)) ==
4247 (DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_IN_VBLANK
) &&
4248 (int)(target_vblank
-
4249 amdgpu_get_vblank_counter_kms(adev
->ddev
, acrtc
->crtc_id
)) > 0)) {
4250 usleep_range(1000, 1100);
4254 spin_lock_irqsave(&crtc
->dev
->event_lock
, flags
);
4256 WARN_ON(acrtc
->pflip_status
!= AMDGPU_FLIP_NONE
);
4257 WARN_ON(!acrtc_state
->stream
);
4259 addr
.address
.grph
.addr
.low_part
= lower_32_bits(afb
->address
);
4260 addr
.address
.grph
.addr
.high_part
= upper_32_bits(afb
->address
);
4261 addr
.flip_immediate
= async_flip
;
4264 if (acrtc
->base
.state
->event
)
4265 prepare_flip_isr(acrtc
);
4267 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
4269 stream_status
= dc_stream_get_status(acrtc_state
->stream
);
4270 if (!stream_status
) {
4271 DRM_ERROR("No stream status for CRTC: id=%d\n",
4276 surface_updates
->surface
= stream_status
->plane_states
[0];
4277 if (!surface_updates
->surface
) {
4278 DRM_ERROR("No surface for CRTC: id=%d\n",
4282 surface_updates
->flip_addr
= &addr
;
4284 dc_commit_updates_for_stream(adev
->dm
.dc
,
4287 acrtc_state
->stream
,
4289 &surface_updates
->surface
,
4292 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
4294 addr
.address
.grph
.addr
.high_part
,
4295 addr
.address
.grph
.addr
.low_part
);
4299 * TODO this whole function needs to go
4301 * dc_surface_update is needlessly complex. See if we can just replace this
4302 * with a dc_plane_state and follow the atomic model a bit more closely here.
4304 static bool commit_planes_to_stream(
4306 struct dc_plane_state
**plane_states
,
4307 uint8_t new_plane_count
,
4308 struct dm_crtc_state
*dm_new_crtc_state
,
4309 struct dm_crtc_state
*dm_old_crtc_state
,
4310 struct dc_state
*state
)
4312 /* no need to dynamically allocate this. it's pretty small */
4313 struct dc_surface_update updates
[MAX_SURFACES
];
4314 struct dc_flip_addrs
*flip_addr
;
4315 struct dc_plane_info
*plane_info
;
4316 struct dc_scaling_info
*scaling_info
;
4318 struct dc_stream_state
*dc_stream
= dm_new_crtc_state
->stream
;
4319 struct dc_stream_update
*stream_update
=
4320 kzalloc(sizeof(struct dc_stream_update
), GFP_KERNEL
);
4322 if (!stream_update
) {
4323 BREAK_TO_DEBUGGER();
4327 flip_addr
= kcalloc(MAX_SURFACES
, sizeof(struct dc_flip_addrs
),
4329 plane_info
= kcalloc(MAX_SURFACES
, sizeof(struct dc_plane_info
),
4331 scaling_info
= kcalloc(MAX_SURFACES
, sizeof(struct dc_scaling_info
),
4334 if (!flip_addr
|| !plane_info
|| !scaling_info
) {
4337 kfree(scaling_info
);
4338 kfree(stream_update
);
4342 memset(updates
, 0, sizeof(updates
));
4344 stream_update
->src
= dc_stream
->src
;
4345 stream_update
->dst
= dc_stream
->dst
;
4346 stream_update
->out_transfer_func
= dc_stream
->out_transfer_func
;
4348 if (dm_new_crtc_state
->freesync_enabled
!= dm_old_crtc_state
->freesync_enabled
) {
4349 stream_update
->vrr_infopacket
= &dc_stream
->vrr_infopacket
;
4350 stream_update
->adjust
= &dc_stream
->adjust
;
4353 for (i
= 0; i
< new_plane_count
; i
++) {
4354 updates
[i
].surface
= plane_states
[i
];
4356 (struct dc_gamma
*)plane_states
[i
]->gamma_correction
;
4357 updates
[i
].in_transfer_func
= plane_states
[i
]->in_transfer_func
;
4358 flip_addr
[i
].address
= plane_states
[i
]->address
;
4359 flip_addr
[i
].flip_immediate
= plane_states
[i
]->flip_immediate
;
4360 plane_info
[i
].color_space
= plane_states
[i
]->color_space
;
4361 plane_info
[i
].format
= plane_states
[i
]->format
;
4362 plane_info
[i
].plane_size
= plane_states
[i
]->plane_size
;
4363 plane_info
[i
].rotation
= plane_states
[i
]->rotation
;
4364 plane_info
[i
].horizontal_mirror
= plane_states
[i
]->horizontal_mirror
;
4365 plane_info
[i
].stereo_format
= plane_states
[i
]->stereo_format
;
4366 plane_info
[i
].tiling_info
= plane_states
[i
]->tiling_info
;
4367 plane_info
[i
].visible
= plane_states
[i
]->visible
;
4368 plane_info
[i
].per_pixel_alpha
= plane_states
[i
]->per_pixel_alpha
;
4369 plane_info
[i
].dcc
= plane_states
[i
]->dcc
;
4370 scaling_info
[i
].scaling_quality
= plane_states
[i
]->scaling_quality
;
4371 scaling_info
[i
].src_rect
= plane_states
[i
]->src_rect
;
4372 scaling_info
[i
].dst_rect
= plane_states
[i
]->dst_rect
;
4373 scaling_info
[i
].clip_rect
= plane_states
[i
]->clip_rect
;
4375 updates
[i
].flip_addr
= &flip_addr
[i
];
4376 updates
[i
].plane_info
= &plane_info
[i
];
4377 updates
[i
].scaling_info
= &scaling_info
[i
];
4380 dc_commit_updates_for_stream(
4384 dc_stream
, stream_update
, plane_states
, state
);
4388 kfree(scaling_info
);
4389 kfree(stream_update
);
4393 static void amdgpu_dm_commit_planes(struct drm_atomic_state
*state
,
4394 struct drm_device
*dev
,
4395 struct amdgpu_display_manager
*dm
,
4396 struct drm_crtc
*pcrtc
,
4397 bool *wait_for_vblank
)
4400 struct drm_plane
*plane
;
4401 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
4402 struct dc_stream_state
*dc_stream_attach
;
4403 struct dc_plane_state
*plane_states_constructed
[MAX_SURFACES
];
4404 struct amdgpu_crtc
*acrtc_attach
= to_amdgpu_crtc(pcrtc
);
4405 struct drm_crtc_state
*new_pcrtc_state
=
4406 drm_atomic_get_new_crtc_state(state
, pcrtc
);
4407 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(new_pcrtc_state
);
4408 struct dm_crtc_state
*dm_old_crtc_state
=
4409 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state
, pcrtc
));
4410 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
4411 int planes_count
= 0;
4412 unsigned long flags
;
4414 /* update planes when needed */
4415 for_each_oldnew_plane_in_state(state
, plane
, old_plane_state
, new_plane_state
, i
) {
4416 struct drm_crtc
*crtc
= new_plane_state
->crtc
;
4417 struct drm_crtc_state
*new_crtc_state
;
4418 struct drm_framebuffer
*fb
= new_plane_state
->fb
;
4420 struct dm_plane_state
*dm_new_plane_state
= to_dm_plane_state(new_plane_state
);
4422 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
) {
4423 handle_cursor_update(plane
, old_plane_state
);
4427 if (!fb
|| !crtc
|| pcrtc
!= crtc
)
4430 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, crtc
);
4431 if (!new_crtc_state
->active
)
4434 pflip_needed
= !state
->allow_modeset
;
4436 spin_lock_irqsave(&crtc
->dev
->event_lock
, flags
);
4437 if (acrtc_attach
->pflip_status
!= AMDGPU_FLIP_NONE
) {
4438 DRM_ERROR("%s: acrtc %d, already busy\n",
4440 acrtc_attach
->crtc_id
);
4441 /* In commit tail framework this cannot happen */
4444 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
4446 if (!pflip_needed
|| plane
->type
== DRM_PLANE_TYPE_OVERLAY
) {
4447 WARN_ON(!dm_new_plane_state
->dc_state
);
4449 plane_states_constructed
[planes_count
] = dm_new_plane_state
->dc_state
;
4451 dc_stream_attach
= acrtc_state
->stream
;
4454 } else if (new_crtc_state
->planes_changed
) {
4455 /* Assume even ONE crtc with immediate flip means
4456 * entire can't wait for VBLANK
4457 * TODO Check if it's correct
4460 new_pcrtc_state
->pageflip_flags
& DRM_MODE_PAGE_FLIP_ASYNC
?
4463 /* TODO: Needs rework for multiplane flip */
4464 if (plane
->type
== DRM_PLANE_TYPE_PRIMARY
)
4465 drm_crtc_vblank_get(crtc
);
4470 (uint32_t)drm_crtc_vblank_count(crtc
) + *wait_for_vblank
,
4477 unsigned long flags
;
4479 if (new_pcrtc_state
->event
) {
4481 drm_crtc_vblank_get(pcrtc
);
4483 spin_lock_irqsave(&pcrtc
->dev
->event_lock
, flags
);
4484 prepare_flip_isr(acrtc_attach
);
4485 spin_unlock_irqrestore(&pcrtc
->dev
->event_lock
, flags
);
4488 dc_stream_attach
->adjust
= acrtc_state
->adjust
;
4489 dc_stream_attach
->vrr_infopacket
= acrtc_state
->vrr_infopacket
;
4491 if (false == commit_planes_to_stream(dm
->dc
,
4492 plane_states_constructed
,
4497 dm_error("%s: Failed to attach plane!\n", __func__
);
4499 /*TODO BUG Here should go disable planes on CRTC. */
4504 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
4505 * @crtc_state: the DRM CRTC state
4506 * @stream_state: the DC stream state.
4508 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
4509 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
/*
 * amdgpu_dm_crtc_copy_transient_flags() - mirror transient DRM CRTC state
 * flags into the DC stream state. Only the mode_changed copy is visible in
 * this extraction; further copied flags may sit on missing lines.
 */
4511 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state
*crtc_state
,
4512 struct dc_stream_state
*stream_state
)
4514 stream_state
->mode_changed
= crtc_state
->mode_changed
;
/*
 * amdgpu_dm_atomic_commit() - driver atomic-commit entry point.
 *
 * Before delegating to drm_atomic_helper_commit(), walks every CRTC that
 * needs a modeset and still has an old DC stream, and disables its DM
 * interrupts (vblank/pflip) so they are flushed before the helper swaps
 * state — the swap updates the dm_crtc_state->stream pointer the IRQ
 * paths read.
 *
 * NOTE(review): fragmented extraction — code bytes preserved unchanged.
 */
4517 static int amdgpu_dm_atomic_commit(struct drm_device
*dev
,
4518 struct drm_atomic_state
*state
,
4521 struct drm_crtc
*crtc
;
4522 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
4523 struct amdgpu_device
*adev
= dev
->dev_private
;
4527 * We evade vblanks and pflips on crtc that
4528 * should be changed. We do it here to flush & disable
4529 * interrupts before drm_swap_state is called in drm_atomic_helper_commit
4530 * it will update crtc->dm_crtc_state->stream pointer which is used in
4533 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
4534 struct dm_crtc_state
*dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
4535 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4537 if (drm_atomic_crtc_needs_modeset(new_crtc_state
) && dm_old_crtc_state
->stream
)
4538 manage_dm_interrupts(adev
, acrtc
, false);
4541 * Add check here for SoC's that support hardware cursor plane, to
4542 * unset legacy_cursor_update
4545 return drm_atomic_helper_commit(dev
, state
, nonblock
);
4547 /*TODO Handle EINTR, reenable IRQ*/
4550 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state
*state
)
4552 struct drm_device
*dev
= state
->dev
;
4553 struct amdgpu_device
*adev
= dev
->dev_private
;
4554 struct amdgpu_display_manager
*dm
= &adev
->dm
;
4555 struct dm_atomic_state
*dm_state
;
4557 struct drm_crtc
*crtc
;
4558 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
4559 unsigned long flags
;
4560 bool wait_for_vblank
= true;
4561 struct drm_connector
*connector
;
4562 struct drm_connector_state
*old_con_state
, *new_con_state
;
4563 struct dm_crtc_state
*dm_old_crtc_state
, *dm_new_crtc_state
;
4564 int crtc_disable_count
= 0;
4566 drm_atomic_helper_update_legacy_modeset_state(dev
, state
);
4568 dm_state
= to_dm_atomic_state(state
);
4570 /* update changed items */
4571 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
4572 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4574 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4575 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
4578 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4579 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
4580 "connectors_changed:%d\n",
4582 new_crtc_state
->enable
,
4583 new_crtc_state
->active
,
4584 new_crtc_state
->planes_changed
,
4585 new_crtc_state
->mode_changed
,
4586 new_crtc_state
->active_changed
,
4587 new_crtc_state
->connectors_changed
);
4589 /* Copy all transient state flags into dc state */
4590 if (dm_new_crtc_state
->stream
) {
4591 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state
->base
,
4592 dm_new_crtc_state
->stream
);
4595 /* handles headless hotplug case, updating new_state and
4596 * aconnector as needed
4599 if (modeset_required(new_crtc_state
, dm_new_crtc_state
->stream
, dm_old_crtc_state
->stream
)) {
4601 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc
->crtc_id
, acrtc
);
4603 if (!dm_new_crtc_state
->stream
) {
4605 * this could happen because of issues with
4606 * userspace notifications delivery.
4607 * In this case userspace tries to set mode on
4608 * display which is disconnected in fact.
4609 * dc_sink is NULL in this case on aconnector.
4610 * We expect reset mode will come soon.
4612 * This can also happen when unplug is done
4613 * during resume sequence ended
4615 * In this case, we want to pretend we still
4616 * have a sink to keep the pipe running so that
4617 * hw state is consistent with the sw state
4619 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
4620 __func__
, acrtc
->base
.base
.id
);
4624 if (dm_old_crtc_state
->stream
)
4625 remove_stream(adev
, acrtc
, dm_old_crtc_state
->stream
);
4627 pm_runtime_get_noresume(dev
->dev
);
4629 acrtc
->enabled
= true;
4630 acrtc
->hw_mode
= new_crtc_state
->mode
;
4631 crtc
->hwmode
= new_crtc_state
->mode
;
4632 } else if (modereset_required(new_crtc_state
)) {
4633 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc
->crtc_id
, acrtc
);
4635 /* i.e. reset mode */
4636 if (dm_old_crtc_state
->stream
)
4637 remove_stream(adev
, acrtc
, dm_old_crtc_state
->stream
);
4639 } /* for_each_crtc_in_state() */
4641 if (dm_state
->context
) {
4642 dm_enable_per_frame_crtc_master_sync(dm_state
->context
);
4643 WARN_ON(!dc_commit_state(dm
->dc
, dm_state
->context
));
4646 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, i
) {
4647 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4649 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4651 if (dm_new_crtc_state
->stream
!= NULL
) {
4652 const struct dc_stream_status
*status
=
4653 dc_stream_get_status(dm_new_crtc_state
->stream
);
4656 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state
->stream
, acrtc
);
4658 acrtc
->otg_inst
= status
->primary_otg_inst
;
4662 /* Handle scaling and underscan changes*/
4663 for_each_oldnew_connector_in_state(state
, connector
, old_con_state
, new_con_state
, i
) {
4664 struct dm_connector_state
*dm_new_con_state
= to_dm_connector_state(new_con_state
);
4665 struct dm_connector_state
*dm_old_con_state
= to_dm_connector_state(old_con_state
);
4666 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(dm_new_con_state
->base
.crtc
);
4667 struct dc_stream_status
*status
= NULL
;
4670 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, &acrtc
->base
);
4671 old_crtc_state
= drm_atomic_get_old_crtc_state(state
, &acrtc
->base
);
4674 /* Skip any modesets/resets */
4675 if (!acrtc
|| drm_atomic_crtc_needs_modeset(new_crtc_state
))
4678 /* Skip anything that is not scaling or underscan changes */
4679 if (!is_scaling_state_different(dm_new_con_state
, dm_old_con_state
))
4682 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4684 update_stream_scaling_settings(&dm_new_con_state
->base
.crtc
->mode
,
4685 dm_new_con_state
, (struct dc_stream_state
*)dm_new_crtc_state
->stream
);
4687 if (!dm_new_crtc_state
->stream
)
4690 status
= dc_stream_get_status(dm_new_crtc_state
->stream
);
4692 WARN_ON(!status
->plane_count
);
4694 dm_new_crtc_state
->stream
->adjust
= dm_new_crtc_state
->adjust
;
4695 dm_new_crtc_state
->stream
->vrr_infopacket
= dm_new_crtc_state
->vrr_infopacket
;
4697 /*TODO How it works with MPO ?*/
4698 if (!commit_planes_to_stream(
4700 status
->plane_states
,
4701 status
->plane_count
,
4703 to_dm_crtc_state(old_crtc_state
),
4705 dm_error("%s: Failed to update stream scaling!\n", __func__
);
4708 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
,
4709 new_crtc_state
, i
) {
4711 * loop to enable interrupts on newly arrived crtc
4713 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4714 bool modeset_needed
;
4716 if (old_crtc_state
->active
&& !new_crtc_state
->active
)
4717 crtc_disable_count
++;
4719 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4720 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
4721 modeset_needed
= modeset_required(
4723 dm_new_crtc_state
->stream
,
4724 dm_old_crtc_state
->stream
);
4726 if (dm_new_crtc_state
->stream
== NULL
|| !modeset_needed
)
4729 manage_dm_interrupts(adev
, acrtc
, true);
4732 /* update planes when needed per crtc*/
4733 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, j
) {
4734 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4736 if (dm_new_crtc_state
->stream
)
4737 amdgpu_dm_commit_planes(state
, dev
, dm
, crtc
, &wait_for_vblank
);
4742 * send vblank event on all events not handled in flip and
4743 * mark consumed event for drm_atomic_helper_commit_hw_done
4745 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
4746 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, i
) {
4748 if (new_crtc_state
->event
)
4749 drm_send_event_locked(dev
, &new_crtc_state
->event
->base
);
4751 new_crtc_state
->event
= NULL
;
4753 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
4756 if (wait_for_vblank
)
4757 drm_atomic_helper_wait_for_flip_done(dev
, state
);
4761 * Delay hw_done() until flip_done() is signaled. This is to block
4762 * another commit from freeing the CRTC state while we're still
4763 * waiting on flip_done.
4765 drm_atomic_helper_commit_hw_done(state
);
4767 drm_atomic_helper_cleanup_planes(dev
, state
);
4770 * Finally, drop a runtime PM reference for each newly disabled CRTC,
4771 * so we can put the GPU into runtime suspend if we're not driving any
4774 for (i
= 0; i
< crtc_disable_count
; i
++)
4775 pm_runtime_put_autosuspend(dev
->dev
);
4776 pm_runtime_mark_last_busy(dev
->dev
);
4780 static int dm_force_atomic_commit(struct drm_connector
*connector
)
4783 struct drm_device
*ddev
= connector
->dev
;
4784 struct drm_atomic_state
*state
= drm_atomic_state_alloc(ddev
);
4785 struct amdgpu_crtc
*disconnected_acrtc
= to_amdgpu_crtc(connector
->encoder
->crtc
);
4786 struct drm_plane
*plane
= disconnected_acrtc
->base
.primary
;
4787 struct drm_connector_state
*conn_state
;
4788 struct drm_crtc_state
*crtc_state
;
4789 struct drm_plane_state
*plane_state
;
4794 state
->acquire_ctx
= ddev
->mode_config
.acquire_ctx
;
4796 /* Construct an atomic state to restore previous display setting */
4799 * Attach connectors to drm_atomic_state
4801 conn_state
= drm_atomic_get_connector_state(state
, connector
);
4803 ret
= PTR_ERR_OR_ZERO(conn_state
);
4807 /* Attach crtc to drm_atomic_state*/
4808 crtc_state
= drm_atomic_get_crtc_state(state
, &disconnected_acrtc
->base
);
4810 ret
= PTR_ERR_OR_ZERO(crtc_state
);
4814 /* force a restore */
4815 crtc_state
->mode_changed
= true;
4817 /* Attach plane to drm_atomic_state */
4818 plane_state
= drm_atomic_get_plane_state(state
, plane
);
4820 ret
= PTR_ERR_OR_ZERO(plane_state
);
4825 /* Call commit internally with the state we just constructed */
4826 ret
= drm_atomic_commit(state
);
4831 DRM_ERROR("Restoring old state failed with %i\n", ret
);
4832 drm_atomic_state_put(state
);
 * This function handles all cases when set mode does not come upon hotplug.
 * This includes when a display is unplugged then plugged back into the
 * same port and when running without usermode desktop manager support
4842 void dm_restore_drm_connector_state(struct drm_device
*dev
,
4843 struct drm_connector
*connector
)
4845 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
4846 struct amdgpu_crtc
*disconnected_acrtc
;
4847 struct dm_crtc_state
*acrtc_state
;
4849 if (!aconnector
->dc_sink
|| !connector
->state
|| !connector
->encoder
)
4852 disconnected_acrtc
= to_amdgpu_crtc(connector
->encoder
->crtc
);
4853 if (!disconnected_acrtc
)
4856 acrtc_state
= to_dm_crtc_state(disconnected_acrtc
->base
.state
);
4857 if (!acrtc_state
->stream
)
4861 * If the previous sink is not released and different from the current,
4862 * we deduce we are in a state where we can not rely on usermode call
4863 * to turn on the display, so we do it here
4865 if (acrtc_state
->stream
->sink
!= aconnector
->dc_sink
)
4866 dm_force_atomic_commit(&aconnector
->base
);
4870 * Grabs all modesetting locks to serialize against any blocking commits,
4871 * Waits for completion of all non blocking commits.
4873 static int do_aquire_global_lock(struct drm_device
*dev
,
4874 struct drm_atomic_state
*state
)
4876 struct drm_crtc
*crtc
;
4877 struct drm_crtc_commit
*commit
;
4881 * Adding all modeset locks to aquire_ctx will
4882 * ensure that when the framework release it the
4883 * extra locks we are locking here will get released to
4885 ret
= drm_modeset_lock_all_ctx(dev
, state
->acquire_ctx
);
4889 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
4890 spin_lock(&crtc
->commit_lock
);
4891 commit
= list_first_entry_or_null(&crtc
->commit_list
,
4892 struct drm_crtc_commit
, commit_entry
);
4894 drm_crtc_commit_get(commit
);
4895 spin_unlock(&crtc
->commit_lock
);
4901 * Make sure all pending HW programming completed and
4904 ret
= wait_for_completion_interruptible_timeout(&commit
->hw_done
, 10*HZ
);
4907 ret
= wait_for_completion_interruptible_timeout(
4908 &commit
->flip_done
, 10*HZ
);
4911 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
4912 "timed out\n", crtc
->base
.id
, crtc
->name
);
4914 drm_crtc_commit_put(commit
);
4917 return ret
< 0 ? ret
: 0;
4920 void set_freesync_on_stream(struct amdgpu_display_manager
*dm
,
4921 struct dm_crtc_state
*new_crtc_state
,
4922 struct dm_connector_state
*new_con_state
,
4923 struct dc_stream_state
*new_stream
)
4925 struct mod_freesync_config config
= {0};
4926 struct mod_vrr_params vrr
= {0};
4927 struct dc_info_packet vrr_infopacket
= {0};
4928 struct amdgpu_dm_connector
*aconnector
=
4929 to_amdgpu_dm_connector(new_con_state
->base
.connector
);
4931 if (new_con_state
->freesync_capable
&&
4932 new_con_state
->freesync_enable
) {
4933 config
.state
= new_crtc_state
->freesync_enabled
?
4934 VRR_STATE_ACTIVE_VARIABLE
:
4936 config
.min_refresh_in_uhz
=
4937 aconnector
->min_vfreq
* 1000000;
4938 config
.max_refresh_in_uhz
=
4939 aconnector
->max_vfreq
* 1000000;
4940 config
.vsif_supported
= true;
4943 mod_freesync_build_vrr_params(dm
->freesync_module
,
4947 mod_freesync_build_vrr_infopacket(dm
->freesync_module
,
4954 new_crtc_state
->adjust
= vrr
.adjust
;
4955 new_crtc_state
->vrr_infopacket
= vrr_infopacket
;
4958 static int dm_update_crtcs_state(struct amdgpu_display_manager
*dm
,
4959 struct drm_atomic_state
*state
,
4961 bool *lock_and_validation_needed
)
4963 struct drm_crtc
*crtc
;
4964 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
4966 struct dm_crtc_state
*dm_old_crtc_state
, *dm_new_crtc_state
;
4967 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
4968 struct dc_stream_state
*new_stream
;
4972 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
4973 * update changed items
4975 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
4976 struct amdgpu_crtc
*acrtc
= NULL
;
4977 struct amdgpu_dm_connector
*aconnector
= NULL
;
4978 struct drm_connector_state
*drm_new_conn_state
= NULL
, *drm_old_conn_state
= NULL
;
4979 struct dm_connector_state
*dm_new_conn_state
= NULL
, *dm_old_conn_state
= NULL
;
4980 struct drm_plane_state
*new_plane_state
= NULL
;
4984 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
4985 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
4986 acrtc
= to_amdgpu_crtc(crtc
);
4988 new_plane_state
= drm_atomic_get_new_plane_state(state
, new_crtc_state
->crtc
->primary
);
4990 if (new_crtc_state
->enable
&& new_plane_state
&& !new_plane_state
->fb
) {
4995 aconnector
= amdgpu_dm_find_first_crtc_matching_connector(state
, crtc
);
4997 /* TODO This hack should go away */
4998 if (aconnector
&& enable
) {
4999 /* Make sure fake sink is created in plug-in scenario */
5000 drm_new_conn_state
= drm_atomic_get_new_connector_state(state
,
5002 drm_old_conn_state
= drm_atomic_get_old_connector_state(state
,
5005 if (IS_ERR(drm_new_conn_state
)) {
5006 ret
= PTR_ERR_OR_ZERO(drm_new_conn_state
);
5010 dm_new_conn_state
= to_dm_connector_state(drm_new_conn_state
);
5011 dm_old_conn_state
= to_dm_connector_state(drm_old_conn_state
);
5013 new_stream
= create_stream_for_sink(aconnector
,
5014 &new_crtc_state
->mode
,
5018 * we can have no stream on ACTION_SET if a display
5019 * was disconnected during S3, in this case it is not an
5020 * error, the OS will be updated after detection, and
5021 * will do the right thing on next atomic commit
5025 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
5026 __func__
, acrtc
->base
.base
.id
);
5030 set_freesync_on_stream(dm
, dm_new_crtc_state
,
5031 dm_new_conn_state
, new_stream
);
5033 if (dc_is_stream_unchanged(new_stream
, dm_old_crtc_state
->stream
) &&
5034 dc_is_stream_scaling_unchanged(new_stream
, dm_old_crtc_state
->stream
)) {
5035 new_crtc_state
->mode_changed
= false;
5036 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
5037 new_crtc_state
->mode_changed
);
5041 if (dm_old_crtc_state
->freesync_enabled
!= dm_new_crtc_state
->freesync_enabled
)
5042 new_crtc_state
->mode_changed
= true;
5044 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
))
5048 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
5049 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
5050 "connectors_changed:%d\n",
5052 new_crtc_state
->enable
,
5053 new_crtc_state
->active
,
5054 new_crtc_state
->planes_changed
,
5055 new_crtc_state
->mode_changed
,
5056 new_crtc_state
->active_changed
,
5057 new_crtc_state
->connectors_changed
);
5059 /* Remove stream for any changed/disabled CRTC */
5062 if (!dm_old_crtc_state
->stream
)
5065 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
5068 /* i.e. reset mode */
5069 if (dc_remove_stream_from_ctx(
5072 dm_old_crtc_state
->stream
) != DC_OK
) {
5077 dc_stream_release(dm_old_crtc_state
->stream
);
5078 dm_new_crtc_state
->stream
= NULL
;
5080 *lock_and_validation_needed
= true;
5082 } else {/* Add stream for any updated/enabled CRTC */
5084 * Quick fix to prevent NULL pointer on new_stream when
5085 * added MST connectors not found in existing crtc_state in the chained mode
5086 * TODO: need to dig out the root cause of that
5088 if (!aconnector
|| (!aconnector
->dc_sink
&& aconnector
->mst_port
))
5091 if (modereset_required(new_crtc_state
))
5094 if (modeset_required(new_crtc_state
, new_stream
,
5095 dm_old_crtc_state
->stream
)) {
5097 WARN_ON(dm_new_crtc_state
->stream
);
5099 dm_new_crtc_state
->stream
= new_stream
;
5101 dc_stream_retain(new_stream
);
5103 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
5106 if (dc_add_stream_to_ctx(
5109 dm_new_crtc_state
->stream
) != DC_OK
) {
5114 *lock_and_validation_needed
= true;
5119 /* Release extra reference */
5121 dc_stream_release(new_stream
);
5124 * We want to do dc stream updates that do not require a
5125 * full modeset below.
5127 if (!(enable
&& aconnector
&& new_crtc_state
->enable
&&
5128 new_crtc_state
->active
))
5131 * Given above conditions, the dc state cannot be NULL because:
5132 * 1. We're in the process of enabling CRTCs (just been added
5133 * to the dc context, or already is on the context)
5134 * 2. Has a valid connector attached, and
5135 * 3. Is currently active and enabled.
5136 * => The dc stream state currently exists.
5138 BUG_ON(dm_new_crtc_state
->stream
== NULL
);
5140 /* Scaling or underscan settings */
5141 if (is_scaling_state_different(dm_old_conn_state
, dm_new_conn_state
))
5142 update_stream_scaling_settings(
5143 &new_crtc_state
->mode
, dm_new_conn_state
, dm_new_crtc_state
->stream
);
5146 * Color management settings. We also update color properties
5147 * when a modeset is needed, to ensure it gets reprogrammed.
5149 if (dm_new_crtc_state
->base
.color_mgmt_changed
||
5150 drm_atomic_crtc_needs_modeset(new_crtc_state
)) {
5151 ret
= amdgpu_dm_set_regamma_lut(dm_new_crtc_state
);
5154 amdgpu_dm_set_ctm(dm_new_crtc_state
);
5164 dc_stream_release(new_stream
);
5168 static int dm_update_planes_state(struct dc
*dc
,
5169 struct drm_atomic_state
*state
,
5171 bool *lock_and_validation_needed
)
5173 struct drm_crtc
*new_plane_crtc
, *old_plane_crtc
;
5174 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
5175 struct drm_plane
*plane
;
5176 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
5177 struct dm_crtc_state
*dm_new_crtc_state
, *dm_old_crtc_state
;
5178 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
5179 struct dm_plane_state
*dm_new_plane_state
, *dm_old_plane_state
;
5181 /* TODO return page_flip_needed() function */
5182 bool pflip_needed
= !state
->allow_modeset
;
5186 /* Add new planes, in reverse order as DC expectation */
5187 for_each_oldnew_plane_in_state_reverse(state
, plane
, old_plane_state
, new_plane_state
, i
) {
5188 new_plane_crtc
= new_plane_state
->crtc
;
5189 old_plane_crtc
= old_plane_state
->crtc
;
5190 dm_new_plane_state
= to_dm_plane_state(new_plane_state
);
5191 dm_old_plane_state
= to_dm_plane_state(old_plane_state
);
5193 /*TODO Implement atomic check for cursor plane */
5194 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
5197 /* Remove any changed/removed planes */
5200 plane
->type
!= DRM_PLANE_TYPE_OVERLAY
)
5203 if (!old_plane_crtc
)
5206 old_crtc_state
= drm_atomic_get_old_crtc_state(
5207 state
, old_plane_crtc
);
5208 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
5210 if (!dm_old_crtc_state
->stream
)
5213 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
5214 plane
->base
.id
, old_plane_crtc
->base
.id
);
5216 if (!dc_remove_plane_from_context(
5218 dm_old_crtc_state
->stream
,
5219 dm_old_plane_state
->dc_state
,
5220 dm_state
->context
)) {
5227 dc_plane_state_release(dm_old_plane_state
->dc_state
);
5228 dm_new_plane_state
->dc_state
= NULL
;
5230 *lock_and_validation_needed
= true;
5232 } else { /* Add new planes */
5233 struct dc_plane_state
*dc_new_plane_state
;
5235 if (drm_atomic_plane_disabling(plane
->state
, new_plane_state
))
5238 if (!new_plane_crtc
)
5241 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, new_plane_crtc
);
5242 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
5244 if (!dm_new_crtc_state
->stream
)
5248 plane
->type
!= DRM_PLANE_TYPE_OVERLAY
)
5251 WARN_ON(dm_new_plane_state
->dc_state
);
5253 dc_new_plane_state
= dc_create_plane_state(dc
);
5254 if (!dc_new_plane_state
)
5257 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
5258 plane
->base
.id
, new_plane_crtc
->base
.id
);
5260 ret
= fill_plane_attributes(
5261 new_plane_crtc
->dev
->dev_private
,
5266 dc_plane_state_release(dc_new_plane_state
);
5271 * Any atomic check errors that occur after this will
5272 * not need a release. The plane state will be attached
5273 * to the stream, and therefore part of the atomic
5274 * state. It'll be released when the atomic state is
5277 if (!dc_add_plane_to_context(
5279 dm_new_crtc_state
->stream
,
5281 dm_state
->context
)) {
5283 dc_plane_state_release(dc_new_plane_state
);
5287 dm_new_plane_state
->dc_state
= dc_new_plane_state
;
5289 /* Tell DC to do a full surface update every time there
5290 * is a plane change. Inefficient, but works for now.
5292 dm_new_plane_state
->dc_state
->update_flags
.bits
.full_update
= 1;
5294 *lock_and_validation_needed
= true;
5301 enum surface_update_type
dm_determine_update_type_for_commit(struct dc
*dc
, struct drm_atomic_state
*state
)
5305 int i
, j
, num_plane
;
5306 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
5307 struct dm_plane_state
*new_dm_plane_state
, *old_dm_plane_state
;
5308 struct drm_crtc
*new_plane_crtc
, *old_plane_crtc
;
5309 struct drm_plane
*plane
;
5311 struct drm_crtc
*crtc
;
5312 struct drm_crtc_state
*new_crtc_state
, *old_crtc_state
;
5313 struct dm_crtc_state
*new_dm_crtc_state
, *old_dm_crtc_state
;
5314 struct dc_stream_status
*status
= NULL
;
5316 struct dc_surface_update
*updates
= kzalloc(MAX_SURFACES
* sizeof(struct dc_surface_update
), GFP_KERNEL
);
5317 struct dc_plane_state
*surface
= kzalloc(MAX_SURFACES
* sizeof(struct dc_plane_state
), GFP_KERNEL
);
5318 struct dc_stream_update stream_update
;
5319 enum surface_update_type update_type
= UPDATE_TYPE_FAST
;
5322 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
5323 new_dm_crtc_state
= to_dm_crtc_state(new_crtc_state
);
5324 old_dm_crtc_state
= to_dm_crtc_state(old_crtc_state
);
5327 if (new_dm_crtc_state
->stream
) {
5329 for_each_oldnew_plane_in_state(state
, plane
, old_plane_state
, new_plane_state
, j
) {
5330 new_plane_crtc
= new_plane_state
->crtc
;
5331 old_plane_crtc
= old_plane_state
->crtc
;
5332 new_dm_plane_state
= to_dm_plane_state(new_plane_state
);
5333 old_dm_plane_state
= to_dm_plane_state(old_plane_state
);
5335 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
5338 if (!state
->allow_modeset
)
5341 if (crtc
== new_plane_crtc
) {
5342 updates
[num_plane
].surface
= &surface
[num_plane
];
5344 if (new_crtc_state
->mode_changed
) {
5345 updates
[num_plane
].surface
->src_rect
=
5346 new_dm_plane_state
->dc_state
->src_rect
;
5347 updates
[num_plane
].surface
->dst_rect
=
5348 new_dm_plane_state
->dc_state
->dst_rect
;
5349 updates
[num_plane
].surface
->rotation
=
5350 new_dm_plane_state
->dc_state
->rotation
;
5351 updates
[num_plane
].surface
->in_transfer_func
=
5352 new_dm_plane_state
->dc_state
->in_transfer_func
;
5353 stream_update
.dst
= new_dm_crtc_state
->stream
->dst
;
5354 stream_update
.src
= new_dm_crtc_state
->stream
->src
;
5357 if (new_crtc_state
->color_mgmt_changed
) {
5358 updates
[num_plane
].gamma
=
5359 new_dm_plane_state
->dc_state
->gamma_correction
;
5360 updates
[num_plane
].in_transfer_func
=
5361 new_dm_plane_state
->dc_state
->in_transfer_func
;
5362 stream_update
.gamut_remap
=
5363 &new_dm_crtc_state
->stream
->gamut_remap_matrix
;
5364 stream_update
.out_transfer_func
=
5365 new_dm_crtc_state
->stream
->out_transfer_func
;
5372 if (num_plane
> 0) {
5373 status
= dc_stream_get_status(new_dm_crtc_state
->stream
);
5374 update_type
= dc_check_update_surfaces_for_stream(dc
, updates
, num_plane
,
5375 &stream_update
, status
);
5377 if (update_type
> UPDATE_TYPE_MED
) {
5378 update_type
= UPDATE_TYPE_FULL
;
5383 } else if (!new_dm_crtc_state
->stream
&& old_dm_crtc_state
->stream
) {
5384 update_type
= UPDATE_TYPE_FULL
;
5396 static int amdgpu_dm_atomic_check(struct drm_device
*dev
,
5397 struct drm_atomic_state
*state
)
5399 struct amdgpu_device
*adev
= dev
->dev_private
;
5400 struct dc
*dc
= adev
->dm
.dc
;
5401 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
5402 struct drm_connector
*connector
;
5403 struct drm_connector_state
*old_con_state
, *new_con_state
;
5404 struct drm_crtc
*crtc
;
5405 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
5406 enum surface_update_type update_type
= UPDATE_TYPE_FAST
;
5407 enum surface_update_type overall_update_type
= UPDATE_TYPE_FAST
;
5412 * This bool will be set for true for any modeset/reset
5413 * or plane update which implies non fast surface update.
5415 bool lock_and_validation_needed
= false;
5417 ret
= drm_atomic_helper_check_modeset(dev
, state
);
5421 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
5422 struct dm_crtc_state
*dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
5423 struct dm_crtc_state
*dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
5425 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
) &&
5426 !new_crtc_state
->color_mgmt_changed
&&
5427 (dm_old_crtc_state
->freesync_enabled
== dm_new_crtc_state
->freesync_enabled
))
5430 if (!new_crtc_state
->enable
)
5433 ret
= drm_atomic_add_affected_connectors(state
, crtc
);
5437 ret
= drm_atomic_add_affected_planes(state
, crtc
);
5442 dm_state
->context
= dc_create_state();
5443 ASSERT(dm_state
->context
);
5444 dc_resource_state_copy_construct_current(dc
, dm_state
->context
);
5446 /* Remove exiting planes if they are modified */
5447 ret
= dm_update_planes_state(dc
, state
, false, &lock_and_validation_needed
);
5452 /* Disable all crtcs which require disable */
5453 ret
= dm_update_crtcs_state(&adev
->dm
, state
, false, &lock_and_validation_needed
);
5458 /* Enable all crtcs which require enable */
5459 ret
= dm_update_crtcs_state(&adev
->dm
, state
, true, &lock_and_validation_needed
);
5464 /* Add new/modified planes */
5465 ret
= dm_update_planes_state(dc
, state
, true, &lock_and_validation_needed
);
5470 /* Run this here since we want to validate the streams we created */
5471 ret
= drm_atomic_helper_check_planes(dev
, state
);
5475 /* Check scaling and underscan changes*/
5476 /* TODO Removed scaling changes validation due to inability to commit
5477 * new stream into context w\o causing full reset. Need to
5478 * decide how to handle.
5480 for_each_oldnew_connector_in_state(state
, connector
, old_con_state
, new_con_state
, i
) {
5481 struct dm_connector_state
*dm_old_con_state
= to_dm_connector_state(old_con_state
);
5482 struct dm_connector_state
*dm_new_con_state
= to_dm_connector_state(new_con_state
);
5483 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(dm_new_con_state
->base
.crtc
);
5485 /* Skip any modesets/resets */
5486 if (!acrtc
|| drm_atomic_crtc_needs_modeset(
5487 drm_atomic_get_new_crtc_state(state
, &acrtc
->base
)))
5490 /* Skip any thing not scale or underscan changes */
5491 if (!is_scaling_state_different(dm_new_con_state
, dm_old_con_state
))
5494 overall_update_type
= UPDATE_TYPE_FULL
;
5495 lock_and_validation_needed
= true;
5499 * For full updates case when
5500 * removing/adding/updating streams on one CRTC while flipping
5502 * acquiring global lock will guarantee that any such full
5504 * will wait for completion of any outstanding flip using DRMs
5505 * synchronization events.
5507 update_type
= dm_determine_update_type_for_commit(dc
, state
);
5509 if (overall_update_type
< update_type
)
5510 overall_update_type
= update_type
;
5513 * lock_and_validation_needed was an old way to determine if we need to set
5514 * the global lock. Leaving it in to check if we broke any corner cases
5515 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
5516 * lock_and_validation_needed false = UPDATE_TYPE_FAST
5518 if (lock_and_validation_needed
&& overall_update_type
<= UPDATE_TYPE_FAST
)
5519 WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
5520 else if (!lock_and_validation_needed
&& overall_update_type
> UPDATE_TYPE_FAST
)
5521 WARN(1, "Global lock should NOT be set, overall_update_type should be UPDATE_TYPE_FAST");
5524 if (overall_update_type
> UPDATE_TYPE_FAST
) {
5526 ret
= do_aquire_global_lock(dev
, state
);
5530 if (dc_validate_global_state(dc
, dm_state
->context
) != DC_OK
) {
5536 /* Must be success */
5541 if (ret
== -EDEADLK
)
5542 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
5543 else if (ret
== -EINTR
|| ret
== -EAGAIN
|| ret
== -ERESTARTSYS
)
5544 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
5546 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret
);
5551 static bool is_dp_capable_without_timing_msa(struct dc
*dc
,
5552 struct amdgpu_dm_connector
*amdgpu_dm_connector
)
5555 bool capable
= false;
5557 if (amdgpu_dm_connector
->dc_link
&&
5558 dm_helpers_dp_read_dpcd(
5560 amdgpu_dm_connector
->dc_link
,
5561 DP_DOWN_STREAM_PORT_COUNT
,
5563 sizeof(dpcd_data
))) {
5564 capable
= (dpcd_data
& DP_MSA_TIMING_PAR_IGNORED
) ? true:false;
5569 void amdgpu_dm_update_freesync_caps(struct drm_connector
*connector
,
5573 bool edid_check_required
;
5574 struct detailed_timing
*timing
;
5575 struct detailed_non_pixel
*data
;
5576 struct detailed_data_monitor_range
*range
;
5577 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
5578 to_amdgpu_dm_connector(connector
);
5579 struct dm_connector_state
*dm_con_state
;
5581 struct drm_device
*dev
= connector
->dev
;
5582 struct amdgpu_device
*adev
= dev
->dev_private
;
5584 if (!connector
->state
) {
5585 DRM_ERROR("%s - Connector has no state", __func__
);
5590 dm_con_state
= to_dm_connector_state(connector
->state
);
5592 amdgpu_dm_connector
->min_vfreq
= 0;
5593 amdgpu_dm_connector
->max_vfreq
= 0;
5594 amdgpu_dm_connector
->pixel_clock_mhz
= 0;
5596 dm_con_state
->freesync_capable
= false;
5597 dm_con_state
->freesync_enable
= false;
5601 dm_con_state
= to_dm_connector_state(connector
->state
);
5603 edid_check_required
= false;
5604 if (!amdgpu_dm_connector
->dc_sink
) {
5605 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
5608 if (!adev
->dm
.freesync_module
)
5611 * if edid non zero restrict freesync only for dp and edp
5614 if (amdgpu_dm_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT
5615 || amdgpu_dm_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_EDP
) {
5616 edid_check_required
= is_dp_capable_without_timing_msa(
5618 amdgpu_dm_connector
);
5621 dm_con_state
->freesync_capable
= false;
5622 if (edid_check_required
== true && (edid
->version
> 1 ||
5623 (edid
->version
== 1 && edid
->revision
> 1))) {
5624 for (i
= 0; i
< 4; i
++) {
5626 timing
= &edid
->detailed_timings
[i
];
5627 data
= &timing
->data
.other_data
;
5628 range
= &data
->data
.range
;
5630 * Check if monitor has continuous frequency mode
5632 if (data
->type
!= EDID_DETAIL_MONITOR_RANGE
)
5635 * Check for flag range limits only. If flag == 1 then
5636 * no additional timing information provided.
5637 * Default GTF, GTF Secondary curve and CVT are not
5640 if (range
->flags
!= 1)
5643 amdgpu_dm_connector
->min_vfreq
= range
->min_vfreq
;
5644 amdgpu_dm_connector
->max_vfreq
= range
->max_vfreq
;
5645 amdgpu_dm_connector
->pixel_clock_mhz
=
5646 range
->pixel_clock_mhz
* 10;
5650 if (amdgpu_dm_connector
->max_vfreq
-
5651 amdgpu_dm_connector
->min_vfreq
> 10) {
5653 dm_con_state
->freesync_capable
= true;