/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/inc/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
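
/*
 * Note: the DMUB firmware image ships wrapped in a PSP signature; both
 * dm_dmub_sw_init() and dm_dmub_hw_init() below subtract PSP_HEADER_BYTES
 * and PSP_FOOTER_BYTES from hdr->inst_const_bytes to recover the size of
 * the raw instruction/constant region.
 */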
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
						acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}
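
/*
 * Illustrative example of the reg-format packing above: with
 * h_position = 0x50 and v_position = 0x20, *position becomes
 * 0x20 | (0x50 << 16) = 0x00500020; *vbl packs v_blank_start and
 * v_blank_end the same way.
 */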
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}
static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					acrtc_state->stream,
					&acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
					adev->dm.dc,
					acrtc_state->stream,
					&acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: ignored
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling at start of front-porch is only possible
		 * in non-vrr mode, as only there vblank timestamping will give
		 * valid results while done in front-porch. Otherwise defer it
		 * to dm_vupdate_high_irq after end of front-porch.
		 */
		if (!amdgpu_dm_vrr_active(acrtc_state))
			drm_crtc_handle_vblank(&acrtc->base);

		/* Following stuff must happen at start of vblank, for crc
		 * computation and below-the-range btr support in vrr mode.
		 */
		amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

		if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
		    acrtc_state->vrr_params.supported &&
		    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
			spin_lock_irqsave(&adev->ddev->event_lock, flags);
			mod_freesync_handle_v_update(
				adev->dm.freesync_module,
				acrtc_state->stream,
				&acrtc_state->vrr_params);

			dc_stream_adjust_vmin_vmax(
				adev->dm.dc,
				acrtc_state->stream,
				&acrtc_state->vrr_params.adjust);
			spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		}
	}
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
 * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Notify DRM's vblank event handler at VSTARTUP
 *
 * Unlike DCE hardware, we trigger the handler at VSTARTUP, at which:
 * * We are close enough to VUPDATE - the point of no return for hw
 * * We are in the fixed portion of variable front porch when vrr is enabled
 * * We are before VUPDATE, where double-buffered vrr registers are swapped
 *
 * It is therefore the correct place to signal vblank, send user flip events,
 * and pick up vrr.
 */
static void dm_dcn_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      amdgpu_dm_vrr_active(acrtc_state),
		      acrtc_state->active_planes);

	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
	drm_crtc_handle_vblank(&acrtc->base);

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(
			adev->dm.freesync_module,
			acrtc_state->stream,
			&acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(
			adev->dm.dc,
			acrtc_state->stream,
			&acrtc_state->vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc_state->active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
#endif
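
/*
 * To summarize the handlers above: on pre-DCN (DCE) hardware vblank work is
 * split between dm_crtc_high_irq() at start of vblank and
 * dm_vupdate_high_irq() after end of front-porch (required for valid VRR
 * timestamps), while DCN hardware handles everything from the single
 * VSTARTUP-based dm_dcn_crtc_high_irq() above.
 */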
static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);
/* Allocate memory for FBC compressed data  */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
				AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
				&compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};
static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}
static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}
void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
	       fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
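
/*
 * A sketch of the framebuffer window layout consumed above: window 0 holds
 * the instruction/constant region, window 2 the bss/data region, window 3 a
 * copy of the VBIOS, and windows 4-6 the mailbox, trace buffer and firmware
 * state regions, which are zeroed before handing the layout to
 * dmub_srv_hw_init().
 */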
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes);

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}
static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	if (adev->dm.dmub_fw) {
		release_firmware(adev->dm.dmub_fw);
		adev->dm.dmub_fw = NULL;
	}

	if (adev->dm.fw_dmcu) {
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
	}

	return 0;
}
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}
static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	bool ret = false;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction, Don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* todo will enable for navi10 */
	if (adev->asic_type <= CHIP_RAVEN) {
		ret = dmcu_load_iram(dmcu, params);

		if (!ret)
			return -EINVAL;
	}

	return detect_mst_link_for_all_connectors(adev->ddev);
}
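
/*
 * For illustration, the linear_lut computed in dm_late_init() is evenly
 * spaced: 0xFFFF * i / 15 = 0x1111 * i, i.e. 0x0000, 0x1111, 0x2222, ...
 * up to 0xFFFF for i = 15.
 */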
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct,
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermark are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	mutex_lock(&smu->mutex);

	/* pass data to smu controller */
	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
	    !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
		ret = smu_write_watermarks_table(smu);

		if (ret) {
			mutex_unlock(&smu->mutex);
			DRM_ERROR("Failed to update WMTABLE!\n");
			return ret;
		}
		smu->watermarks_bitmap |= WATERMARKS_LOADED;
	}

	mutex_unlock(&smu->mutex);

	return 0;
}
/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}
/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return ret;
}
static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");
}
static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	int i, r;

	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
	if (r)
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST*/
	s3_handle_mst(ddev, false);

	/* Do detection*/
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}
/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_cll, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};

	if (!aconnector || !aconnector->dc_link)
		return;

	conn_base = &aconnector->base;
	adev = conn_base->dev->dev_private;
	dm = &adev->dm;
	caps = &dm->backlight_caps;
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	if (caps->ext_caps->bits.oled == 1 ||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
		caps->aux_support = true;

	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * Where CV is a one-byte value.
	 * For calculating this expression we may need float point precision;
	 * to avoid this complexity level, we take advantage that CV is divided
	 * by a constant. From the Euclid's division algorithm, we know that CV
	 * can be written as: CV = 32*q + r. Next, we replace CV in the
	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
	 * need to pre-compute the value of r/32. For pre-computing the values
	 * we just used the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expressions can be verified at
	 * pre_computed_values.
	 */
	q = max_cll >> 5;
	r = max_cll % 32;
	max = (1 << q) * pre_computed_values[r];

	// min luminance: maxLum * (CV/255)^2 / 100
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}
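
/*
 * Worked example for the CTA-861-G conversion above (illustrative values):
 * max_cll = 65 gives q = 65 >> 5 = 2 and r = 65 % 32 = 1, so
 * max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204, matching
 * 50*2**(65/32.0) ~= 204.4 rounded to integer precision.
 */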
void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depends on link status.
	 * Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				 aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_update_freesync_caps(connector, NULL);

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
			drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		} else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;

			drm_connector_update_edid_property(connector,
							   aconnector->edid);
			drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
					    aconnector->edid);
		}
		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		update_connector_ext_caps(aconnector);
	} else {
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	if (sink)
		dc_sink_release(sink);
}
static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct amdgpu_device *adev = dev->dev_private;
#endif

	/*
	 * In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue)
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
#endif
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);

}
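/*
 * Polls the ESI (Event Status Indicator) DPCD range and lets the DP MST
 * topology manager service each pending short-pulse event, ACKing back to
 * the sink after each one, until no new IRQ is reported or
 * max_process_count iterations have been consumed.
 */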
static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
			process_count < max_process_count) {
		uint8_t retry;

		dret = 0;
		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
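/*
 * handle_hpd_rx_irq() services DP short pulses: downstream-port changes and
 * link loss on SST links, CP_IRQ notifications for HDCP, and MST sideband
 * traffic via dm_handle_hpd_rx_irq() above.
 */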
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	union hpd_irq_data hpd_irq_data;
	struct amdgpu_device *adev = dev->dev_private;

	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
#endif

	/*
	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
	 * conflict, after implement i2c helper, this mutex should be
	 * retired.
	 */
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
#else
	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
#endif
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
	}
#endif
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (dc_link->type != dc_connection_mst_branch) {
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
		mutex_unlock(&aconnector->hpd_lock);
	}
}
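/*
 * Walks every connector on the device and registers its HPD and HPD_RX
 * (DP short pulse) interrupt sources with the DM interrupt manager, both
 * in low IRQ context so the handlers above run outside hard-IRQ context.
 */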
static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head)	{

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source =	dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}
/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	if (adev->asic_type >= CHIP_VEGA10)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_dcn_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif
/*
 * Acquires the lock for the atomic state object and returns
 * the new atomic state.
 *
 * This should only be called during atomic check.
 */
static int dm_atomic_get_state(struct drm_atomic_state *state,
			       struct dm_atomic_state **dm_state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_state *priv_state;

	if (*dm_state)
		return 0;

	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	*dm_state = to_dm_atomic_state(priv_state);

	return 0;
}
struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *new_obj_state;
	int i;

	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(new_obj_state);
	}

	return NULL;
}
struct dm_atomic_state *
dm_atomic_get_old_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *old_obj_state;
	int i;

	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(old_obj_state);
	}

	return NULL;
}
static struct drm_private_state *
dm_atomic_duplicate_state(struct drm_private_obj *obj)
{
	struct dm_atomic_state *old_state, *new_state;

	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);

	old_state = to_dm_atomic_state(obj->state);

	if (old_state && old_state->context)
		new_state->context = dc_copy_state(old_state->context);

	if (!new_state->context) {
		kfree(new_state);
		return NULL;
	}

	return &new_state->base;
}

static void dm_atomic_destroy_state(struct drm_private_obj *obj,
				    struct drm_private_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	if (dm_state && dm_state->context)
		dc_release_state(dm_state->context);

	kfree(dm_state);
}

static struct drm_private_state_funcs dm_atomic_state_funcs = {
	.atomic_duplicate_state = dm_atomic_duplicate_state,
	.atomic_destroy_state = dm_atomic_destroy_state,
};
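/*
 * Sets up the DRM mode_config limits and hooks, then wraps the current DC
 * resource state in a DRM private object so it takes part in atomic state
 * tracking (duplicated and destroyed through the funcs table above).
 */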
static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	struct dm_atomic_state *state;
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;

	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;
	/* indicates support for immediate flip */
	adev->ddev->mode_config.async_page_flip = true;

	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->context = dc_create_state(adev->dm.dc);
	if (!state->context) {
		kfree(state);
		return -ENOMEM;
	}

	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);

	drm_atomic_private_obj_init(adev->ddev,
				    &adev->dm.atomic_obj,
				    &state->base,
				    &dm_atomic_state_funcs);

	r = amdgpu_display_modeset_create_props(adev);
	if (r)
		return r;

	r = amdgpu_dm_audio_init(adev);
	if (r)
		return r;

	return 0;
}
#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
{
#if defined(CONFIG_ACPI)
	struct amdgpu_dm_backlight_caps caps;

	if (dm->backlight_caps.caps_valid)
		return;

	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
	if (caps.caps_valid) {
		dm->backlight_caps.caps_valid = true;
		if (caps.aux_support)
			return;
		dm->backlight_caps.min_input_signal = caps.min_input_signal;
		dm->backlight_caps.max_input_signal = caps.max_input_signal;
	} else {
		dm->backlight_caps.min_input_signal =
				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
		dm->backlight_caps.max_input_signal =
				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
	}
#else
	if (dm->backlight_caps.aux_support)
		return;

	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}
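/*
 * Brightness mapping example (using the default caps above, not values
 * reported by ACPI): with min_input_signal = 12 and max_input_signal = 255,
 * convert_brightness() below maps user level 0 to 12 * 0x101 = 3084 and
 * user level 255 to exactly 0xffff, scaling linearly in between to cover
 * the DC interface range of 0..0xffff.
 */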
static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
{
	bool rc;

	if (!link)
		return 1;

	rc = dc_link_set_backlight_level_nits(link, true, brightness,
					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);

	return rc ? 0 : 1;
}

static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
			      const uint32_t user_brightness)
{
	u32 min, max, conversion_pace;
	u32 brightness = user_brightness;

	if (!caps)
		goto out;

	if (!caps->aux_support) {
		max = caps->max_input_signal;
		min = caps->min_input_signal;

		/*
		 * The brightness input is in the range 0-255
		 * It needs to be rescaled to be between the
		 * requested min and max input signal
		 * It also needs to be scaled up by 0x101 to
		 * match the DC interface which has a range of
		 * 0 to 0xffff
		 */
		conversion_pace = 0x101;
		brightness =
			user_brightness
			* conversion_pace
			* (max - min)
			/ AMDGPU_MAX_BL_LEVEL
			+ min * conversion_pace;
	} else {
		/* TODO
		 * We are doing a linear interpolation here, which is OK but
		 * does not provide the optimal result. We probably want
		 * something close to the Perceptual Quantizer (PQ) curve.
		 */
		max = caps->aux_max_input_signal;
		min = caps->aux_min_input_signal;

		brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
			       + user_brightness * max;
		// Multiply the value by 1000 since we use millinits
		brightness *= 1000;
		brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
	}

out:
	return brightness;
}
static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link = NULL;
	u32 brightness;
	bool rc;

	amdgpu_dm_update_backlight_caps(dm);
	caps = dm->backlight_caps;

	link = (struct dc_link *)dm->backlight_link;

	brightness = convert_brightness(&caps, bd->props.brightness);
	// Change brightness based on AUX property
	if (caps.aux_support)
		return set_backlight_via_aux(link, brightness);

	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);

	return rc ? 0 : 1;
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	int ret = dc_link_get_backlight_level(dm->backlight_link);

	if (ret == DC_ERROR_UNEXPECTED)
		return bd->props.brightness;
	return ret;
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status	= amdgpu_dm_backlight_update_status,
};
static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	amdgpu_dm_update_backlight_caps(dm);

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
		 dm->adev->ddev->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
			dm->adev->ddev->dev,
			dm,
			&amdgpu_dm_backlight_ops,
			&props);

	if (IS_ERR(dm->backlight_dev))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}

#endif
static int initialize_plane(struct amdgpu_display_manager *dm,
			    struct amdgpu_mode_info *mode_info, int plane_id,
			    enum drm_plane_type plane_type,
			    const struct dc_plane_cap *plane_cap)
{
	struct drm_plane *plane;
	unsigned long possible_crtcs;
	int ret = 0;

	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
	if (!plane) {
		DRM_ERROR("KMS: Failed to allocate plane\n");
		return -ENOMEM;
	}
	plane->type = plane_type;

	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC if they're not going to be used as a primary plane
	 * for a CRTC - like overlay or underlay planes.
	 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;

	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);

	if (ret) {
		DRM_ERROR("KMS: Failed to initialize plane\n");
		kfree(plane);
		return ret;
	}

	if (mode_info)
		mode_info->planes[plane_id] = plane;

	return ret;
}
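/*
 * Only eDP and LVDS links can drive the panel backlight; for such a
 * connected link, register the backlight device and remember the link in
 * dm->backlight_link for the backlight ops above.
 */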
static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Even if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev)
			dm->backlight_link = link;
	}
#endif
}
/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
			continue;

		if (!plane->blends_with_above || !plane->blends_with_below)
			continue;

		if (!plane->pixel_format_support.argb8888)
			continue;

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}

		/* Only create one overlay plane. */
		break;
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

	dm->display_indexes_num = dm->dc->caps.max_streams;

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		link = dc_get_link_at_index(dm->dc, i);

		if (!dc_link_detect_sink(link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(link);
			amdgpu_dm_update_connector_after_detect(aconnector);

		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
			amdgpu_dm_update_connector_after_detect(aconnector);
			register_backlight_device(dm, link);
			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
				amdgpu_dm_set_psr_caps(link);
		}
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
	case CHIP_NAVI12:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
		if (dcn10_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		goto fail;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	/* No userspace support. */
	dm->dc->debug.disable_tri_buf = true;

	return 0;
fail:
	kfree(aencoder);
	kfree(aconnector);

	return -EINVAL;
}
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_mode_config_cleanup(dm->ddev);
	drm_atomic_private_obj_fini(&dm->atomic_obj);
	return;
}
/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};
#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	int ret;
	int s3_state;
	struct drm_device *drm_dev = dev_get_drvdata(device);
	struct amdgpu_device *adev = drm_dev->dev_private;

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			drm_kms_helper_hotplug_event(adev->ddev);
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif
static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
#endif
	case CHIP_NAVI10:
	case CHIP_NAVI12:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_NAVI14:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_RENOIR:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	amdgpu_dm_set_irq_funcs(adev);

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev->ddev->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}
static bool modeset_required(struct drm_crtc_state *crtc_state,
			     struct dc_stream_state *new_stream,
			     struct dc_stream_state *old_stream)
{
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return false;

	if (!crtc_state->enable)
		return false;

	return crtc_state->active;
}

static bool modereset_required(struct drm_crtc_state *crtc_state)
{
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return false;

	return !crtc_state->enable || !crtc_state->active;
}

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};
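/*
 * Converts DRM plane src/dst rectangles into DC scaling info. DRM source
 * coordinates are 16.16 fixed point (hence the >> 16 below); the scale
 * factor checks reject anything outside DC's supported 0.25x..16x range
 * (250..16000 in units of 1/1000).
 */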
static int fill_dc_scaling_info(const struct drm_plane_state *state,
				struct dc_scaling_info *scaling_info)
{
	int scale_w, scale_h;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is fixed 16.16 but we ignore mantissa for now... */
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;

	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/* TODO: Validate scaling per-format with DC plane caps */
	scale_w = scaling_info->dst_rect.width * 1000 /
		  scaling_info->src_rect.width;

	if (scale_w < 250 || scale_w > 16000)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
		  scaling_info->src_rect.height;

	if (scale_h < 250 || scale_h > 16000)
		return -EINVAL;

	/*
	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */

	return 0;
}
static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
		       uint64_t *tiling_flags)
{
	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
	int r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r)) {
		/* Don't show error message when returning -ERESTARTSYS */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Unable to reserve buffer: %d\n", r);
		return r;
	}

	if (tiling_flags)
		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);

	amdgpu_bo_unreserve(rbo);

	return r;
}

static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
{
	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);

	return offset ? (address + offset * 256) : 0;
}
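/*
 * DCC (Delta Color Compression) metadata lives at a 256-byte-block offset
 * encoded in the tiling flags. The helper below asks DC whether the
 * surface/swizzle combination is compressible and, if so, programs the
 * metadata address and pitch alongside the plane address.
 */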
static int
fill_plane_dcc_attributes(struct amdgpu_device *adev,
			  const struct amdgpu_framebuffer *afb,
			  const enum surface_pixel_format format,
			  const enum dc_rotation_angle rotation,
			  const struct plane_size *plane_size,
			  const union dc_tiling_info *tiling_info,
			  const uint64_t info,
			  struct dc_plane_dcc_param *dcc,
			  struct dc_plane_address *address,
			  bool force_disable_dcc)
{
	struct dc *dc = adev->dm.dc;
	struct dc_dcc_surface_param input;
	struct dc_surface_dcc_cap output;
	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
	uint64_t dcc_address;

	memset(&input, 0, sizeof(input));
	memset(&output, 0, sizeof(output));

	if (force_disable_dcc)
		return 0;

	if (!offset)
		return 0;

	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
		return 0;

	if (!dc->cap_funcs.get_dcc_compression_cap)
		return -EINVAL;

	input.format = format;
	input.surface_size.width = plane_size->surface_size.width;
	input.surface_size.height = plane_size->surface_size.height;
	input.swizzle_mode = tiling_info->gfx9.swizzle;

	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
		input.scan = SCAN_DIRECTION_HORIZONTAL;
	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
		input.scan = SCAN_DIRECTION_VERTICAL;

	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
		return -EINVAL;

	if (!output.capable)
		return -EINVAL;

	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
		return -EINVAL;

	dcc->enable = 1;
	dcc->meta_pitch =
		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
	dcc->independent_64b_blks = i64b;

	dcc_address = get_dcc_address(afb->address, info);
	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);

	return 0;
}
static int
fill_plane_buffer_attributes(struct amdgpu_device *adev,
			     const struct amdgpu_framebuffer *afb,
			     const enum surface_pixel_format format,
			     const enum dc_rotation_angle rotation,
			     const uint64_t tiling_flags,
			     union dc_tiling_info *tiling_info,
			     struct plane_size *plane_size,
			     struct dc_plane_dcc_param *dcc,
			     struct dc_plane_address *address,
			     bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = &afb->base;
	int ret;

	memset(tiling_info, 0, sizeof(*tiling_info));
	memset(plane_size, 0, sizeof(*plane_size));
	memset(dcc, 0, sizeof(*dcc));
	memset(address, 0, sizeof(*address));

	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		address->type = PLN_ADDR_TYPE_GRAPHICS;
		address->grph.addr.low_part = lower_32_bits(afb->address);
		address->grph.addr.high_part = upper_32_bits(afb->address);
	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
		uint64_t chroma_addr = afb->address + fb->offsets[1];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		plane_size->chroma_size.x = 0;
		plane_size->chroma_size.y = 0;
		/* TODO: set these based on surface format */
		plane_size->chroma_size.width = fb->width / 2;
		plane_size->chroma_size.height = fb->height / 2;

		plane_size->chroma_pitch =
			fb->pitches[1] / fb->format->cpp[1];

		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		address->video_progressive.luma_addr.low_part =
			lower_32_bits(afb->address);
		address->video_progressive.luma_addr.high_part =
			upper_32_bits(afb->address);
		address->video_progressive.chroma_addr.low_part =
			lower_32_bits(chroma_addr);
		address->video_progressive.chroma_addr.high_part =
			upper_32_bits(chroma_addr);
	}

	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		tiling_info->gfx8.num_banks = num_banks;
		tiling_info->gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		tiling_info->gfx8.tile_split = tile_split;
		tiling_info->gfx8.bank_width = bankw;
		tiling_info->gfx8.bank_height = bankh;
		tiling_info->gfx8.tile_aspect = mtaspect;
		tiling_info->gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	tiling_info->gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_VEGA12 ||
	    adev->asic_type == CHIP_VEGA20 ||
	    adev->asic_type == CHIP_NAVI10 ||
	    adev->asic_type == CHIP_NAVI14 ||
	    adev->asic_type == CHIP_NAVI12 ||
	    adev->asic_type == CHIP_RENOIR ||
	    adev->asic_type == CHIP_RAVEN) {
		/* Fill GFX9 params */
		tiling_info->gfx9.num_pipes =
			adev->gfx.config.gb_addr_config_fields.num_pipes;
		tiling_info->gfx9.num_banks =
			adev->gfx.config.gb_addr_config_fields.num_banks;
		tiling_info->gfx9.pipe_interleave =
			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
		tiling_info->gfx9.num_shader_engines =
			adev->gfx.config.gb_addr_config_fields.num_se;
		tiling_info->gfx9.max_compressed_frags =
			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
		tiling_info->gfx9.num_rb_per_se =
			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
		tiling_info->gfx9.swizzle =
			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
		tiling_info->gfx9.shaderEnable = 1;

		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
						plane_size, tiling_info,
						tiling_flags, dcc, address,
						force_disable_dcc);
		if (ret)
			return ret;
	}

	return 0;
}
static void
fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
			       bool *per_pixel_alpha, bool *global_alpha,
			       int *global_alpha_value)
{
	*per_pixel_alpha = false;
	*global_alpha = false;
	*global_alpha_value = 0xff;

	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
		return;

	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
		static const uint32_t alpha_formats[] = {
			DRM_FORMAT_ARGB8888,
			DRM_FORMAT_RGBA8888,
			DRM_FORMAT_ABGR8888,
		};
		uint32_t format = plane_state->fb->format->format;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
			if (format == alpha_formats[i]) {
				*per_pixel_alpha = true;
				break;
			}
		}
	}

	if (plane_state->alpha < 0xffff) {
		*global_alpha = true;
		*global_alpha_value = plane_state->alpha >> 8;
	}
}
static int
fill_plane_color_attributes(const struct drm_plane_state *plane_state,
			    const enum surface_pixel_format format,
			    enum dc_color_space *color_space)
{
	bool full_range;

	*color_space = COLOR_SPACE_SRGB;

	/* DRM color properties only affect non-RGB formats. */
	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
		return 0;

	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);

	switch (plane_state->color_encoding) {
	case DRM_COLOR_YCBCR_BT601:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR601;
		else
			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT709:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR709;
		else
			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT2020:
		if (full_range)
			*color_space = COLOR_SPACE_2020_YCBCR;
		else
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
			    const struct drm_plane_state *plane_state,
			    const uint64_t tiling_flags,
			    struct dc_plane_info *plane_info,
			    struct dc_plane_address *address,
			    bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = plane_state->fb;
	const struct amdgpu_framebuffer *afb =
		to_amdgpu_framebuffer(plane_state->fb);
	struct drm_format_name_buf format_name;
	int ret;

	memset(plane_info, 0, sizeof(*plane_info));

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_info->format =
			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
		break;
	case DRM_FORMAT_NV21:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	case DRM_FORMAT_P010:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
		break;
	default:
		DRM_ERROR(
			"Unsupported screen format %s\n",
			drm_get_format_name(fb->format->format, &format_name));
		return -EINVAL;
	}

	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_info->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address,
					   force_disable_dcc);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}
static int fill_dc_plane_attributes(struct amdgpu_device *adev,
				    struct dc_plane_state *dc_plane_state,
				    struct drm_plane_state *plane_state,
				    struct drm_crtc_state *crtc_state)
{
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	const struct amdgpu_framebuffer *amdgpu_fb =
		to_amdgpu_framebuffer(plane_state->fb);
	struct dc_scaling_info scaling_info;
	struct dc_plane_info plane_info;
	uint64_t tiling_flags;
	int ret;
	bool force_disable_dcc = false;

	ret = fill_dc_scaling_info(plane_state, &scaling_info);
	if (ret)
		return ret;

	dc_plane_state->src_rect = scaling_info.src_rect;
	dc_plane_state->dst_rect = scaling_info.dst_rect;
	dc_plane_state->clip_rect = scaling_info.clip_rect;
	dc_plane_state->scaling_quality = scaling_info.scaling_quality;

	ret = get_fb_info(amdgpu_fb, &tiling_flags);
	if (ret)
		return ret;

	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
					  &plane_info,
					  &dc_plane_state->address,
					  force_disable_dcc);
	if (ret)
		return ret;

	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
	dc_plane_state->plane_size = plane_info.plane_size;
	dc_plane_state->rotation = plane_info.rotation;
	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
	dc_plane_state->stereo_format = plane_info.stereo_format;
	dc_plane_state->tiling_info = plane_info.tiling_info;
	dc_plane_state->visible = plane_info.visible;
	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
	dc_plane_state->global_alpha = plane_info.global_alpha;
	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0

	/*
	 * Always set input transfer function, since plane state is refreshed
	 * every time.
	 */
	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
	if (ret)
		return ret;

	return 0;
}
static void update_stream_scaling_settings(const struct drm_display_mode *mode,
					   const struct dm_connector_state *dm_state,
					   struct dc_stream_state *stream)
{
	enum amdgpu_rmx_type rmx_type;

	struct rect src = { 0 }; /* viewport in composition space*/
	struct rect dst = { 0 }; /* stream addressable area */

	/* no mode. nothing to be done */
	if (!mode)
		return;

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	if (dm_state) {
		rmx_type = dm_state->scaling;
		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
			if (src.width * dst.height <
					src.height * dst.width) {
				/* height needs less upscaling/more downscaling */
				dst.width = src.width *
						dst.height / src.height;
			} else {
				/* width needs less upscaling/more downscaling */
				dst.height = src.height *
						dst.width / src.width;
			}
		} else if (rmx_type == RMX_CENTER) {
			dst = src;
		}

		dst.x = (stream->timing.h_addressable - dst.width) / 2;
		dst.y = (stream->timing.v_addressable - dst.height) / 2;

		if (dm_state->underscan_enable) {
			dst.x += dm_state->underscan_hborder / 2;
			dst.y += dm_state->underscan_vborder / 2;
			dst.width -= dm_state->underscan_hborder;
			dst.height -= dm_state->underscan_vborder;
		}
	}

	stream->src = src;
	stream->dst = dst;

	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
			dst.x, dst.y, dst.width, dst.height);

}
static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
				      const struct drm_connector_state *state,
				      bool is_y420)
{
	uint8_t bpc;

	if (is_y420) {
		bpc = 8;

		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
			bpc = 16;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
			bpc = 12;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
			bpc = 10;
	} else {
		bpc = (uint8_t)connector->display_info.bpc;
		/* Assume 8 bpc by default if no bpc is specified. */
		bpc = bpc ? bpc : 8;
	}

	if (!state)
		state = connector->state;

	if (state) {
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value for state->max_bpc may not be correctly updated
		 * depending on when the connector gets added to the state
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
		 */
		bpc = min(bpc, state->max_requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
	}

	switch (bpc) {
	case 0:
		/*
		 * Temporary Work around, DRM doesn't parse color depth for
		 * EDID revision before 1.4
		 * TODO: Fix edid parsing
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}
static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	/* 1-1 mapping, since both enums follow the HDMI spec. */
	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}
static enum dc_color_space
get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
{
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (dc_crtc_timing->pixel_encoding)	{
	case PIXEL_ENCODING_YCBCR422:
	case PIXEL_ENCODING_YCBCR444:
	case PIXEL_ENCODING_YCBCR420:
	{
		/*
		 * 27030khz is the separation point between HDTV and SDTV
		 * according to HDMI spec, we use YCbCr709 and YCbCr601
		 * respectively
		 */
		if (dc_crtc_timing->pix_clk_100hz > 270300) {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR709_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR709;
		} else {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR601_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR601;
		}
	}
	break;
	case PIXEL_ENCODING_RGB:
		color_space = COLOR_SPACE_SRGB;
		break;

	default:
		WARN_ON(1);
		break;
	}

	return color_space;
}
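/*
 * HDMI TMDS clock scales with colour depth: e.g. a 10 bpc stream needs
 * 30/24 = 1.25x the 8 bpc pixel clock, 12 bpc needs 1.5x and 16 bpc 2x.
 * The helper below walks down from the requested depth until the
 * normalized clock fits within the sink's max_tmds_clock.
 */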
static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
{
	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;

	do {
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/* Adjusting pix clock following on HDMI spec based on colour depth */
		switch (depth) {
		case COLOR_DEPTH_888:
			break;
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			/* The above depths are the only ones valid for HDMI. */
			return false;
		}
		if (normalized_clk <= info->max_tmds_clock) {
			timing_out->display_color_depth = depth;
			return true;
		}
	} while (--depth > COLOR_DEPTH_666);
	return false;
}
static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector, connector_state,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	if (old_stream) {
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	timing_out->h_addressable = mode_in->crtc_hdisplay;
	timing_out->h_total = mode_in->crtc_htotal;
	timing_out->h_sync_width =
		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
	timing_out->h_front_porch =
		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
	timing_out->v_total = mode_in->crtc_vtotal;
	timing_out->v_addressable = mode_in->crtc_vdisplay;
	timing_out->v_front_porch =
		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
	timing_out->v_sync_width =
		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}
}
static void fill_audio_info(struct audio_info *audio_info,
			    const struct drm_connector *drm_connector,
			    const struct dc_sink *dc_sink)
{
	int i = 0;
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;

	strscpy(audio_info->display_name,
		edid_caps->display_name,
		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	if (cea_revision >= 3) {
		audio_info->mode_count = edid_caps->audio_mode_count;

		for (i = 0; i < audio_info->mode_count; ++i) {
			audio_info->modes[i].format_code =
					(enum audio_format_code)
					(edid_caps->audio_modes[i].format_code);
			audio_info->modes[i].channel_count =
					edid_caps->audio_modes[i].channel_count;
			audio_info->modes[i].sample_rates.all =
					edid_caps->audio_modes[i].sample_rate;
			audio_info->modes[i].sample_size =
					edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */

}
static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
				      struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}
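/*
 * decide_crtc_timing_for_drm_display_mode() patches the requested mode with
 * the native CRTC timing whenever scaling is enabled, or when the requested
 * mode already matches the native clock and h/v totals; otherwise the mode
 * is left untouched.
 */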
static void
decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
					const struct drm_display_mode *native_mode,
					bool scale_enabled)
{
	if (scale_enabled) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else if (native_mode->clock == drm_mode->clock &&
		   native_mode->htotal == drm_mode->htotal &&
		   native_mode->vtotal == drm_mode->vtotal) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else {
		/* no scaling nor amdgpu inserted, no need to patch */
	}
}
static struct dc_sink *
create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DRM_ERROR("Failed to create sink!\n");
		return NULL;
	}
	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;

	return sink;
}
static void set_multisync_trigger_params(
		struct dc_stream_state *stream)
{
	if (stream->triggered_crtc_reset.enabled) {
		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
	}
}
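/*
 * set_master_stream() elects the stream with the highest refresh rate as the
 * multisync master. The refresh rate falls out of the timing as
 *
 *   refresh = (pix_clk_100hz * 100) / (h_total * v_total)
 *
 * e.g. for a standard 148.5 MHz 1920x1080 timing (pix_clk_100hz = 1485000,
 * h_total = 2200, v_total = 1125) this gives 148500000 / 2475000 = 60 Hz.
 */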
static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j])
			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
	}
}
static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;

	if (context->stream_count < 2)
		return;
	for (i = 0; i < context->stream_count; i++) {
		if (!context->streams[i])
			continue;
		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag
		 * For now it's set to false
		 */
		set_multisync_trigger_params(context->streams[i]);
	}
	set_master_stream(context->streams, context->stream_count);
}
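/*
 * create_stream_for_sink() is the central DRM-mode-to-DC-stream translation:
 * it picks (or fakes) a sink, derives the CRTC timing from the preferred or
 * requested mode, fills scaling, audio and signal info, and optionally
 * enables DSC and PSR/VSC infopackets for DP sinks.
 */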
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state =
		dm_state ? &dm_state->base : NULL;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_dec_dpcd_caps dsc_caps;
#endif
	uint32_t link_bandwidth_kbps;

	struct dc_sink *sink = NULL;

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous
		 * mode and the modelist may not be filled in yet.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state ? (dm_state->scaling != RMX_OFF) : false);
		preferred_refresh = drm_mode_vrefresh(preferred_mode);
	}

	if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	/*
	 * If scaling is enabled and the refresh rate didn't change,
	 * we copy the vic and polarities of the old timings.
	 */
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, NULL);
	else
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, old_stream);

	stream->timing.flags.DSC = 0;

	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
				      &dsc_caps);
#endif
		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
							     dc_link_get_link_cap(aconnector->dc_link));

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (dsc_caps.is_dsc_supported)
			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						  &dsc_caps,
						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						  link_bandwidth_kbps,
						  &stream->timing,
						  &stream->timing.dsc_cfg))
				stream->timing.flags.DSC = 1;
#endif
	}

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
	if (stream->link->psr_feature_enabled) {
		struct dc *core_dc = stream->link->ctx->dc;

		if (dc_is_dmcu_initialized(core_dc)) {
			struct dmcu *dmcu = core_dc->res_pool->dmcu;

			stream->psr_version = dmcu->dmcu_version.psr_version;

			/*
			 * Decide whether the stream supports VSC SDP
			 * colorimetry before building the VSC info packet.
			 */
			stream->use_vsc_sdp_for_colorimetry = false;
			if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
				stream->use_vsc_sdp_for_colorimetry =
					aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
			} else {
				if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
				    stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
					stream->use_vsc_sdp_for_colorimetry = true;
			}
			mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
		}
	}
finish:
	dc_sink_release(sink);

	return stream;
}
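/*
 * The helpers below implement the drm_crtc_funcs state lifecycle for
 * dm_crtc_state: dm_crtc_destroy_state drops the dc_stream reference,
 * dm_crtc_reset_state installs a fresh zeroed state, and
 * dm_crtc_duplicate_state copies the software state while retaining the
 * stream.
 */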
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}

static void dm_crtc_destroy_state(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	struct dm_crtc_state *cur = to_dm_crtc_state(state);

	/* TODO: Destroy dc_stream objects once the stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
}

static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state;

	if (crtc->state)
		dm_crtc_destroy_state(crtc, crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (WARN_ON(!state))
		return;

	crtc->state = &state->base;
	crtc->state->crtc = crtc;
}
static struct drm_crtc_state *
dm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state, *cur;

	cur = to_dm_crtc_state(crtc->state);

	if (WARN_ON(!crtc->state))
		return NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	if (cur->stream) {
		state->stream = cur->stream;
		dc_stream_retain(state->stream);
	}

	state->active_planes = cur->active_planes;
	state->interrupts_enabled = cur->interrupts_enabled;
	state->vrr_params = cur->vrr_params;
	state->vrr_infopacket = cur->vrr_infopacket;
	state->abm_level = cur->abm_level;
	state->vrr_supported = cur->vrr_supported;
	state->freesync_config = cur->freesync_config;
	state->crc_src = cur->crc_src;
	state->cm_has_degamma = cur->cm_has_degamma;
	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;

	/* TODO: Duplicate the dc_stream once the stream object is flattened */

	return &state->base;
}
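/*
 * VUPDATE interrupts are only used on pre-DCN (DCE) hardware, where they
 * stand in for a VRR-aware vblank: dm_set_vblank() below requests the
 * VUPDATE irq alongside vblank only while VRR is active.
 */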
static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	int rc;

	/* Do not set vupdate for DCN hardware */
	if (adev->family > AMDGPU_FAMILY_AI)
		return 0;

	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;

	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;

	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
			 acrtc->crtc_id, enable ? "en" : "dis", rc);
	return rc;
}
static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
	int rc = 0;

	if (enable) {
		/* vblank irq on -> Only need vupdate irq in vrr mode */
		if (amdgpu_dm_vrr_active(acrtc_state))
			rc = dm_set_vupdate_irq(crtc, true);
	} else {
		/* vblank irq off -> vupdate irq off */
		rc = dm_set_vupdate_irq(crtc, false);
	}

	if (rc)
		return rc;

	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
}

static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}

static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}
/* Implements only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool connected;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/*
	 * Notes:
	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of user-mode ioctl. Which
	 *    makes it a bad place for *any* MST-related activity.
	 */

	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
	    !aconnector->fake_enable)
		connected = (aconnector->dc_sink != NULL);
	else
		connected = (aconnector->base.force == DRM_FORCE_ON);

	return (connected ? connector_status_connected :
			connector_status_disconnected);
}
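/*
 * The two property accessors below translate between the generic DRM
 * connector properties (scaling mode, underscan borders, ABM level) and the
 * values cached in dm_connector_state; both return -EINVAL for properties
 * they do not own.
 */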
int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *connector_state,
					    struct drm_property *property,
					    uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_connector_state *dm_old_state =
		to_dm_connector_state(connector->state);
	struct dm_connector_state *dm_new_state =
		to_dm_connector_state(connector_state);

	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		enum amdgpu_rmx_type rmx_type;

		switch (val) {
		case DRM_MODE_SCALE_CENTER:
			rmx_type = RMX_CENTER;
			break;
		case DRM_MODE_SCALE_ASPECT:
			rmx_type = RMX_ASPECT;
			break;
		case DRM_MODE_SCALE_FULLSCREEN:
			rmx_type = RMX_FULL;
			break;
		case DRM_MODE_SCALE_NONE:
		default:
			rmx_type = RMX_OFF;
			break;
		}

		if (dm_old_state->scaling == rmx_type)
			return 0;

		dm_new_state->scaling = rmx_type;
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		dm_new_state->underscan_hborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		dm_new_state->underscan_vborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		dm_new_state->underscan_enable = val;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		dm_new_state->abm_level = val;
		ret = 0;
	}

	return ret;
}
int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_connector_state *dm_state =
		to_dm_connector_state(state);
	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		switch (dm_state->scaling) {
		case RMX_CENTER:
			*val = DRM_MODE_SCALE_CENTER;
			break;
		case RMX_ASPECT:
			*val = DRM_MODE_SCALE_ASPECT;
			break;
		case RMX_FULL:
			*val = DRM_MODE_SCALE_FULLSCREEN;
			break;
		case RMX_OFF:
		default:
			*val = DRM_MODE_SCALE_NONE;
			break;
		}
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		*val = dm_state->underscan_hborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		*val = dm_state->underscan_vborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		*val = dm_state->underscan_enable;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		*val = dm_state->abm_level;
		ret = 0;
	}

	return ret;
}
static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}

static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none &&
	    dm->backlight_dev) {
		backlight_device_unregister(dm->backlight_dev);
		dm->backlight_dev = NULL;
	}
#endif

	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	if (aconnector->i2c) {
		i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	}
	kfree(aconnector->dm_dp_aux.aux.name);

	kfree(connector);
}
void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	if (connector->state)
		__drm_atomic_helper_connector_destroy_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;
		state->base.max_requested_bpc = 8;
		state->vcpi_slots = 0;
		state->pbn = 0;
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			state->abm_level = amdgpu_dm_abm_level;

		__drm_atomic_helper_connector_reset(connector, &state->base);
	}
}
struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	struct dm_connector_state *new_state =
			kmemdup(state, sizeof(*state), GFP_KERNEL);

	if (!new_state)
		return NULL;

	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);

	new_state->freesync_capable = state->freesync_capable;
	new_state->abm_level = state->abm_level;
	new_state->scaling = state->scaling;
	new_state->underscan_enable = state->underscan_enable;
	new_state->underscan_hborder = state->underscan_hborder;
	new_state->underscan_vborder = state->underscan_vborder;
	new_state->vcpi_slots = state->vcpi_slots;
	new_state->pbn = state->pbn;
	return &new_state->base;
}
static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;

	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
		if (r)
			return r;
	}

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister
};

static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
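/*
 * create_eml_sink() backs a forced-on connector with an emulated sink built
 * from the EDID firmware blob attached to the connector, so a display can be
 * lit up without a physical hotplug; handle_edid_mgmt() below triggers this
 * path and, for DP, seeds sane verified link caps first.
 */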
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
			  aconnector->base.name);

		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	if (aconnector->base.force == DRM_FORCE_ON) {
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
		aconnector->dc_link->local_sink :
		aconnector->dc_em_sink;
		dc_sink_retain(aconnector->dc_sink);
	}
}
static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/*
	 * In case of a headless boot with force on for a DP-managed
	 * connector, these settings have to be != 0 to get an initial
	 * modeset.
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}

	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
				   struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	struct amdgpu_device *adev = connector->dev->dev_private;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	enum dc_status dc_result = DC_OK;

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID mgmt.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
	    !aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto fail;
	}

	dc_result = dc_validate_stream(adev->dm.dc, stream);

	if (dc_result == DC_OK)
		result = MODE_OK;
	else
		DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
			      mode->hdisplay,
			      mode->vdisplay,
			      mode->clock,
			      dc_result);

	dc_stream_release(stream);

fail:
	/* TODO: error handling */
	return result;
}
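/*
 * fill_hdr_info_packet() packs the connector's HDR static metadata into the
 * transport-specific container DC expects: an HDMI Dynamic Range and
 * Mastering infoframe (type 0x87) or a DP SDP carrying the same payload.
 * Only the headers differ per connector type; the fixed 26-byte payload is
 * copied verbatim from the packed DRM infoframe.
 */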
static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));

	if (!state->hdr_output_metadata)
		return 0;

	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
	if (ret)
		return ret;

	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
	if (len < 0)
		return (int)len;

	/* Static metadata is a fixed 26 bytes + 4 byte header. */
	if (len != 30)
		return -EINVAL;

	/* Prepare the infopacket for DC. */
	switch (state->connector->connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		out->hb0 = 0x87; /* type */
		out->hb1 = 0x01; /* version */
		out->hb2 = 0x1A; /* length */
		out->sb[0] = buf[3]; /* checksum */
		i = 1;
		break;

	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		out->hb0 = 0x00; /* sdp id, zero */
		out->hb1 = 0x87; /* type */
		out->hb2 = 0x1D; /* payload len - 1 */
		out->hb3 = (0x13 << 2); /* sdp version */
		out->sb[0] = 0x01; /* version */
		out->sb[1] = 0x1A; /* length */
		i = 2;
		break;

	default:
		return -EINVAL;
	}

	memcpy(&out->sb[i], &buf[4], 26);
	out->valid = true;

	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
		       sizeof(out->sb), false);

	return 0;
}
static bool
is_hdr_metadata_different(const struct drm_connector_state *old_state,
			  const struct drm_connector_state *new_state)
{
	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;

	if (old_blob != new_blob) {
		if (old_blob && new_blob &&
		    old_blob->length == new_blob->length)
			return memcmp(old_blob->data, new_blob->data,
				      old_blob->length);

		return true;
	}

	return false;
}
static int
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
				 struct drm_atomic_state *state)
{
	struct drm_connector_state *new_con_state =
		drm_atomic_get_new_connector_state(state, conn);
	struct drm_connector_state *old_con_state =
		drm_atomic_get_old_connector_state(state, conn);
	struct drm_crtc *crtc = new_con_state->crtc;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	if (!crtc)
		return 0;

	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
		struct dc_info_packet hdr_infopacket;

		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
		if (ret)
			return ret;

		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(new_crtc_state))
			return PTR_ERR(new_crtc_state);

		/*
		 * DC considers the stream backends changed if the
		 * static metadata changes. Forcing the modeset also
		 * gives a simple way for userspace to switch from
		 * 8bpc to 10bpc when setting the metadata to enter
		 * or exit HDR.
		 *
		 * Changing the static metadata after it's been
		 * set is permissible, however. So only force a
		 * modeset if we're entering or exiting HDR.
		 */
		new_crtc_state->mode_changed =
			!old_con_state->hdr_output_metadata ||
			!new_con_state->hdr_output_metadata;
	}

	return 0;
}
static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplugging a second, bigger display in FB Con mode, the bigger
	 * resolution modes will be filtered by drm_mode_validate_size() and
	 * go missing once the user starts lightdm. So we need to renew the
	 * mode list in the get_modes callback, not just return the mode
	 * count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_check = amdgpu_dm_connector_atomic_check,
};
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}

static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = new_crtc_state->crtc->dev;
	struct drm_plane *plane;

	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			return true;
	}

	return false;
}
static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
{
	struct drm_atomic_state *state = new_crtc_state->state;
	struct drm_plane *plane;
	int num_active = 0;

	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
		struct drm_plane_state *new_plane_state;

		/* Cursor planes are "fake". */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		new_plane_state = drm_atomic_get_new_plane_state(state, plane);

		if (!new_plane_state) {
			/*
			 * The plane is enabled on the CRTC and hasn't changed
			 * state. This means that it previously passed
			 * validation and is therefore enabled.
			 */
			num_active += 1;
			continue;
		}

		/* We need a framebuffer to be considered enabled. */
		num_active += (new_plane_state->fb != NULL);
	}

	return num_active;
}
/*
 * Sets whether interrupts should be enabled on a specific CRTC.
 * We require that the stream be enabled and that there exist active
 * DC planes on the stream.
 */
static void
dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
			       struct drm_crtc_state *new_crtc_state)
{
	struct dm_crtc_state *dm_new_crtc_state =
		to_dm_crtc_state(new_crtc_state);

	dm_new_crtc_state->active_planes = 0;
	dm_new_crtc_state->interrupts_enabled = false;

	if (!dm_new_crtc_state->stream)
		return;

	dm_new_crtc_state->active_planes =
		count_crtc_active_planes(new_crtc_state);

	dm_new_crtc_state->interrupts_enabled =
		dm_new_crtc_state->active_planes > 0;
}
static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
				       struct drm_crtc_state *state)
{
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct dc *dc = adev->dm.dc;
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
	int ret = -EINVAL;

	/*
	 * Update interrupt state for the CRTC. This needs to happen whenever
	 * the CRTC has changed or whenever any of its planes have changed.
	 * Atomic check satisfies both of these requirements since the CRTC
	 * is added to the state by DRM during drm_atomic_helper_check_planes.
	 */
	dm_update_crtc_interrupt_state(crtc, state);

	if (unlikely(!dm_crtc_state->stream &&
		     modeset_required(state, NULL, dm_crtc_state->stream))) {
		WARN_ON(1);
		return ret;
	}

	/* In some use cases, like reset, no stream is attached */
	if (!dm_crtc_state->stream)
		return 0;

	/*
	 * We want at least one hardware plane enabled to use
	 * the stream with a cursor enabled.
	 */
	if (state->enable && state->active &&
	    does_crtc_have_active_cursor(state) &&
	    dm_crtc_state->active_planes == 0)
		return -EINVAL;

	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
		return 0;

	return ret;
}
static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
				      const struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}

static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup,
	.get_scanout_position = amdgpu_crtc_get_scanout_position,
};

static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}
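/*
 * For MST, the encoder atomic check computes the payload bandwidth number
 * (PBN) from the adjusted mode clock and the effective bpp (3 components
 * times the negotiated per-component depth) and atomically reserves VCPI
 * slots for it; a negative slot count propagates the failure to the caller.
 */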
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	enum dc_color_depth color_depth;
	int clock, bpp = 0;
	bool is_y420 = false;

	if (!aconnector->port || !aconnector->dc_sink)
		return 0;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
		return 0;

	if (!state->duplicated) {
		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
				aconnector->force_yuv420_output;
		color_depth = convert_color_depth_from_display_info(connector, conn_state,
								    is_y420);
		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
		clock = adjusted_mode->clock;
		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
	}
	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
									   mst_mgr,
									   mst_port,
									   dm_new_connector_state->pbn,
									   0);
	if (dm_new_connector_state->vcpi_slots < 0) {
		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
		return dm_new_connector_state->vcpi_slots;
	}
	return 0;
}
const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};
#if defined(CONFIG_DRM_AMD_DC_DCN)
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
					    struct dc_state *dc_state)
{
	struct dc_stream_state *stream = NULL;
	struct drm_connector *connector;
	struct drm_connector_state *new_con_state, *old_con_state;
	struct amdgpu_dm_connector *aconnector;
	struct dm_connector_state *dm_conn_state;
	int i, j, clock, bpp;
	int vcpi, pbn_div, pbn = 0;

	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {

		aconnector = to_amdgpu_dm_connector(connector);

		if (!aconnector->port)
			continue;

		if (!new_con_state || !new_con_state->crtc)
			continue;

		dm_conn_state = to_dm_connector_state(new_con_state);

		for (j = 0; j < dc_state->stream_count; j++) {
			stream = dc_state->streams[j];
			if (!stream)
				continue;

			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
				break;

			stream = NULL;
		}

		if (!stream)
			continue;

		if (stream->timing.flags.DSC != 1) {
			drm_dp_mst_atomic_enable_dsc(state,
						     aconnector->port,
						     dm_conn_state->pbn,
						     0,
						     false);
			continue;
		}

		pbn_div = dm_mst_get_pbn_divider(stream->link);
		bpp = stream->timing.dsc_cfg.bits_per_pixel;
		clock = stream->timing.pix_clk_100hz / 10;
		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
		vcpi = drm_dp_mst_atomic_enable_dsc(state,
						    aconnector->port,
						    pbn, pbn_div,
						    true);
		if (vcpi < 0)
			return vcpi;

		dm_conn_state->pbn = pbn;
		dm_conn_state->vcpi_slots = vcpi;
	}
	return 0;
}
#endif
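/*
 * dm_plane_state mirrors the CRTC pattern above: the helpers below reset,
 * duplicate and destroy the wrapper while ref-counting the embedded
 * dc_plane_state.
 */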
static void dm_drm_plane_reset(struct drm_plane *plane)
{
	struct dm_plane_state *amdgpu_state = NULL;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);

	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
	WARN_ON(amdgpu_state == NULL);

	if (amdgpu_state)
		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
}

static struct drm_plane_state *
dm_drm_plane_duplicate_state(struct drm_plane *plane)
{
	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;

	old_dm_plane_state = to_dm_plane_state(plane->state);
	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
	if (!dm_plane_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);

	if (old_dm_plane_state->dc_state) {
		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
		dc_plane_state_retain(dm_plane_state->dc_state);
	}

	return &dm_plane_state->base;
}

void dm_drm_plane_destroy_state(struct drm_plane *plane,
				struct drm_plane_state *state)
{
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);

	if (dm_plane_state->dc_state)
		dc_plane_state_release(dm_plane_state->dc_state);

	drm_atomic_helper_plane_destroy_state(plane, state);
}

static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.destroy	= drm_primary_helper_destroy,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
};
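/*
 * prepare_fb pins the framebuffer BO (VRAM for cursors, any supported
 * domain otherwise), binds it into GART, snapshots its tiling flags and GPU
 * address, and, when the dc_plane_state changed, re-derives the DC buffer
 * attributes from them.
 */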
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	struct list_head list;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	uint64_t tiling_flags;
	uint32_t domain;
	int r;
	bool force_disable_dcc = false;

	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	if (!new_state->fb) {
		DRM_DEBUG_DRIVER("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = new_state->fb->obj[0];
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
	INIT_LIST_HEAD(&list);

	tv.bo = &rbo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
	if (r) {
		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
		return r;
	}

	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		amdgpu_bo_unpin(rbo);
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("%p bind failed\n", rbo);
		return r;
	}

	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);

	ttm_eu_backoff_reservation(&ticket, &list);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	amdgpu_bo_ref(rbo);

	if (dm_plane_state_new->dc_state &&
	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;

		force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
		fill_plane_buffer_attributes(
			adev, afb, plane_state->format, plane_state->rotation,
			tiling_flags, &plane_state->tiling_info,
			&plane_state->plane_size, &plane_state->dcc,
			&plane_state->address,
			force_disable_dcc);
	}

	return 0;
}
static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
				       struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!old_state->fb)
		return;

	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
	amdgpu_bo_unref(&rbo);
}
static int dm_plane_atomic_check(struct drm_plane *plane,
				 struct drm_plane_state *state)
{
	struct amdgpu_device *adev = plane->dev->dev_private;
	struct dc *dc = adev->dm.dc;
	struct dm_plane_state *dm_plane_state;
	struct dc_scaling_info scaling_info;
	int ret;

	dm_plane_state = to_dm_plane_state(state);

	if (!dm_plane_state->dc_state)
		return 0;

	ret = fill_dc_scaling_info(state, &scaling_info);
	if (ret)
		return ret;

	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
		return 0;

	return -EINVAL;
}
static int dm_plane_atomic_async_check(struct drm_plane *plane,
				       struct drm_plane_state *new_plane_state)
{
	/* Only support async updates on cursor planes. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		return -EINVAL;

	return 0;
}

static void dm_plane_atomic_async_update(struct drm_plane *plane,
					 struct drm_plane_state *new_state)
{
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(new_state->state, plane);

	swap(plane->state->fb, new_state->fb);

	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_w = new_state->src_w;
	plane->state->src_h = new_state->src_h;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->crtc_h = new_state->crtc_h;

	handle_cursor_update(plane, old_state);
}
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
	.atomic_async_check = dm_plane_atomic_async_check,
	.atomic_async_update = dm_plane_atomic_async_update
};

/*
 * TODO: these are currently initialized to rgb formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so internal drm
 * check will succeed, and let DC implement proper check
 */
static const uint32_t rgb_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
};

static const uint32_t overlay_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
};

static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};
static int get_plane_formats(const struct drm_plane *plane,
			     const struct dc_plane_cap *plane_cap,
			     uint32_t *formats, int max_formats)
{
	int i, num_formats = 0;

	/*
	 * TODO: Query support for each group of formats directly from
	 * DC plane caps. This will require adding more formats to the
	 * caps list.
	 */

	switch (plane->type) {
	case DRM_PLANE_TYPE_PRIMARY:
		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = rgb_formats[i];
		}

		if (plane_cap && plane_cap->pixel_format_support.nv12)
			formats[num_formats++] = DRM_FORMAT_NV12;
		if (plane_cap && plane_cap->pixel_format_support.p010)
			formats[num_formats++] = DRM_FORMAT_P010;
		break;

	case DRM_PLANE_TYPE_OVERLAY:
		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = overlay_formats[i];
		}
		break;

	case DRM_PLANE_TYPE_CURSOR:
		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = cursor_formats[i];
		}
		break;
	}

	return num_formats;
}
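/*
 * amdgpu_dm_plane_init() registers the plane with the format list assembled
 * above and then attaches the optional properties the DC caps allow:
 * alpha/blend modes on overlays with per-pixel alpha, and YCbCr color
 * encoding/range properties on primaries that can scan out NV12/P010.
 */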
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap)
{
	uint32_t formats[32];
	int num_formats;
	int res = -EPERM;

	num_formats = get_plane_formats(plane, plane_cap, formats,
					ARRAY_SIZE(formats));

	res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
				       &dm_plane_funcs, formats, num_formats,
				       NULL, plane->type, NULL);
	if (res)
		return res;

	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
	    plane_cap && plane_cap->per_pixel_alpha) {
		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
					  BIT(DRM_MODE_BLEND_PREMULTI);

		drm_plane_create_alpha_property(plane);
		drm_plane_create_blend_mode_property(plane, blend_caps);
	}

	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
	    plane_cap &&
	    (plane_cap->pixel_format_support.nv12 ||
	     plane_cap->pixel_format_support.p010)) {
		/* This only affects YUV formats. */
		drm_plane_create_color_properties(
			plane,
			BIT(DRM_COLOR_YCBCR_BT601) |
			BIT(DRM_COLOR_YCBCR_BT709) |
			BIT(DRM_COLOR_YCBCR_BT2020),
			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
	}

	drm_plane_helper_add(plane, &dm_plane_helper_funcs);

	/* Create (reset) the plane state */
	if (plane->funcs->reset)
		plane->funcs->reset(plane);

	return 0;
}
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t crtc_index)
{
	struct amdgpu_crtc *acrtc = NULL;
	struct drm_plane *cursor_plane;

	int res = -ENOMEM;

	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
	if (!cursor_plane)
		goto fail;

	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);

	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
	if (!acrtc)
		goto fail;

	res = drm_crtc_init_with_planes(
			dm->ddev,
			&acrtc->base,
			plane,
			cursor_plane,
			&amdgpu_dm_crtc_funcs, NULL);

	if (res)
		goto fail;

	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);

	/* Create (reset) the plane state */
	if (acrtc->base.funcs->reset)
		acrtc->base.funcs->reset(&acrtc->base);

	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;

	acrtc->crtc_id = crtc_index;
	acrtc->base.enabled = false;
	acrtc->otg_inst = -1;

	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
				   true, MAX_COLOR_LUT_ENTRIES);
	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);

	return 0;

fail:
	kfree(acrtc);
	kfree(cursor_plane);
	return res;
}
static int to_drm_connector_type(enum signal_type st)
{
	switch (st) {
	case SIGNAL_TYPE_HDMI_TYPE_A:
		return DRM_MODE_CONNECTOR_HDMIA;
	case SIGNAL_TYPE_EDP:
		return DRM_MODE_CONNECTOR_eDP;
	case SIGNAL_TYPE_LVDS:
		return DRM_MODE_CONNECTOR_LVDS;
	case SIGNAL_TYPE_RGB:
		return DRM_MODE_CONNECTOR_VGA;
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		return DRM_MODE_CONNECTOR_DisplayPort;
	case SIGNAL_TYPE_DVI_DUAL_LINK:
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
		return DRM_MODE_CONNECTOR_DVID;
	case SIGNAL_TYPE_VIRTUAL:
		return DRM_MODE_CONNECTOR_VIRTUAL;
	default:
		return DRM_MODE_CONNECTOR_Unknown;
	}
}

static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder;

	/* There is only one encoder per connector */
	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder;

	return NULL;
}
static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}
	}
}
static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
			     char *name,
			     int hdisplay, int vdisplay)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;

	mode = drm_mode_duplicate(dev, native_mode);

	if (mode == NULL)
		return NULL;

	mode->hdisplay = hdisplay;
	mode->vdisplay = vdisplay;
	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);

	return mode;
}
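/*
 * amdgpu_dm_connector_add_common_modes() offers a fixed list of common
 * resolutions derived from the native mode, skipping anything larger than
 * the native mode, the native resolution itself, and entries already
 * reported via EDID.
 */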
static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
						 struct drm_connector *connector)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);
	int i;
	int n;
	struct mode_size {
		char name[DRM_DISPLAY_MODE_LEN];
		int w;
		int h;
	} common_modes[] = {
		{  "640x480",  640,  480},
		{  "800x600",  800,  600},
		{ "1024x768", 1024,  768},
		{ "1280x720", 1280,  720},
		{ "1280x800", 1280,  800},
		{"1280x1024", 1280, 1024},
		{ "1440x900", 1440,  900},
		{"1680x1050", 1680, 1050},
		{"1600x1200", 1600, 1200},
		{"1920x1080", 1920, 1080},
		{"1920x1200", 1920, 1200}
	};

	n = ARRAY_SIZE(common_modes);

	for (i = 0; i < n; i++) {
		struct drm_display_mode *curmode = NULL;
		bool mode_existed = false;

		if (common_modes[i].w > native_mode->hdisplay ||
		    common_modes[i].h > native_mode->vdisplay ||
		    (common_modes[i].w == native_mode->hdisplay &&
		     common_modes[i].h == native_mode->vdisplay))
			continue;

		list_for_each_entry(curmode, &connector->probed_modes, head) {
			if (common_modes[i].w == curmode->hdisplay &&
			    common_modes[i].h == curmode->vdisplay) {
				mode_existed = true;
				break;
			}
		}

		if (mode_existed)
			continue;

		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
	}
}
static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
					      struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_dm_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		/*
		 * Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since the EDID can have more
		 * than one preferred mode. Modes later in the probed mode
		 * list could be of higher and preferred resolution, e.g. a
		 * 3840x2160 preferred timing in the base EDID and a
		 * 4096x2160 preferred resolution in a later DID extension
		 * block.
		 */
		drm_mode_sort(&connector->probed_modes);
		amdgpu_dm_get_native_mode(connector);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}
static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_dm_connector->edid;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (!edid || !drm_edid_is_valid(edid)) {
		amdgpu_dm_connector->num_modes =
				drm_add_modes_noedid(connector, 640, 480);
	} else {
		amdgpu_dm_connector_ddc_get_modes(connector, edid);
		amdgpu_dm_connector_add_common_modes(encoder, connector);
	}
	amdgpu_dm_fbc_init(connector);

	return amdgpu_dm_connector->num_modes;
}
void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = dm->ddev->dev_private;

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD hot plug support. The default value of
	 * connector->polled is 0, which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    dc_is_dmcu_initialized(adev->dm.dc)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_object_attach_property(
			&aconnector->base.base,
			dm->ddev->mode_config.hdr_output_metadata_property, 0);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}
static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
			      struct i2c_msg *msgs, int num)
{
	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
	struct ddc_service *ddc_service = i2c->ddc_service;
	struct i2c_command cmd;
	int i;
	int result = -EIO;

	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

	if (!cmd.payloads)
		return result;

	cmd.number_of_payloads = num;
	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
	cmd.speed = 100;

	for (i = 0; i < num; i++) {
		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
		cmd.payloads[i].address = msgs[i].addr;
		cmd.payloads[i].length = msgs[i].len;
		cmd.payloads[i].data = msgs[i].buf;
	}

	if (dc_submit_i2c(
			ddc_service->ctx->dc,
			ddc_service->ddc_pin->hw_info.ddc_channel,
			&cmd))
		result = num;

	kfree(cmd.payloads);
	return result;
}

static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};
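/*
 * create_i2c() wraps a DC ddc_service in a Linux i2c_adapter so DDC/EDID
 * transactions from the core go through dc_submit_i2c() on the link's DDC
 * channel.
 */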
static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	   int link_index,
	   int *res)
{
	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	i2c_set_adapdata(&i2c->base, i2c);
	i2c->ddc_service = ddc_service;
	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

	return i2c;
}
/*
 * Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *aconnector,
				    uint32_t link_index,
				    struct amdgpu_encoder *aencoder)
{
	int res = 0;
	int connector_type;
	struct dc *dc = dm->dc;
	struct dc_link *link = dc_get_link_at_index(dc, link_index);
	struct amdgpu_i2c_adapter *i2c;

	link->priv = aconnector;

	DRM_DEBUG_DRIVER("%s()\n", __func__);

	i2c = create_i2c(link->ddc, link->link_index, &res);
	if (!i2c) {
		DRM_ERROR("Failed to create i2c adapter data\n");
		return -ENOMEM;
	}

	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);

	if (res) {
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
		goto out_free;
	}

	connector_type = to_drm_connector_type(link->connector_signal);

	res = drm_connector_init_with_ddc(
			dm->ddev,
			&aconnector->base,
			&amdgpu_dm_connector_funcs,
			connector_type,
			&i2c->base);

	if (res) {
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;
		goto out_free;
	}

	drm_connector_helper_add(
			&aconnector->base,
			&amdgpu_dm_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		dm,
		aconnector,
		connector_type,
		link,
		link_index);

	drm_connector_attach_encoder(
		&aconnector->base, &aencoder->base);

	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
		|| connector_type == DRM_MODE_CONNECTOR_eDP)
		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);

out_free:
	if (res) {
		kfree(i2c);
		aconnector->i2c = NULL;
	}

	return res;
}
int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
{
	switch (adev->mode_info.num_crtc) {
	case 1:
		return 0x1;
	case 2:
		return 0x3;
	case 3:
		return 0x7;
	case 4:
		return 0xf;
	case 5:
		return 0x1f;
	case 6:
	default:
		return 0x3f;
	}
}
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index)
{
	struct amdgpu_device *adev = dev->dev_private;

	int res = drm_encoder_init(dev,
				   &aencoder->base,
				   &amdgpu_dm_encoder_funcs,
				   DRM_MODE_ENCODER_TMDS,
				   NULL);

	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

	if (!res)
		aencoder->encoder_id = link_index;
	else
		aencoder->encoder_id = -1;

	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);

	return res;
}
static void manage_dm_interrupts(struct amdgpu_device *adev,
				 struct amdgpu_crtc *acrtc,
				 bool enable)
{
	/*
	 * This is not the correct translation, but it will work as long as
	 * the VBLANK constant is the same as PFLIP.
	 */
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(
			adev,
			acrtc->crtc_id);

	if (enable) {
		drm_crtc_vblank_on(&acrtc->base);
		amdgpu_irq_get(
			adev,
			&adev->pageflip_irq,
			irq_type);
	} else {
		amdgpu_irq_put(
			adev,
			&adev->pageflip_irq,
			irq_type);
		drm_crtc_vblank_off(&acrtc->base);
	}
}
static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
			   const struct dm_connector_state *old_dm_state)
{
	if (dm_state->scaling != old_dm_state->scaling)
		return true;
	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
		return true;
	return false;
}
#ifdef CONFIG_DRM_AMD_DC_HDCP
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re-enabled, ignore this */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/*
	 * Check if something is connected and enabled; otherwise we would
	 * start hdcp with nothing connected/enabled (hot-plug, headless S3,
	 * dpms).
	 */
	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
	    aconnector->dc_sink != NULL)
		return true;

	if (old_state->content_protection == state->content_protection)
		return false;

	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		return true;

	return false;
}
#endif
static void remove_stream(struct amdgpu_device *adev,
			  struct amdgpu_crtc *acrtc,
			  struct dc_stream_state *stream)
{
	/* this is the update mode case */

	acrtc->otg_inst = -1;
	acrtc->enabled = false;
}
static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
			       struct dc_cursor_position *position)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int x, y;
	int xorigin = 0, yorigin = 0;

	position->enable = false;
	position->x = 0;
	position->y = 0;

	if (!crtc || !plane->state->fb)
		return 0;

	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
			  __func__,
			  plane->state->crtc_w,
			  plane->state->crtc_h);
		return -EINVAL;
	}

	x = plane->state->crtc_x;
	y = plane->state->crtc_y;

	if (x <= -amdgpu_crtc->max_cursor_width ||
	    y <= -amdgpu_crtc->max_cursor_height)
		return 0;

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}
	position->enable = true;
	position->translate_by_source = true;
	position->x = x;
	position->y = y;
	position->x_hotspot = xorigin;
	position->y_hotspot = yorigin;

	return 0;
}
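/*
 * A worked example of the clamping in get_cursor_position(): a 64x64
 * cursor placed at crtc_x == -10 is programmed at x == 0 with
 * x_hotspot == 10, so the leftmost 10 columns of the cursor image hang
 * off-screen instead of wrapping around.
 */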
static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = plane->dev->dev_private;
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position;
	struct dc_cursor_attributes attributes;
	int ret;

	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
			 __func__,
			 amdgpu_crtc->crtc_id,
			 plane->state->crtc_w,
			 plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width             = plane->state->crtc_w;
	attributes.height            = plane->state->crtc_h;
	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle    = 0;
	attributes.attribute_flags.value = 0;

	attributes.pitch = attributes.width;

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
						     &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}
static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{
	assert_spin_locked(&acrtc->base.dev->event_lock);
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
			 acrtc->crtc_id);
}
static void update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state,
	struct dc_stream_state *new_stream,
	struct dc_plane_state *surface,
	u32 flip_timestamp_in_us)
{
	struct mod_vrr_params vrr_params;
	struct dc_info_packet vrr_infopacket = {0};
	struct amdgpu_device *adev = dm->adev;
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */

	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	vrr_params = new_crtc_state->vrr_params;

	if (surface) {
		mod_freesync_handle_preflip(
			dm->freesync_module,
			surface,
			new_stream,
			flip_timestamp_in_us,
			&vrr_params);

		if (adev->family < AMDGPU_FAMILY_AI &&
		    amdgpu_dm_vrr_active(new_crtc_state)) {
			mod_freesync_handle_v_update(dm->freesync_module,
						     new_stream, &vrr_params);

			/* Need to call this before the frame ends. */
			dc_stream_adjust_vmin_vmax(dm->dc,
						   new_crtc_state->stream,
						   &vrr_params.adjust);
		}
	}

	mod_freesync_build_vrr_infopacket(
		dm->freesync_module,
		new_stream,
		&vrr_params,
		PACKET_TYPE_VRR,
		TRANSFER_FUNC_UNKNOWN,
		&vrr_infopacket);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&new_crtc_state->vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_vrr_info_changed |=
		(memcmp(&new_crtc_state->vrr_infopacket,
			&vrr_infopacket,
			sizeof(vrr_infopacket)) != 0);

	new_crtc_state->vrr_params = vrr_params;
	new_crtc_state->vrr_infopacket = vrr_infopacket;

	new_stream->adjust = new_crtc_state->vrr_params.adjust;
	new_stream->vrr_infopacket = vrr_infopacket;

	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
			      new_crtc_state->base.crtc->base.id,
			      (int)new_crtc_state->base.vrr_enabled,
			      (int)vrr_params.state);

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
static void pre_update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state)
{
	struct dc_stream_state *new_stream = new_crtc_state->stream;
	struct mod_vrr_params vrr_params;
	struct mod_freesync_config config = new_crtc_state->freesync_config;
	struct amdgpu_device *adev = dm->adev;
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	vrr_params = new_crtc_state->vrr_params;

	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		config.state = new_crtc_state->base.vrr_enabled ?
			VRR_STATE_ACTIVE_VARIABLE :
			VRR_STATE_INACTIVE;
	} else {
		config.state = VRR_STATE_UNSUPPORTED;
	}

	mod_freesync_build_vrr_params(dm->freesync_module,
				      new_stream,
				      &config, &vrr_params);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&new_crtc_state->vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->vrr_params = vrr_params;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
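/*
 * Division of labour between the two helpers above:
 * pre_update_freesync_state_on_stream() runs once per commit and rebuilds
 * the VRR range (vrr_params) from the FreeSync config, while
 * update_freesync_state_on_stream() runs on the flip path and additionally
 * builds the per-frame VRR infopacket sent to the sink.
 */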
static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
					    struct dm_crtc_state *new_state)
{
	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);

	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable vblank irq, as a
		 * reenable after disable would compute bogus vblank/pflip
		 * timestamps if it likely happened inside display front-porch.
		 *
		 * We also need vupdate irq for the actual core vblank handling
		 * at end of vblank.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, true);
		drm_crtc_vblank_get(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	} else if (old_vrr_active && !new_vrr_active) {
		/* Transition VRR active -> inactive:
		 * Allow vblank irq disable again for fixed refresh rate.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, false);
		drm_crtc_vblank_put(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	}
}
static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	int i;

	/*
	 * TODO: Make this per-stream so we don't issue redundant updates for
	 * commits with multiple streams.
	 */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i)
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			handle_cursor_update(plane, old_plane_state);
}
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct dc_state *dc_state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool wait_for_vblank)
{
	int i;
	uint64_t timestamp_ns;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct drm_crtc_state *new_pcrtc_state =
			drm_atomic_get_new_crtc_state(state, pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
	struct dm_crtc_state *dm_old_crtc_state =
			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
	int planes_count = 0, vpos, hpos;
	long r;
	unsigned long flags;
	struct amdgpu_bo *abo;
	uint64_t tiling_flags;
	uint32_t target_vblank, last_flip_vblank;
	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
	bool pflip_present = false;
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	/*
	 * Disable the cursor first if we're disabling all the planes.
	 * It'll remain on the screen after the planes are re-enabled
	 * if we don't.
	 */
	if (acrtc_state->active_planes == 0)
		amdgpu_dm_commit_cursors(state);

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		bool plane_needs_flip;
		struct dc_plane_state *dc_plane;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		/* Cursor plane is handled after stream updates */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (!fb || !crtc || pcrtc != crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)
			continue;

		dc_plane = dm_new_plane_state->dc_state;

		bundle->surface_updates[planes_count].surface = dc_plane;
		if (new_pcrtc_state->color_mgmt_changed) {
			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
		}

		fill_dc_scaling_info(new_plane_state,
				     &bundle->scaling_infos[planes_count]);

		bundle->surface_updates[planes_count].scaling_info =
			&bundle->scaling_infos[planes_count];

		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;

		pflip_present = pflip_present || plane_needs_flip;

		if (!plane_needs_flip) {
			planes_count += 1;
			continue;
		}

		abo = gem_to_amdgpu_bo(fb->obj[0]);

		/*
		 * Wait for all fences on this FB. Do limited wait to avoid
		 * deadlock during GPU reset when this fence will not signal
		 * but we hold reservation lock for the BO.
		 */
		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
					      false,
					      msecs_to_jiffies(5000));
		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!");

		/*
		 * TODO This might fail and hence better not used, wait
		 * explicitly on fences instead
		 * and in general should be called for
		 * blocking commit to as per framework helpers
		 */
		r = amdgpu_bo_reserve(abo, true);
		if (unlikely(r != 0))
			DRM_ERROR("failed to reserve buffer before flip\n");

		amdgpu_bo_get_tiling_flags(abo, &tiling_flags);

		amdgpu_bo_unreserve(abo);

		fill_dc_plane_info_and_addr(
			dm->adev, new_plane_state, tiling_flags,
			&bundle->plane_infos[planes_count],
			&bundle->flip_addrs[planes_count].address,
			false);

		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
				 new_plane_state->plane->index,
				 bundle->plane_infos[planes_count].dcc.enable);

		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
		bundle->flip_addrs[planes_count].flip_immediate =
			crtc->state->async_flip &&
			acrtc_state->update_type == UPDATE_TYPE_FAST;

		timestamp_ns = ktime_get_ns();
		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
		bundle->surface_updates[planes_count].surface = dc_plane;

		if (!bundle->surface_updates[planes_count].surface) {
			DRM_ERROR("No surface for CRTC: id=%d\n",
				  acrtc_attach->crtc_id);
			continue;
		}

		if (plane == pcrtc->primary)
			update_freesync_state_on_stream(
				dm,
				acrtc_state,
				acrtc_state->stream,
				dc_plane,
				bundle->flip_addrs[planes_count].flip_timestamp_in_us);

		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
				 __func__,
				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);

		planes_count += 1;
	}

	if (pflip_present) {
		if (!vrr_active) {
			/* Use old throttling in non-vrr fixed refresh rate mode
			 * to keep flip scheduling based on target vblank counts
			 * working in a backwards compatible way, e.g., for
			 * clients using the GLX_OML_sync_control extension or
			 * DRI3/Present extension with defined target_msc.
			 */
			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
			/* For variable refresh rate mode only:
			 * Get vblank of last completed flip to avoid > 1 vrr
			 * flips per video frame by use of throttling, but allow
			 * flip programming anywhere in the possibly large
			 * variable vrr vblank interval for fine-grained flip
			 * timing control and more opportunity to avoid stutter
			 * on late submission of flips.
			 */
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			last_flip_vblank = acrtc_attach->last_flip_vblank;
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		target_vblank = last_flip_vblank + wait_for_vblank;
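		/*
		 * With wait_for_vblank == true this throttles to at most one
		 * flip per refresh cycle: e.g. last_flip_vblank == 1000 gives
		 * target_vblank == 1001, and the wait loop below spins until
		 * the hardware vblank counter has passed 1000.
		 */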
		/*
		 * Wait until we're out of the vertical blank period before the one
		 * targeted by the flip
		 */
		while ((acrtc_attach->enabled &&
			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
							    0, &vpos, &hpos, NULL,
							    NULL, &pcrtc->hwmode)
			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
			(int)(target_vblank -
			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
			usleep_range(1000, 1100);
		}

		if (acrtc_attach->base.state->event) {
			drm_crtc_vblank_get(pcrtc);

			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);

			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
			prepare_flip_isr(acrtc_attach);

			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		if (acrtc_state->stream) {
			if (acrtc_state->freesync_vrr_info_changed)
				bundle->stream_update.vrr_infopacket =
					&acrtc_state->stream->vrr_infopacket;
		}
	}

	/* Update the planes if changed or disable if we don't have any. */
	if ((planes_count || acrtc_state->active_planes == 0) &&
	    acrtc_state->stream) {
		bundle->stream_update.stream = acrtc_state->stream;
		if (new_pcrtc_state->mode_changed) {
			bundle->stream_update.src = acrtc_state->stream->src;
			bundle->stream_update.dst = acrtc_state->stream->dst;
		}

		if (new_pcrtc_state->color_mgmt_changed) {
			/*
			 * TODO: This isn't fully correct since we've actually
			 * already modified the stream in place.
			 */
			bundle->stream_update.gamut_remap =
				&acrtc_state->stream->gamut_remap_matrix;
			bundle->stream_update.output_csc_transform =
				&acrtc_state->stream->csc_color_matrix;
			bundle->stream_update.out_transfer_func =
				acrtc_state->stream->out_transfer_func;
		}

		acrtc_state->stream->abm_level = acrtc_state->abm_level;
		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
			bundle->stream_update.abm_level = &acrtc_state->abm_level;

		/*
		 * If FreeSync state on the stream has changed then we need to
		 * re-adjust the min/max bounds now that DC doesn't handle this
		 * as part of commit.
		 */
		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
		    amdgpu_dm_vrr_active(acrtc_state)) {
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			dc_stream_adjust_vmin_vmax(
				dm->dc, acrtc_state->stream,
				&acrtc_state->vrr_params.adjust);
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}
		mutex_lock(&dm->dc_lock);
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_allow_active)
			amdgpu_dm_psr_disable(acrtc_state->stream);

		dc_commit_updates_for_stream(dm->dc,
					     bundle->surface_updates,
					     planes_count,
					     acrtc_state->stream,
					     &bundle->stream_update,
					     dc_state);

		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->psr_version &&
		    !acrtc_state->stream->link->psr_feature_enabled)
			amdgpu_dm_link_setup_psr(acrtc_state->stream);
		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
			 acrtc_state->stream->link->psr_feature_enabled &&
			 !acrtc_state->stream->link->psr_allow_active) {
			amdgpu_dm_psr_enable(acrtc_state->stream);
		}

		mutex_unlock(&dm->dc_lock);
	}

	/*
	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane - those pipes are being disabled.
	 */
	if (acrtc_state->active_planes)
		amdgpu_dm_commit_cursors(state);

cleanup:
	kfree(bundle);
}
static void amdgpu_dm_commit_audio(struct drm_device *dev,
				   struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state;
	const struct dc_stream_status *status;
	int i, inst;

	/* Notify device removals. */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		if (old_con_state->crtc != new_con_state->crtc) {
			/* CRTC changes require notification. */
			goto notify;
		}

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

	notify:
		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = aconnector->audio_inst;
		aconnector->audio_inst = -1;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}

	/* Notify audio device additions. */
	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (!new_dm_crtc_state->stream)
			continue;

		status = dc_stream_get_status(new_dm_crtc_state->stream);
		if (!status)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = status->audio_inst;
		aconnector->audio_inst = inst;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}
}
/*
 * Enable interrupts on CRTCs that are newly active, undergone
 * a modeset, or have active planes again.
 *
 * Done in two passes, based on the for_modeset flag:
 * Pass 1: For CRTCs going through modeset
 * Pass 2: For CRTCs going from 0 to n active planes
 *
 * Interrupts can only be enabled after the planes are programmed,
 * so this requires a two-pass approach since we don't want to
 * just defer the interrupts until after commit planes every time.
 */
static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
					     struct drm_atomic_state *state,
					     bool for_modeset)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i;
#ifdef CONFIG_DEBUG_FS
	enum amdgpu_dm_pipe_crc_source source;
#endif

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);
		struct dm_crtc_state *dm_old_crtc_state =
			to_dm_crtc_state(old_crtc_state);
		bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
		bool run_pass;

		run_pass = (for_modeset && modeset) ||
			   (!for_modeset && !modeset &&
			    !dm_old_crtc_state->interrupts_enabled);

		if (!run_pass)
			continue;

		if (!dm_new_crtc_state->interrupts_enabled)
			continue;

		manage_dm_interrupts(adev, acrtc, true);

#ifdef CONFIG_DEBUG_FS
		/* The stream has changed so CRC capture needs to be re-enabled. */
		source = dm_new_crtc_state->crc_src;
		if (amdgpu_dm_is_valid_crc_source(source)) {
			amdgpu_dm_crtc_configure_crc_source(
				crtc, dm_new_crtc_state,
				dm_new_crtc_state->crc_src);
		}
#endif
	}
}
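/*
 * Both passes above are driven from amdgpu_dm_atomic_commit_tail(): once
 * with for_modeset == true right after the new streams are committed, and
 * once with for_modeset == false after the planes have been programmed, so
 * no CRTC has its interrupts enabled before the hardware it depends on is
 * set up.
 */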
/**
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 */
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
						struct dc_stream_state *stream_state)
{
	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}
static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	/*
	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
	 * a modeset, being disabled, or have no active planes.
	 *
	 * It's done in atomic commit rather than commit tail for now since
	 * some of these interrupt handlers access the current CRTC state and
	 * potentially the stream pointer itself.
	 *
	 * Since the atomic state is swapped within atomic commit and not within
	 * commit tail, this would lead to the new state (that hasn't been
	 * committed yet) being accessed from within the handlers.
	 *
	 * TODO: Fix this so we can do this in commit tail and not have to block
	 * in atomic check.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
		struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		if (dm_old_crtc_state->interrupts_enabled &&
		    (!dm_new_crtc_state->interrupts_enabled ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
			manage_dm_interrupts(adev, acrtc, false);
	}
	/*
	 * Add check here for SoC's that support hardware cursor plane, to
	 * unset legacy_cursor_update
	 */

	return drm_atomic_helper_commit(dev, state, nonblock);

	/*TODO Handle EINTR, reenable IRQ*/
}
/**
 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
 * @state: The atomic state to commit
 *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
 * atomic check should have filtered anything non-kosher.
 */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dm_atomic_state *dm_state;
	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
	uint32_t i, j;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned long flags;
	bool wait_for_vblank = true;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	int crtc_disable_count = 0;

	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	dm_state = dm_atomic_get_new_state(state);
	if (dm_state && dm_state->context) {
		dc_state = dm_state->context;
	} else {
		/* No state changes, retain current state. */
		dc_state_temp = dc_create_state(dm->dc);
		ASSERT(dc_state_temp);
		dc_state = dc_state_temp;
		dc_resource_state_copy_construct_current(dm->dc, dc_state);
	}

	/* update changed items */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		DRM_DEBUG_DRIVER(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
			"connectors_changed:%d\n",
			acrtc->crtc_id,
			new_crtc_state->enable,
			new_crtc_state->active,
			new_crtc_state->planes_changed,
			new_crtc_state->mode_changed,
			new_crtc_state->active_changed,
			new_crtc_state->connectors_changed);

		/* Copy all transient state flags into dc state */
		if (dm_new_crtc_state->stream) {
			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
							    dm_new_crtc_state->stream);
		}

		/* handles headless hotplug case, updating new_state and
		 * aconnector as needed
		 */

		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);

			if (!dm_new_crtc_state->stream) {
				/*
				 * this could happen because of issues with
				 * userspace notifications delivery.
				 * In this case userspace tries to set mode on
				 * display which is disconnected in fact.
				 * dc_sink is NULL in this case on aconnector.
				 * We expect reset mode will come soon.
				 *
				 * This can also happen when unplug is done
				 * during resume sequence ended
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state
				 */
				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
						 __func__, acrtc->base.base.id);
				continue;
			}

			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			pm_runtime_get_noresume(dev->dev);

			acrtc->enabled = true;
			acrtc->hw_mode = new_crtc_state->mode;
			crtc->hwmode = new_crtc_state->mode;
		} else if (modereset_required(new_crtc_state)) {
			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
			/* i.e. reset mode */
			if (dm_old_crtc_state->stream) {
				if (dm_old_crtc_state->stream->link->psr_allow_active)
					amdgpu_dm_psr_disable(dm_old_crtc_state->stream);

				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
			}
		}
	} /* for_each_crtc_in_state() */

	if (dc_state) {
		dm_enable_per_frame_crtc_master_sync(dc_state);
		mutex_lock(&dm->dc_lock);
		WARN_ON(!dc_commit_state(dm->dc, dc_state));
		mutex_unlock(&dm->dc_lock);
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
					dc_stream_get_status(dm_new_crtc_state->stream);

			if (!status)
				status = dc_stream_get_status_from_state(dc_state,
									 dm_new_crtc_state->stream);

			if (!status)
				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
			else
				acrtc->otg_inst = status->primary_otg_inst;
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		new_crtc_state = NULL;

		if (acrtc)
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			continue;
		}

		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
			hdcp_update_display(
				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
				new_con_state->hdcp_content_type,
				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
													 : false);
	}
#endif

	/* Handle connector state changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_surface_update dummy_updates[MAX_SURFACES];
		struct dc_stream_update stream_update;
		struct dc_info_packet hdr_packet;
		struct dc_stream_status *status = NULL;
		bool abm_changed, hdr_changed, scaling_changed;

		memset(&dummy_updates, 0, sizeof(dummy_updates));
		memset(&stream_update, 0, sizeof(stream_update));

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		scaling_changed = is_scaling_state_different(dm_new_con_state,
							     dm_old_con_state);

		abm_changed = dm_new_crtc_state->abm_level !=
			      dm_old_crtc_state->abm_level;

		hdr_changed =
			is_hdr_metadata_different(old_con_state, new_con_state);

		if (!scaling_changed && !abm_changed && !hdr_changed)
			continue;

		stream_update.stream = dm_new_crtc_state->stream;
		if (scaling_changed) {
			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
					dm_new_con_state, dm_new_crtc_state->stream);

			stream_update.src = dm_new_crtc_state->stream->src;
			stream_update.dst = dm_new_crtc_state->stream->dst;
		}

		if (abm_changed) {
			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;

			stream_update.abm_level = &dm_new_crtc_state->abm_level;
		}

		if (hdr_changed) {
			fill_hdr_info_packet(new_con_state, &hdr_packet);
			stream_update.hdr_static_metadata = &hdr_packet;
		}

		status = dc_stream_get_status(dm_new_crtc_state->stream);
		WARN_ON(!status);
		WARN_ON(!status->plane_count);

		/*
		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
		 * Here we create an empty update on each plane.
		 * To fix this, DC should permit updating only stream properties.
		 */
		for (j = 0; j < status->plane_count; j++)
			dummy_updates[j].surface = status->plane_states[0];


		mutex_lock(&dm->dc_lock);
		dc_commit_updates_for_stream(dm->dc,
					     dummy_updates,
					     status->plane_count,
					     dm_new_crtc_state->stream,
					     &stream_update,
					     dc_state);
		mutex_unlock(&dm->dc_lock);
	}

	/* Count number of newly disabled CRTCs for dropping PM refs later. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		if (old_crtc_state->active && !new_crtc_state->active)
			crtc_disable_count++;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		/* Update freesync active state. */
		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);

		/* Handle vrr on->off / off->on transitions */
		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
						dm_new_crtc_state);
	}

	/* Enable interrupts for CRTCs going through a modeset. */
	amdgpu_dm_enable_crtc_interrupts(dev, state, true);

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
		if (new_crtc_state->async_flip)
			wait_for_vblank = false;

	/* update planes when needed per crtc*/
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream)
			amdgpu_dm_commit_planes(state, dc_state, dev,
						dm, crtc, wait_for_vblank);
	}

	/* Enable interrupts for CRTCs going from 0 to n active planes. */
	amdgpu_dm_enable_crtc_interrupts(dev, state, false);

	/* Update audio instances for each connector. */
	amdgpu_dm_commit_audio(dev, state);

	/*
	 * send vblank event on all events not handled in flip and
	 * mark consumed event for drm_atomic_helper_commit_hw_done
	 */
	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

		if (new_crtc_state->event)
			drm_send_event_locked(dev, &new_crtc_state->event->base);

		new_crtc_state->event = NULL;
	}
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	/* Signal HW programming completion */
	drm_atomic_helper_commit_hw_done(state);

	if (wait_for_vblank)
		drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	/*
	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
	 * so we can put the GPU into runtime suspend if we're not driving any
	 * displays anymore
	 */
	for (i = 0; i < crtc_disable_count; i++)
		pm_runtime_put_autosuspend(dev->dev);
	pm_runtime_mark_last_busy(dev->dev);

	if (dc_state_temp)
		dc_release_state(dc_state_temp);
}
static int dm_force_atomic_commit(struct drm_connector *connector)
{
	int ret = 0;
	struct drm_device *ddev = connector->dev;
	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	struct drm_plane *plane = disconnected_acrtc->base.primary;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ddev->mode_config.acquire_ctx;

	/* Construct an atomic state to restore previous display setting */

	/*
	 * Attach connectors to drm_atomic_state
	 */
	conn_state = drm_atomic_get_connector_state(state, connector);

	ret = PTR_ERR_OR_ZERO(conn_state);
	if (ret)
		goto err;

	/* Attach crtc to drm_atomic_state*/
	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);

	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (ret)
		goto err;

	/* force a restore */
	crtc_state->mode_changed = true;

	/* Attach plane to drm_atomic_state */
	plane_state = drm_atomic_get_plane_state(state, plane);

	ret = PTR_ERR_OR_ZERO(plane_state);
	if (ret)
		goto err;

	/* Call commit internally with the state we just constructed */
	ret = drm_atomic_commit(state);
	if (!ret)
		return 0;

err:
	DRM_ERROR("Restoring old state failed with %i\n", ret);
	drm_atomic_state_put(state);

	return ret;
}
/*
 * This function handles all cases when set mode does not come upon hotplug.
 * This includes when a display is unplugged then plugged back into the
 * same port and when running without usermode desktop manager support
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_crtc *disconnected_acrtc;
	struct dm_crtc_state *acrtc_state;

	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
		return;

	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	if (!disconnected_acrtc)
		return;

	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
	if (!acrtc_state->stream)
		return;

	/*
	 * If the previous sink is not released and different from the current,
	 * we deduce we are in a state where we can not rely on usermode call
	 * to turn on the display, so we do it here
	 */
	if (acrtc_state->stream->sink != aconnector->dc_sink)
		dm_force_atomic_commit(&aconnector->base);
}
/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * Waits for completion of all non blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/*
	 * Adding all modeset locks to acquire_ctx ensures that when the
	 * framework releases it, the extra locks we are taking here will
	 * be released too.
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
				struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/*
		 * Make sure all pending HW programming completed and
		 * page flips done
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
					&commit->flip_done, 10*HZ);

		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
				  "timed out\n", crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	return ret < 0 ? ret : 0;
}
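/*
 * Return semantics above: 0 when locking succeeded and every pending
 * commit signalled hw_done/flip_done in time (a timeout is only logged,
 * not propagated), or a negative error when locking failed or a wait was
 * interrupted.
 */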
static void get_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state,
	struct dm_connector_state *new_con_state)
{
	struct mod_freesync_config config = {0};
	struct amdgpu_dm_connector *aconnector =
			to_amdgpu_dm_connector(new_con_state->base.connector);
	struct drm_display_mode *mode = &new_crtc_state->base.mode;
	int vrefresh = drm_mode_vrefresh(mode);

	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
					vrefresh >= aconnector->min_vfreq &&
					vrefresh <= aconnector->max_vfreq;

	if (new_crtc_state->vrr_supported) {
		new_crtc_state->stream->ignore_msa_timing_param = true;
		config.state = new_crtc_state->base.vrr_enabled ?
				VRR_STATE_ACTIVE_VARIABLE :
				VRR_STATE_INACTIVE;
		config.min_refresh_in_uhz =
				aconnector->min_vfreq * 1000000;
		config.max_refresh_in_uhz =
				aconnector->max_vfreq * 1000000;
		config.vsif_supported = true;
		config.btr = true;
	}

	new_crtc_state->freesync_config = config;
}
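/*
 * Refresh rates above are tracked in micro-Hz: a panel advertising a
 * 40-60 Hz FreeSync range ends up with min_refresh_in_uhz == 40000000 and
 * max_refresh_in_uhz == 60000000, while a 75 Hz mode on the same panel
 * (vrefresh > max_vfreq) leaves vrr_supported false.
 */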
static void reset_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state)
{
	new_crtc_state->vrr_supported = false;

	memset(&new_crtc_state->vrr_params, 0,
	       sizeof(new_crtc_state->vrr_params));
	memset(&new_crtc_state->vrr_infopacket, 0,
	       sizeof(new_crtc_state->vrr_infopacket));
}
static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
				struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *old_crtc_state,
				struct drm_crtc_state *new_crtc_state,
				bool enable,
				bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*
	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
	 * update changed items
	 */
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

	new_stream = NULL;

	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	acrtc = to_amdgpu_crtc(crtc);
	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

	/* TODO This hack should go away */
	if (aconnector && enable) {
		/* Make sure fake sink is created in plug-in scenario */
		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
							    &aconnector->base);
		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
							    &aconnector->base);

		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
			goto fail;
		}

		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto skip_modeset;

		new_stream = create_stream_for_sink(aconnector,
						    &new_crtc_state->mode,
						    dm_new_conn_state,
						    dm_old_crtc_state->stream);

		/*
		 * we can have no stream on ACTION_SET if a display
		 * was disconnected during S3, in this case it is not an
		 * error, the OS will be updated after detection, and
		 * will do the right thing on next atomic commit
		 */

		if (!new_stream) {
			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
					 __func__, acrtc->base.base.id);
			ret = -ENOMEM;
			goto fail;
		}

		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

		ret = fill_hdr_info_packet(drm_new_conn_state,
					   &new_stream->hdr_static_metadata);
		if (ret)
			goto fail;

		/*
		 * If we already removed the old stream from the context
		 * (and set the new stream to NULL) then we can't reuse
		 * the old stream even if the stream and scaling are unchanged.
		 * We'll hit the BUG_ON and black screen.
		 *
		 * TODO: Refactor this function to allow this check to work
		 * in all conditions.
		 */
		if (dm_new_crtc_state->stream &&
		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
					 new_crtc_state->mode_changed);
		}
	}

	/* mode_changed flag may get updated above, need to check again */
	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
		goto skip_modeset;

	DRM_DEBUG_DRIVER(
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
		"connectors_changed:%d\n",
		acrtc->crtc_id,
		new_crtc_state->enable,
		new_crtc_state->active,
		new_crtc_state->planes_changed,
		new_crtc_state->mode_changed,
		new_crtc_state->active_changed,
		new_crtc_state->connectors_changed);

	/* Remove stream for any changed/disabled CRTC */
	if (!enable) {

		if (!dm_old_crtc_state->stream)
			goto skip_modeset;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
				 crtc->base.id);

		/* i.e. reset mode */
		if (dc_remove_stream_from_ctx(
				dm->dc,
				dm_state->context,
				dm_old_crtc_state->stream) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}

		dc_stream_release(dm_old_crtc_state->stream);
		dm_new_crtc_state->stream = NULL;

		reset_freesync_config_for_crtc(dm_new_crtc_state);

		*lock_and_validation_needed = true;

	} else {/* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent NULL pointer on new_stream when
		 * added MST connectors not found in existing crtc_state in the chained mode
		 * TODO: need to dig out the root cause of that
		 */
		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
			goto skip_modeset;

		if (modereset_required(new_crtc_state))
			goto skip_modeset;

		if (modeset_required(new_crtc_state, new_stream,
				     dm_old_crtc_state->stream)) {

			WARN_ON(dm_new_crtc_state->stream);

			ret = dm_atomic_get_state(state, &dm_state);
			if (ret)
				goto fail;

			dm_new_crtc_state->stream = new_stream;

			dc_stream_retain(new_stream);

			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
					 crtc->base.id);

			if (dc_add_stream_to_ctx(
					dm->dc,
					dm_state->context,
					dm_new_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			*lock_and_validation_needed = true;
		}
	}

skip_modeset:
	/* Release extra reference */
	if (new_stream)
		dc_stream_release(new_stream);

	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && aconnector && new_crtc_state->enable &&
	      new_crtc_state->active))
		return 0;
	/*
	 * Given above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (just been added
	 *    to the dc context, or already is on the context)
	 * 2. Has a valid connector attached, and
	 * 3. Is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/* ABM settings */
	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
		if (ret)
			goto fail;
	}

	/* Update Freesync settings. */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}
static bool should_reset_plane(struct drm_atomic_state *state,
			       struct drm_plane *plane,
			       struct drm_plane_state *old_plane_state,
			       struct drm_plane_state *new_plane_state)
{
	struct drm_plane *other;
	struct drm_plane_state *old_other_state, *new_other_state;
	struct drm_crtc_state *new_crtc_state;
	int i;

	/*
	 * TODO: Remove this hack once the checks below are sufficient to
	 * determine when we need to reset all the planes on the stream.
	 */
	if (state->allow_modeset)
		return true;

	/* Exit early if we know that we're adding or removing the plane. */
	if (old_plane_state->crtc != new_plane_state->crtc)
		return true;

	/* old crtc == new_crtc == NULL, plane not in context. */
	if (!new_plane_state->crtc)
		return false;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);

	if (!new_crtc_state)
		return true;

	/* CRTC Degamma changes currently require us to recreate planes. */
	if (new_crtc_state->color_mgmt_changed)
		return true;

	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * If there are any new primary or overlay planes being added or
	 * removed then the z-order can potentially change. To ensure
	 * correct z-order and pipe acquisition the current DC architecture
	 * requires us to remove and recreate all existing planes.
	 *
	 * TODO: Come up with a more elegant solution for this.
	 */
	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		if (other->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (old_other_state->crtc != new_plane_state->crtc &&
		    new_other_state->crtc != new_plane_state->crtc)
			continue;

		if (old_other_state->crtc != new_other_state->crtc)
			return true;

		/* TODO: Remove this once we can handle fast format changes. */
		if (old_other_state->fb && new_other_state->fb &&
		    old_other_state->fb->format != new_other_state->fb->format)
			return true;
	}

	return false;
}
static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{

	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	bool needs_reset;
	int ret = 0;


	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	/*TODO Implement atomic check for cursor plane */
	if (plane->type == DRM_PLANE_TYPE_CURSOR)
		return 0;

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {

			return -EINVAL;
		}


		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			new_plane_crtc->dev->dev_private,
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {

			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		/* Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}


	return ret;
}
8009 dm_determine_update_type_for_commit(struct amdgpu_display_manager
*dm
,
8010 struct drm_atomic_state
*state
,
8011 enum surface_update_type
*out_type
)
8013 struct dc
*dc
= dm
->dc
;
8014 struct dm_atomic_state
*dm_state
= NULL
, *old_dm_state
= NULL
;
8015 int i
, j
, num_plane
, ret
= 0;
8016 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
8017 struct dm_plane_state
*new_dm_plane_state
, *old_dm_plane_state
;
8018 struct drm_crtc
*new_plane_crtc
;
8019 struct drm_plane
*plane
;
8021 struct drm_crtc
*crtc
;
8022 struct drm_crtc_state
*new_crtc_state
, *old_crtc_state
;
8023 struct dm_crtc_state
*new_dm_crtc_state
, *old_dm_crtc_state
;
8024 struct dc_stream_status
*status
= NULL
;
8025 enum surface_update_type update_type
= UPDATE_TYPE_FAST
;
8026 struct surface_info_bundle
{
8027 struct dc_surface_update surface_updates
[MAX_SURFACES
];
8028 struct dc_plane_info plane_infos
[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		DRM_ERROR("Failed to allocate update bundle\n");
		/* Set type to FULL to avoid crashing in DC */
		update_type = UPDATE_TYPE_FULL;
		goto cleanup;
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {

		memset(bundle, 0, sizeof(struct surface_info_bundle));

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
		num_plane = 0;

		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}

		if (!new_dm_crtc_state->stream)
			continue;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
			const struct amdgpu_framebuffer *amdgpu_fb =
				to_amdgpu_framebuffer(new_plane_state->fb);
			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
			uint64_t tiling_flags;

			new_plane_crtc = new_plane_state->crtc;
			new_dm_plane_state = to_dm_plane_state(new_plane_state);
			old_dm_plane_state = to_dm_plane_state(old_plane_state);

			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
				update_type = UPDATE_TYPE_FULL;
				goto cleanup;
			}

			if (crtc != new_plane_crtc)
				continue;

			bundle->surface_updates[num_plane].surface =
				new_dm_plane_state->dc_state;

			if (new_crtc_state->mode_changed) {
				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
				bundle->stream_update.src = new_dm_crtc_state->stream->src;
			}

			if (new_crtc_state->color_mgmt_changed) {
				bundle->surface_updates[num_plane].gamma =
					new_dm_plane_state->dc_state->gamma_correction;
				bundle->surface_updates[num_plane].in_transfer_func =
					new_dm_plane_state->dc_state->in_transfer_func;
				bundle->stream_update.gamut_remap =
					&new_dm_crtc_state->stream->gamut_remap_matrix;
				bundle->stream_update.output_csc_transform =
					&new_dm_crtc_state->stream->csc_color_matrix;
				bundle->stream_update.out_transfer_func =
					new_dm_crtc_state->stream->out_transfer_func;
			}

			ret = fill_dc_scaling_info(new_plane_state,
						   scaling_info);
			if (ret)
				goto cleanup;

			bundle->surface_updates[num_plane].scaling_info = scaling_info;

			if (amdgpu_fb) {
				ret = get_fb_info(amdgpu_fb, &tiling_flags);
				if (ret)
					goto cleanup;

				ret = fill_dc_plane_info_and_addr(
					dm->adev, new_plane_state, tiling_flags,
					plane_info,
					&flip_addr->address,
					false);
				if (ret)
					goto cleanup;

				bundle->surface_updates[num_plane].plane_info = plane_info;
				bundle->surface_updates[num_plane].flip_addr = flip_addr;
			}

			num_plane++;
		}

		if (num_plane == 0)
			continue;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto cleanup;

		old_dm_state = dm_atomic_get_old_state(state);
		if (!old_dm_state) {
			ret = -EINVAL;
			goto cleanup;
		}

		status = dc_stream_get_status_from_state(old_dm_state->context,
							 new_dm_crtc_state->stream);
		bundle->stream_update.stream = new_dm_crtc_state->stream;
		/*
		 * TODO: DC modifies the surface during this call so we need
		 * to lock here - find a way to do this without locking.
		 */
		mutex_lock(&dm->dc_lock);
		update_type = dc_check_update_surfaces_for_stream(
				dc, bundle->surface_updates, num_plane,
				&bundle->stream_update, status);
		mutex_unlock(&dm->dc_lock);

		if (update_type > UPDATE_TYPE_MED) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}
	}

cleanup:
	kfree(bundle);

	*out_type = update_type;
	return ret;
}
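
/*
 * Illustrative classification (a sketch, not driver code; the exact
 * boundaries between types are decided by DC inside
 * dc_check_update_surfaces_for_stream()):
 *
 *   - page flip with an unchanged dc_plane_state     -> UPDATE_TYPE_FAST
 *   - scaling or color-management-only changes       -> typically
 *     UPDATE_TYPE_MED, per DC's check above
 *   - stream replaced or dc_plane_state reallocated  -> UPDATE_TYPE_FULL
 *
 * Note that anything DC reports above UPDATE_TYPE_MED is promoted to
 * UPDATE_TYPE_FULL before being returned through *out_type.
 */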
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
		if (conn_state->crtc != crtc)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->port || !aconnector->mst_port)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}
/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state; otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For the full-update case, which removes/adds/updates streams on
 * one CRTC while flipping on another, acquiring the global lock guarantees
 * that any such full-update commit will wait for completion of any outstanding
 * flip using DRM's synchronization events. See
 * dm_determine_update_type_for_commit().
 *
 * Note that DM adds the affected connectors for all CRTCs in state, even when
 * that might not seem necessary. This is because DC stream creation requires
 * the DC sink, which is tied to the DRM connector state. Cleaning this up
 * should be possible but non-trivial - a possible TODO item.
 *
 * Return: 0 on success, a negative error code if validation failed.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;
	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;

	int ret, i;

	/*
	 * This bool will be set to true for any modeset/reset
	 * or plane update which implies a non-fast surface update.
	 */
	bool lock_and_validation_needed = false;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;

	if (adev->asic_type >= CHIP_NAVI10) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret)
					goto fail;
			}
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
			continue;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;
	}

	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}

	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper; check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes. */
	/* TODO Removed scaling-changes validation due to inability to commit
	 * a new stream into context w/o causing a full reset. Need to
	 * decide how to handle.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scale or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		overall_update_type = UPDATE_TYPE_FULL;
		lock_and_validation_needed = true;
	}

	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
	if (ret)
		goto fail;

	if (overall_update_type < update_type)
		overall_update_type = update_type;

	/*
	 * lock_and_validation_needed was an old way to determine if we need to
	 * set the global lock. Leaving it in to check if we broke any corner
	 * cases:
	 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
	 */
	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
		WARN(1, "Global lock should be set; overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");

	if (overall_update_type > UPDATE_TYPE_FAST) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
			goto fail;

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of the MST topology in the state:
		 * we need to perform the MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * of getting stuck in an infinite loop and hanging eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;

		if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context or affect global validation, and they can
		 * have their commit work done in parallel with other commits
		 * not touching the same resource. If we have a new DC context
		 * as part of the DM atomic state from validation we need to
		 * free it and retain the existing one instead.
		 */
		struct dm_atomic_state *new_dm_state, *old_dm_state;

		new_dm_state = dm_atomic_get_new_state(state);
		old_dm_state = dm_atomic_get_old_state(state);

		if (new_dm_state && old_dm_state) {
			if (new_dm_state->context)
				dc_release_state(new_dm_state->context);

			new_dm_state->context = old_dm_state->context;

			if (old_dm_state->context)
				dc_retain_state(old_dm_state->context);
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = (int)overall_update_type;
	}

	/* Must be success */
	WARN_ON(ret);
	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	return ret;
}
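
/*
 * Worked example of the locking rationale documented above (illustrative,
 * not driver code): suppose commit A page-flips CRTC 0 (UPDATE_TYPE_FAST)
 * while commit B does a modeset on CRTC 1 (UPDATE_TYPE_FULL). Without the
 * global lock taken in the overall_update_type > UPDATE_TYPE_FAST branch,
 * B could program a new dc_state while A's flip is still outstanding.
 * do_aquire_global_lock() makes B wait on the DRM synchronization events
 * of every outstanding commit, so the full update proceeds only once A's
 * flip has completed.
 */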
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}
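
/*
 * Worked example for the check above: DP_DOWN_STREAM_PORT_COUNT is DPCD
 * address 0x0007 and DP_MSA_TIMING_PAR_IGNORED is bit 6 (0x40) of that
 * byte (see drm_dp_helper.h). A sink returning, say, dpcd_data = 0x41:
 *
 *	0x41 & 0x40 = 0x40  ->  capable = true
 *
 * i.e. the sink can ignore the MSA timing parameters, a prerequisite for
 * driving it with variable refresh timings.
 */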
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		return;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * If the EDID is non-zero, restrict FreeSync to DP and eDP only.
	 */
	if (edid) {
		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
		    || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}
	}
	if (edid_check_required == true && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {

			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;
			range = &data->data.range;
			/*
			 * Check if the monitor has continuous frequency mode.
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;
			/*
			 * Check for flag range limits only. If flag == 1 then
			 * no additional timing information is provided.
			 * Default GTF, GTF Secondary curve and CVT are not
			 * supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10) {

			freesync_capable = true;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->psr_feature_enabled = dpcd_data[0] ? true : false;
		DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
	}
}
/*
 * amdgpu_dm_link_setup_psr() - configure psr link
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	struct dc *dc = NULL;
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;
	dc = link->ctx->dc;

	psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);

	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);

	return ret;
}
/*
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/* Calculate the number of static frames before generating an
	 * interrupt to enter PSR.
	 */
	/* Init fail-safe of 2 frames static */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);

	/* Round up.
	 * Calculate the number of frames such that at least 30 ms of time
	 * has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}

	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false);
}
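
/*
 * Worked example of the static-frame math above (illustrative numbers):
 * a 1920x1080@60 stream with pix_clk_100hz = 1485000 (148.5 MHz),
 * v_total = 1125 and h_total = 2200 gives
 *
 *	vsync_rate_hz       = 148500000 / 1125 / 2200 = 60
 *	frame_time_microsec = 1000000 / 60            = 16666
 *	num_frames_static   = 30000 / 16666 + 1       = 2
 *
 * i.e. the screen must stay static for two full frames (>30 ms) before
 * the PSR firmware is allowed to engage.
 */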
8707 * amdgpu_dm_psr_disable() - disable psr f/w
8708 * @stream: stream state
8710 * Return: true if success
8712 static bool amdgpu_dm_psr_disable(struct dc_stream_state
*stream
)
8715 DRM_DEBUG_DRIVER("Disabling psr...\n");
8717 return dc_link_set_psr_allow_active(stream
->link
, false, true);