2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/inc/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
48 #include "amdgpu_pm.h"
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
58 #include "ivsrcid/ivsrcid_vislands30.h"
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
88 #include "soc15_common.h"
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB
);
98 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
99 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU
);
101 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
102 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU
);
104 /* Number of bytes in PSP header for firmware. */
105 #define PSP_HEADER_BYTES 0x100
107 /* Number of bytes in PSP footer for firmware. */
108 #define PSP_FOOTER_BYTES 0x100
113 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
114 * **dm**) sits between DRM and DC. It acts as a liason, converting DRM
115 * requests into DC requests, and DC responses into DRM responses.
117 * The root control structure is &struct amdgpu_display_manager.
120 /* basic init/fini API */
121 static int amdgpu_dm_init(struct amdgpu_device
*adev
);
122 static void amdgpu_dm_fini(struct amdgpu_device
*adev
);
125 * initializes drm_device display related structures, based on the information
126 * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
127 * drm_encoder, drm_mode_config
129 * Returns 0 on success
131 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device
*adev
);
132 /* removes and deallocates the drm structures, created by the above function */
133 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager
*dm
);
135 static int amdgpu_dm_plane_init(struct amdgpu_display_manager
*dm
,
136 struct drm_plane
*plane
,
137 unsigned long possible_crtcs
,
138 const struct dc_plane_cap
*plane_cap
);
139 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager
*dm
,
140 struct drm_plane
*plane
,
141 uint32_t link_index
);
142 static int amdgpu_dm_connector_init(struct amdgpu_display_manager
*dm
,
143 struct amdgpu_dm_connector
*amdgpu_dm_connector
,
145 struct amdgpu_encoder
*amdgpu_encoder
);
146 static int amdgpu_dm_encoder_init(struct drm_device
*dev
,
147 struct amdgpu_encoder
*aencoder
,
148 uint32_t link_index
);
150 static int amdgpu_dm_connector_get_modes(struct drm_connector
*connector
);
152 static int amdgpu_dm_atomic_commit(struct drm_device
*dev
,
153 struct drm_atomic_state
*state
,
156 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state
*state
);
158 static int amdgpu_dm_atomic_check(struct drm_device
*dev
,
159 struct drm_atomic_state
*state
);
161 static void handle_cursor_update(struct drm_plane
*plane
,
162 struct drm_plane_state
*old_plane_state
);
164 static void amdgpu_dm_set_psr_caps(struct dc_link
*link
);
165 static bool amdgpu_dm_psr_enable(struct dc_stream_state
*stream
);
166 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state
*stream
);
167 static bool amdgpu_dm_psr_disable(struct dc_stream_state
*stream
);
171 * dm_vblank_get_counter
174 * Get counter for number of vertical blanks
177 * struct amdgpu_device *adev - [in] desired amdgpu device
178 * int disp_idx - [in] which CRTC to get the counter from
181 * Counter for vertical blanks
/*
 * dm_vblank_get_counter() - return the HW vblank counter for a CRTC.
 *
 * Bounds-checks the crtc index against mode_info.num_crtc and bails out
 * with 0 when the CRTC has no DC stream attached; otherwise forwards to
 * dc_stream_get_vblank_counter().
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		/* No stream means the CRTC is not driving a display right now. */
		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}
/*
 * dm_crtc_get_scanoutpos() - query current scanout position for a CRTC.
 * @vbl:      out: packed vblank window, v_blank_start | (v_blank_end << 16)
 * @position: out: packed position, v_position | (h_position << 16)
 *
 * Reads the raw scanout position from DC and re-packs it into the legacy
 * register-style format the base driver expects.
 */
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
						acrtc->base.state);

		if (acrtc_state->stream ==  NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}
/* amd_ip_funcs hook: DM has no idle state to track, always report idle. */
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}
/* amd_ip_funcs hook: nothing to wait for, succeed immediately. */
static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}
/* amd_ip_funcs hook: DM never requests a soft reset. */
static bool dm_check_soft_reset(void *handle)
{
	return false;
}
/* amd_ip_funcs hook: soft reset is a no-op for the display manager. */
static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
/*
 * get_crtc_by_otg_inst() - map an OTG (output timing generator) instance
 * back to its amdgpu_crtc.
 *
 * Walks the DRM CRTC list comparing each amdgpu_crtc's otg_inst. An
 * otg_inst of -1 is unexpected; fall back to CRTC 0 in that case.
 * Returns NULL when no CRTC matches.
 */
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}
/*
 * amdgpu_dm_vrr_active() - is variable refresh rate currently engaged on
 * this CRTC state (either truly variable or fixed-rate VRR)?
 */
static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	/* irq_src encodes the pageflip IRQ source; translate to OTG instance. */
	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
						 amdgpu_crtc->pflip_status,
						 AMDGPU_FLIP_SUBMITTED,
						 amdgpu_crtc->crtc_id,
						 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}
/*
 * dm_vupdate_high_irq() - VUPDATE interrupt handler.
 *
 * In VRR mode, core vblank handling is deferred to this handler (after
 * end of front-porch) so vblank timestamps are valid; it also performs
 * below-the-range (BTR) freesync processing for pre-DCE12 ASICs.
 */
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
			 amdgpu_dm_vrr_active(acrtc_state),
			 acrtc_state->active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!amdgpu_dm_vrr_active(acrtc_state))
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc_state->stream,
					     &acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
					   &acrtc_state->vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc_state->active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
/* amd_ip_funcs hook: clockgating is managed by DC, nothing to do here. */
static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}
/* amd_ip_funcs hook: powergating is managed by DC, nothing to do here. */
static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}
534 /* Prototypes of private functions */
535 static int dm_early_init(void* handle
);
/* Allocate memory for FBC compressed data  */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	/* Only relevant when the ASIC actually has an FBC compressor. */
	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	/* FBC is only applied on the eDP (internal panel) link. */
	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	/* Buffer already allocated on a previous call. */
	if (compressor->bo_ptr)
		return;


	/* Size the buffer for the largest mode this connector reports. */
	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		/* 4 bytes per pixel, backed by GTT memory. */
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}

	}

}
/*
 * amdgpu_dm_audio_component_get_eld() - drm_audio_component callback that
 * copies the ELD (EDID-Like Data) of the connector bound to @port into
 * @buf for the HDA driver.
 *
 * Returns the ELD size in bytes (0 if no connector matched); *@enabled
 * reports whether a matching, audio-capable connector was found.
 */
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					  int pipe, bool *enabled,
					  unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	/* audio_lock serializes against audio instance (re)assignment. */
	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		/* Never copy more than the caller's buffer can hold. */
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}
/* Ops exposed to the HDA audio driver through the audio component framework. */
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};
/*
 * amdgpu_dm_audio_component_bind() - component framework bind callback;
 * wires the drm_audio_component to this device's ops so the HDA driver
 * can query ELD data.
 */
static int amdgpu_dm_audio_component_bind(struct device *kdev,
				       struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}
/*
 * amdgpu_dm_audio_component_unbind() - component framework unbind callback;
 * detaches the drm_audio_component from this device.
 */
static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}
/* Bind/unbind callbacks registered with the device component framework. */
static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};
/*
 * amdgpu_dm_audio_init() - initialize display audio pin bookkeeping and
 * register the audio component with the component framework.
 *
 * Pin values are reset to "unknown" (-1/0/false); pin ids come from the
 * DC resource pool's audio instances. Returns 0 on success or the
 * component_add() error.
 */
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}
/*
 * amdgpu_dm_audio_fini() - unregister the audio component and mark
 * display audio disabled. Safe to call when audio was never enabled.
 */
static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}
/*
 * amdgpu_dm_audio_eld_notify() - tell the bound HDA driver that the ELD
 * for @pin changed (e.g. on hotplug), so it re-queries via get_eld.
 * No-op if no audio component is bound or it lacks a notify hook.
 */
void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}
/*
 * dm_dmub_hw_init() - bring up the DMUB (display microcontroller) service.
 *
 * Copies firmware instruction/data sections and the VBIOS into the
 * framebuffer windows reserved by dm_dmub_sw_init(), programs the DMUB
 * hardware parameters, waits for firmware auto-load, initializes DMCU/ABM
 * when present, and creates the DC-side DMUB server.
 *
 * Returns 0 on success (including when the ASIC has no DMUB), negative
 * errno on failure.
 */
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	/* Instruction section sits after the PSP header inside the ucode blob. */
	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
				fw_inst_const_size);
	}

	memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
	       fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
/*
 * amdgpu_dm_init() - top-level display manager initialization.
 *
 * Creates the DM locks and IRQ infrastructure, fills dc_init_data from
 * the adev, creates the DC instance, initializes DMUB/hardware, the
 * freesync and color modules, optional HDCP, and finally the DRM-facing
 * display structures and vblank support. On any failure it unwinds via
 * amdgpu_dm_fini() and returns -EINVAL.
 */
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if(amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/* APUs get GPU-VM backed scanout support. */
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	/* Unwind everything set up so far. */
	amdgpu_dm_fini(adev);

	return -EINVAL;
}
/*
 * amdgpu_dm_fini() - tear down everything amdgpu_dm_init() created, in
 * reverse order: audio, DRM device structures, HDCP, DMUB server and
 * buffer, DC itself, cgs device, freesync module, and the DM mutexes.
 * Also used as the error-unwind path of amdgpu_dm_init(), so every step
 * tolerates its object never having been created.
 */
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vlank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}
/*
 * load_dmcu_fw() - request and register DMCU firmware for ASICs that need it.
 *
 * Most ASICs have no DMCU firmware and return 0 immediately. For Raven
 * (Picasso/Raven2) and Navi12 the firmware is requested, validated, and
 * registered with the PSP firmware loader (ERAM + interrupt-vector
 * sections). A missing firmware file (-ENOENT) is not an error, as DMCU
 * firmware is optional.
 */
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch(adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}
/* DMUB service callback: read a register through the DC context. */
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}
/* DMUB service callback: write a register through the DC context. */
static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}
1132 static int dm_dmub_sw_init(struct amdgpu_device
*adev
)
1134 struct dmub_srv_create_params create_params
;
1135 struct dmub_srv_region_params region_params
;
1136 struct dmub_srv_region_info region_info
;
1137 struct dmub_srv_fb_params fb_params
;
1138 struct dmub_srv_fb_info
*fb_info
;
1139 struct dmub_srv
*dmub_srv
;
1140 const struct dmcub_firmware_header_v1_0
*hdr
;
1141 const char *fw_name_dmub
;
1142 enum dmub_asic dmub_asic
;
1143 enum dmub_status status
;
1146 switch (adev
->asic_type
) {
1148 dmub_asic
= DMUB_ASIC_DCN21
;
1149 fw_name_dmub
= FIRMWARE_RENOIR_DMUB
;
1153 /* ASIC doesn't support DMUB. */
1157 r
= request_firmware_direct(&adev
->dm
.dmub_fw
, fw_name_dmub
, adev
->dev
);
1159 DRM_ERROR("DMUB firmware loading failed: %d\n", r
);
1163 r
= amdgpu_ucode_validate(adev
->dm
.dmub_fw
);
1165 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r
);
1169 hdr
= (const struct dmcub_firmware_header_v1_0
*)adev
->dm
.dmub_fw
->data
;
1171 if (adev
->firmware
.load_type
== AMDGPU_FW_LOAD_PSP
) {
1172 adev
->firmware
.ucode
[AMDGPU_UCODE_ID_DMCUB
].ucode_id
=
1173 AMDGPU_UCODE_ID_DMCUB
;
1174 adev
->firmware
.ucode
[AMDGPU_UCODE_ID_DMCUB
].fw
=
1176 adev
->firmware
.fw_size
+=
1177 ALIGN(le32_to_cpu(hdr
->inst_const_bytes
), PAGE_SIZE
);
1179 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1180 adev
->dm
.dmcub_fw_version
);
1183 adev
->dm
.dmcub_fw_version
= le32_to_cpu(hdr
->header
.ucode_version
);
1185 adev
->dm
.dmub_srv
= kzalloc(sizeof(*adev
->dm
.dmub_srv
), GFP_KERNEL
);
1186 dmub_srv
= adev
->dm
.dmub_srv
;
1189 DRM_ERROR("Failed to allocate DMUB service!\n");
1193 memset(&create_params
, 0, sizeof(create_params
));
1194 create_params
.user_ctx
= adev
;
1195 create_params
.funcs
.reg_read
= amdgpu_dm_dmub_reg_read
;
1196 create_params
.funcs
.reg_write
= amdgpu_dm_dmub_reg_write
;
1197 create_params
.asic
= dmub_asic
;
1199 /* Create the DMUB service. */
1200 status
= dmub_srv_create(dmub_srv
, &create_params
);
1201 if (status
!= DMUB_STATUS_OK
) {
1202 DRM_ERROR("Error creating DMUB service: %d\n", status
);
1206 /* Calculate the size of all the regions for the DMUB service. */
1207 memset(®ion_params
, 0, sizeof(region_params
));
1209 region_params
.inst_const_size
= le32_to_cpu(hdr
->inst_const_bytes
) -
1210 PSP_HEADER_BYTES
- PSP_FOOTER_BYTES
;
1211 region_params
.bss_data_size
= le32_to_cpu(hdr
->bss_data_bytes
);
1212 region_params
.vbios_size
= adev
->bios_size
;
1213 region_params
.fw_bss_data
=
1214 adev
->dm
.dmub_fw
->data
+
1215 le32_to_cpu(hdr
->header
.ucode_array_offset_bytes
) +
1216 le32_to_cpu(hdr
->inst_const_bytes
);
1218 status
= dmub_srv_calc_region_info(dmub_srv
, ®ion_params
,
1221 if (status
!= DMUB_STATUS_OK
) {
1222 DRM_ERROR("Error calculating DMUB region info: %d\n", status
);
1227 * Allocate a framebuffer based on the total size of all the regions.
1228 * TODO: Move this into GART.
1230 r
= amdgpu_bo_create_kernel(adev
, region_info
.fb_size
, PAGE_SIZE
,
1231 AMDGPU_GEM_DOMAIN_VRAM
, &adev
->dm
.dmub_bo
,
1232 &adev
->dm
.dmub_bo_gpu_addr
,
1233 &adev
->dm
.dmub_bo_cpu_addr
);
1237 /* Rebase the regions on the framebuffer address. */
1238 memset(&fb_params
, 0, sizeof(fb_params
));
1239 fb_params
.cpu_addr
= adev
->dm
.dmub_bo_cpu_addr
;
1240 fb_params
.gpu_addr
= adev
->dm
.dmub_bo_gpu_addr
;
1241 fb_params
.region_info
= ®ion_info
;
1243 adev
->dm
.dmub_fb_info
=
1244 kzalloc(sizeof(*adev
->dm
.dmub_fb_info
), GFP_KERNEL
);
1245 fb_info
= adev
->dm
.dmub_fb_info
;
1249 "Failed to allocate framebuffer info for DMUB service!\n");
1253 status
= dmub_srv_calc_fb_info(dmub_srv
, &fb_params
, fb_info
);
1254 if (status
!= DMUB_STATUS_OK
) {
1255 DRM_ERROR("Error calculating DMUB FB info: %d\n", status
);
/* IP-block sw_init hook: set up DMUB first, then load DMCU firmware. */
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}
1274 static int dm_sw_fini(void *handle
)
1276 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
1278 kfree(adev
->dm
.dmub_fb_info
);
1279 adev
->dm
.dmub_fb_info
= NULL
;
1281 if (adev
->dm
.dmub_srv
) {
1282 dmub_srv_destroy(adev
->dm
.dmub_srv
);
1283 adev
->dm
.dmub_srv
= NULL
;
1286 if (adev
->dm
.dmub_fw
) {
1287 release_firmware(adev
->dm
.dmub_fw
);
1288 adev
->dm
.dmub_fw
= NULL
;
1291 if(adev
->dm
.fw_dmcu
) {
1292 release_firmware(adev
->dm
.fw_dmcu
);
1293 adev
->dm
.fw_dmcu
= NULL
;
1299 static int detect_mst_link_for_all_connectors(struct drm_device
*dev
)
1301 struct amdgpu_dm_connector
*aconnector
;
1302 struct drm_connector
*connector
;
1303 struct drm_connector_list_iter iter
;
1306 drm_connector_list_iter_begin(dev
, &iter
);
1307 drm_for_each_connector_iter(connector
, &iter
) {
1308 aconnector
= to_amdgpu_dm_connector(connector
);
1309 if (aconnector
->dc_link
->type
== dc_connection_mst_branch
&&
1310 aconnector
->mst_mgr
.aux
) {
1311 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1313 aconnector
->base
.base
.id
);
1315 ret
= drm_dp_mst_topology_mgr_set_mst(&aconnector
->mst_mgr
, true);
1317 DRM_ERROR("DM_MST: Failed to start MST\n");
1318 aconnector
->dc_link
->type
=
1319 dc_connection_single
;
1324 drm_connector_list_iter_end(&iter
);
1329 static int dm_late_init(void *handle
)
1331 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
1333 struct dmcu_iram_parameters params
;
1334 unsigned int linear_lut
[16];
1336 struct dmcu
*dmcu
= adev
->dm
.dc
->res_pool
->dmcu
;
1339 for (i
= 0; i
< 16; i
++)
1340 linear_lut
[i
] = 0xFFFF * i
/ 15;
1343 params
.backlight_ramping_start
= 0xCCCC;
1344 params
.backlight_ramping_reduction
= 0xCCCCCCCC;
1345 params
.backlight_lut_array_size
= 16;
1346 params
.backlight_lut_array
= linear_lut
;
1348 /* Min backlight level after ABM reduction, Don't allow below 1%
1349 * 0xFFFF x 0.01 = 0x28F
1351 params
.min_abm_backlight
= 0x28F;
1353 /* todo will enable for navi10 */
1354 if (adev
->asic_type
<= CHIP_RAVEN
) {
1355 ret
= dmcu_load_iram(dmcu
, params
);
1361 return detect_mst_link_for_all_connectors(adev
->ddev
);
1364 static void s3_handle_mst(struct drm_device
*dev
, bool suspend
)
1366 struct amdgpu_dm_connector
*aconnector
;
1367 struct drm_connector
*connector
;
1368 struct drm_connector_list_iter iter
;
1369 struct drm_dp_mst_topology_mgr
*mgr
;
1371 bool need_hotplug
= false;
1373 drm_connector_list_iter_begin(dev
, &iter
);
1374 drm_for_each_connector_iter(connector
, &iter
) {
1375 aconnector
= to_amdgpu_dm_connector(connector
);
1376 if (aconnector
->dc_link
->type
!= dc_connection_mst_branch
||
1377 aconnector
->mst_port
)
1380 mgr
= &aconnector
->mst_mgr
;
1383 drm_dp_mst_topology_mgr_suspend(mgr
);
1385 ret
= drm_dp_mst_topology_mgr_resume(mgr
, true);
1387 drm_dp_mst_topology_mgr_set_mst(mgr
, false);
1388 need_hotplug
= true;
1392 drm_connector_list_iter_end(&iter
);
1395 drm_kms_helper_hotplug_event(dev
);
1398 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device
*adev
)
1400 struct smu_context
*smu
= &adev
->smu
;
1403 if (!is_support_sw_smu(adev
))
1406 /* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1407 * on window driver dc implementation.
1408 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1409 * should be passed to smu during boot up and resume from s3.
1410 * boot up: dc calculate dcn watermark clock settings within dc_create,
1411 * dcn20_resource_construct
1412 * then call pplib functions below to pass the settings to smu:
1413 * smu_set_watermarks_for_clock_ranges
1414 * smu_set_watermarks_table
1415 * navi10_set_watermarks_table
1416 * smu_write_watermarks_table
1418 * For Renoir, clock settings of dcn watermark are also fixed values.
1419 * dc has implemented different flow for window driver:
1420 * dc_hardware_init / dc_set_power_state
1425 * smu_set_watermarks_for_clock_ranges
1426 * renoir_set_watermarks_table
1427 * smu_write_watermarks_table
1430 * dc_hardware_init -> amdgpu_dm_init
1431 * dc_set_power_state --> dm_resume
1433 * therefore, this function apply to navi10/12/14 but not Renoir
1436 switch(adev
->asic_type
) {
1445 mutex_lock(&smu
->mutex
);
1447 /* pass data to smu controller */
1448 if ((smu
->watermarks_bitmap
& WATERMARKS_EXIST
) &&
1449 !(smu
->watermarks_bitmap
& WATERMARKS_LOADED
)) {
1450 ret
= smu_write_watermarks_table(smu
);
1453 mutex_unlock(&smu
->mutex
);
1454 DRM_ERROR("Failed to update WMTABLE!\n");
1457 smu
->watermarks_bitmap
|= WATERMARKS_LOADED
;
1460 mutex_unlock(&smu
->mutex
);
1466 * dm_hw_init() - Initialize DC device
1467 * @handle: The base driver device containing the amdgpu_dm device.
1469 * Initialize the &struct amdgpu_display_manager device. This involves calling
1470 * the initializers of each DM component, then populating the struct with them.
1472 * Although the function implies hardware initialization, both hardware and
1473 * software are initialized here. Splitting them out to their relevant init
1474 * hooks is a future TODO item.
1476 * Some notable things that are initialized here:
1478 * - Display Core, both software and hardware
1479 * - DC modules that we need (freesync and color management)
1480 * - DRM software states
1481 * - Interrupt sources and handlers
1483 * - Debug FS entries, if enabled
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}
1496 * dm_hw_fini() - Teardown DC device
1497 * @handle: The base driver device containing the amdgpu_dm device.
1499 * Teardown components within &struct amdgpu_display_manager that require
1500 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1501 * were loaded. Also flush IRQ workqueues and disable them.
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Order matters: stop HPD sources first, then IRQ machinery, then DM. */
	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}
1514 static int dm_suspend(void *handle
)
1516 struct amdgpu_device
*adev
= handle
;
1517 struct amdgpu_display_manager
*dm
= &adev
->dm
;
1520 WARN_ON(adev
->dm
.cached_state
);
1521 adev
->dm
.cached_state
= drm_atomic_helper_suspend(adev
->ddev
);
1523 s3_handle_mst(adev
->ddev
, true);
1525 amdgpu_dm_irq_suspend(adev
);
1528 dc_set_power_state(dm
->dc
, DC_ACPI_CM_POWER_STATE_D3
);
1533 static struct amdgpu_dm_connector
*
1534 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state
*state
,
1535 struct drm_crtc
*crtc
)
1538 struct drm_connector_state
*new_con_state
;
1539 struct drm_connector
*connector
;
1540 struct drm_crtc
*crtc_from_state
;
1542 for_each_new_connector_in_state(state
, connector
, new_con_state
, i
) {
1543 crtc_from_state
= new_con_state
->crtc
;
1545 if (crtc_from_state
== crtc
)
1546 return to_amdgpu_dm_connector(connector
);
1552 static void emulated_link_detect(struct dc_link
*link
)
1554 struct dc_sink_init_data sink_init_data
= { 0 };
1555 struct display_sink_capability sink_caps
= { 0 };
1556 enum dc_edid_status edid_status
;
1557 struct dc_context
*dc_ctx
= link
->ctx
;
1558 struct dc_sink
*sink
= NULL
;
1559 struct dc_sink
*prev_sink
= NULL
;
1561 link
->type
= dc_connection_none
;
1562 prev_sink
= link
->local_sink
;
1564 if (prev_sink
!= NULL
)
1565 dc_sink_retain(prev_sink
);
1567 switch (link
->connector_signal
) {
1568 case SIGNAL_TYPE_HDMI_TYPE_A
: {
1569 sink_caps
.transaction_type
= DDC_TRANSACTION_TYPE_I2C
;
1570 sink_caps
.signal
= SIGNAL_TYPE_HDMI_TYPE_A
;
1574 case SIGNAL_TYPE_DVI_SINGLE_LINK
: {
1575 sink_caps
.transaction_type
= DDC_TRANSACTION_TYPE_I2C
;
1576 sink_caps
.signal
= SIGNAL_TYPE_DVI_SINGLE_LINK
;
1580 case SIGNAL_TYPE_DVI_DUAL_LINK
: {
1581 sink_caps
.transaction_type
= DDC_TRANSACTION_TYPE_I2C
;
1582 sink_caps
.signal
= SIGNAL_TYPE_DVI_DUAL_LINK
;
1586 case SIGNAL_TYPE_LVDS
: {
1587 sink_caps
.transaction_type
= DDC_TRANSACTION_TYPE_I2C
;
1588 sink_caps
.signal
= SIGNAL_TYPE_LVDS
;
1592 case SIGNAL_TYPE_EDP
: {
1593 sink_caps
.transaction_type
=
1594 DDC_TRANSACTION_TYPE_I2C_OVER_AUX
;
1595 sink_caps
.signal
= SIGNAL_TYPE_EDP
;
1599 case SIGNAL_TYPE_DISPLAY_PORT
: {
1600 sink_caps
.transaction_type
=
1601 DDC_TRANSACTION_TYPE_I2C_OVER_AUX
;
1602 sink_caps
.signal
= SIGNAL_TYPE_VIRTUAL
;
1607 DC_ERROR("Invalid connector type! signal:%d\n",
1608 link
->connector_signal
);
1612 sink_init_data
.link
= link
;
1613 sink_init_data
.sink_signal
= sink_caps
.signal
;
1615 sink
= dc_sink_create(&sink_init_data
);
1617 DC_ERROR("Failed to create sink!\n");
1621 /* dc_sink_create returns a new reference */
1622 link
->local_sink
= sink
;
1624 edid_status
= dm_helpers_read_local_edid(
1629 if (edid_status
!= EDID_OK
)
1630 DC_ERROR("Failed to read EDID");
1634 static int dm_resume(void *handle
)
1636 struct amdgpu_device
*adev
= handle
;
1637 struct drm_device
*ddev
= adev
->ddev
;
1638 struct amdgpu_display_manager
*dm
= &adev
->dm
;
1639 struct amdgpu_dm_connector
*aconnector
;
1640 struct drm_connector
*connector
;
1641 struct drm_connector_list_iter iter
;
1642 struct drm_crtc
*crtc
;
1643 struct drm_crtc_state
*new_crtc_state
;
1644 struct dm_crtc_state
*dm_new_crtc_state
;
1645 struct drm_plane
*plane
;
1646 struct drm_plane_state
*new_plane_state
;
1647 struct dm_plane_state
*dm_new_plane_state
;
1648 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(dm
->atomic_obj
.state
);
1649 enum dc_connection_type new_connection_type
= dc_connection_none
;
1652 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1653 dc_release_state(dm_state
->context
);
1654 dm_state
->context
= dc_create_state(dm
->dc
);
1655 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1656 dc_resource_state_construct(dm
->dc
, dm_state
->context
);
1658 /* Before powering on DC we need to re-initialize DMUB. */
1659 r
= dm_dmub_hw_init(adev
);
1661 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r
);
1663 /* power on hardware */
1664 dc_set_power_state(dm
->dc
, DC_ACPI_CM_POWER_STATE_D0
);
1666 /* program HPD filter */
1670 * early enable HPD Rx IRQ, should be done before set mode as short
1671 * pulse interrupts are used for MST
1673 amdgpu_dm_irq_resume_early(adev
);
1675 /* On resume we need to rewrite the MSTM control bits to enable MST*/
1676 s3_handle_mst(ddev
, false);
1679 drm_connector_list_iter_begin(ddev
, &iter
);
1680 drm_for_each_connector_iter(connector
, &iter
) {
1681 aconnector
= to_amdgpu_dm_connector(connector
);
1684 * this is the case when traversing through already created
1685 * MST connectors, should be skipped
1687 if (aconnector
->mst_port
)
1690 mutex_lock(&aconnector
->hpd_lock
);
1691 if (!dc_link_detect_sink(aconnector
->dc_link
, &new_connection_type
))
1692 DRM_ERROR("KMS: Failed to detect connector\n");
1694 if (aconnector
->base
.force
&& new_connection_type
== dc_connection_none
)
1695 emulated_link_detect(aconnector
->dc_link
);
1697 dc_link_detect(aconnector
->dc_link
, DETECT_REASON_HPD
);
1699 if (aconnector
->fake_enable
&& aconnector
->dc_link
->local_sink
)
1700 aconnector
->fake_enable
= false;
1702 if (aconnector
->dc_sink
)
1703 dc_sink_release(aconnector
->dc_sink
);
1704 aconnector
->dc_sink
= NULL
;
1705 amdgpu_dm_update_connector_after_detect(aconnector
);
1706 mutex_unlock(&aconnector
->hpd_lock
);
1708 drm_connector_list_iter_end(&iter
);
1710 /* Force mode set in atomic commit */
1711 for_each_new_crtc_in_state(dm
->cached_state
, crtc
, new_crtc_state
, i
)
1712 new_crtc_state
->active_changed
= true;
1715 * atomic_check is expected to create the dc states. We need to release
1716 * them here, since they were duplicated as part of the suspend
1719 for_each_new_crtc_in_state(dm
->cached_state
, crtc
, new_crtc_state
, i
) {
1720 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
1721 if (dm_new_crtc_state
->stream
) {
1722 WARN_ON(kref_read(&dm_new_crtc_state
->stream
->refcount
) > 1);
1723 dc_stream_release(dm_new_crtc_state
->stream
);
1724 dm_new_crtc_state
->stream
= NULL
;
1728 for_each_new_plane_in_state(dm
->cached_state
, plane
, new_plane_state
, i
) {
1729 dm_new_plane_state
= to_dm_plane_state(new_plane_state
);
1730 if (dm_new_plane_state
->dc_state
) {
1731 WARN_ON(kref_read(&dm_new_plane_state
->dc_state
->refcount
) > 1);
1732 dc_plane_state_release(dm_new_plane_state
->dc_state
);
1733 dm_new_plane_state
->dc_state
= NULL
;
1737 drm_atomic_helper_resume(ddev
, dm
->cached_state
);
1739 dm
->cached_state
= NULL
;
1741 amdgpu_dm_irq_resume_late(adev
);
1743 amdgpu_dm_smu_write_watermarks_table(adev
);
1751 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
1752 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1753 * the base driver's device list to be initialized and torn down accordingly.
1755 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1758 static const struct amd_ip_funcs amdgpu_dm_funcs
= {
1760 .early_init
= dm_early_init
,
1761 .late_init
= dm_late_init
,
1762 .sw_init
= dm_sw_init
,
1763 .sw_fini
= dm_sw_fini
,
1764 .hw_init
= dm_hw_init
,
1765 .hw_fini
= dm_hw_fini
,
1766 .suspend
= dm_suspend
,
1767 .resume
= dm_resume
,
1768 .is_idle
= dm_is_idle
,
1769 .wait_for_idle
= dm_wait_for_idle
,
1770 .check_soft_reset
= dm_check_soft_reset
,
1771 .soft_reset
= dm_soft_reset
,
1772 .set_clockgating_state
= dm_set_clockgating_state
,
1773 .set_powergating_state
= dm_set_powergating_state
,
1776 const struct amdgpu_ip_block_version dm_ip_block
=
1778 .type
= AMD_IP_BLOCK_TYPE_DCE
,
1782 .funcs
= &amdgpu_dm_funcs
,
1792 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs
= {
1793 .fb_create
= amdgpu_display_user_framebuffer_create
,
1794 .output_poll_changed
= drm_fb_helper_output_poll_changed
,
1795 .atomic_check
= amdgpu_dm_atomic_check
,
1796 .atomic_commit
= amdgpu_dm_atomic_commit
,
1799 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs
= {
1800 .atomic_commit_tail
= amdgpu_dm_atomic_commit_tail
1803 static void update_connector_ext_caps(struct amdgpu_dm_connector
*aconnector
)
1805 u32 max_cll
, min_cll
, max
, min
, q
, r
;
1806 struct amdgpu_dm_backlight_caps
*caps
;
1807 struct amdgpu_display_manager
*dm
;
1808 struct drm_connector
*conn_base
;
1809 struct amdgpu_device
*adev
;
1810 static const u8 pre_computed_values
[] = {
1811 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
1812 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
1814 if (!aconnector
|| !aconnector
->dc_link
)
1817 conn_base
= &aconnector
->base
;
1818 adev
= conn_base
->dev
->dev_private
;
1820 caps
= &dm
->backlight_caps
;
1821 caps
->ext_caps
= &aconnector
->dc_link
->dpcd_sink_ext_caps
;
1822 caps
->aux_support
= false;
1823 max_cll
= conn_base
->hdr_sink_metadata
.hdmi_type1
.max_cll
;
1824 min_cll
= conn_base
->hdr_sink_metadata
.hdmi_type1
.min_cll
;
1826 if (caps
->ext_caps
->bits
.oled
== 1 ||
1827 caps
->ext_caps
->bits
.sdr_aux_backlight_control
== 1 ||
1828 caps
->ext_caps
->bits
.hdr_aux_backlight_control
== 1)
1829 caps
->aux_support
= true;
1831 /* From the specification (CTA-861-G), for calculating the maximum
1832 * luminance we need to use:
1833 * Luminance = 50*2**(CV/32)
1834 * Where CV is a one-byte value.
1835 * For calculating this expression we may need float point precision;
1836 * to avoid this complexity level, we take advantage that CV is divided
1837 * by a constant. From the Euclids division algorithm, we know that CV
1838 * can be written as: CV = 32*q + r. Next, we replace CV in the
1839 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
1840 * need to pre-compute the value of r/32. For pre-computing the values
1841 * We just used the following Ruby line:
1842 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
1843 * The results of the above expressions can be verified at
1844 * pre_computed_values.
1848 max
= (1 << q
) * pre_computed_values
[r
];
1850 // min luminance: maxLum * (CV/255)^2 / 100
1851 q
= DIV_ROUND_CLOSEST(min_cll
, 255);
1852 min
= max
* DIV_ROUND_CLOSEST((q
* q
), 100);
1854 caps
->aux_max_input_signal
= max
;
1855 caps
->aux_min_input_signal
= min
;
1858 void amdgpu_dm_update_connector_after_detect(
1859 struct amdgpu_dm_connector
*aconnector
)
1861 struct drm_connector
*connector
= &aconnector
->base
;
1862 struct drm_device
*dev
= connector
->dev
;
1863 struct dc_sink
*sink
;
1865 /* MST handled by drm_mst framework */
1866 if (aconnector
->mst_mgr
.mst_state
== true)
1870 sink
= aconnector
->dc_link
->local_sink
;
1872 dc_sink_retain(sink
);
1875 * Edid mgmt connector gets first update only in mode_valid hook and then
1876 * the connector sink is set to either fake or physical sink depends on link status.
1877 * Skip if already done during boot.
1879 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
1880 && aconnector
->dc_em_sink
) {
1883 * For S3 resume with headless use eml_sink to fake stream
1884 * because on resume connector->sink is set to NULL
1886 mutex_lock(&dev
->mode_config
.mutex
);
1889 if (aconnector
->dc_sink
) {
1890 amdgpu_dm_update_freesync_caps(connector
, NULL
);
1892 * retain and release below are used to
1893 * bump up refcount for sink because the link doesn't point
1894 * to it anymore after disconnect, so on next crtc to connector
1895 * reshuffle by UMD we will get into unwanted dc_sink release
1897 dc_sink_release(aconnector
->dc_sink
);
1899 aconnector
->dc_sink
= sink
;
1900 dc_sink_retain(aconnector
->dc_sink
);
1901 amdgpu_dm_update_freesync_caps(connector
,
1904 amdgpu_dm_update_freesync_caps(connector
, NULL
);
1905 if (!aconnector
->dc_sink
) {
1906 aconnector
->dc_sink
= aconnector
->dc_em_sink
;
1907 dc_sink_retain(aconnector
->dc_sink
);
1911 mutex_unlock(&dev
->mode_config
.mutex
);
1914 dc_sink_release(sink
);
1919 * TODO: temporary guard to look for proper fix
1920 * if this sink is MST sink, we should not do anything
1922 if (sink
&& sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT_MST
) {
1923 dc_sink_release(sink
);
1927 if (aconnector
->dc_sink
== sink
) {
1929 * We got a DP short pulse (Link Loss, DP CTS, etc...).
1932 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
1933 aconnector
->connector_id
);
1935 dc_sink_release(sink
);
1939 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
1940 aconnector
->connector_id
, aconnector
->dc_sink
, sink
);
1942 mutex_lock(&dev
->mode_config
.mutex
);
1945 * 1. Update status of the drm connector
1946 * 2. Send an event and let userspace tell us what to do
1950 * TODO: check if we still need the S3 mode update workaround.
1951 * If yes, put it here.
1953 if (aconnector
->dc_sink
)
1954 amdgpu_dm_update_freesync_caps(connector
, NULL
);
1956 aconnector
->dc_sink
= sink
;
1957 dc_sink_retain(aconnector
->dc_sink
);
1958 if (sink
->dc_edid
.length
== 0) {
1959 aconnector
->edid
= NULL
;
1960 if (aconnector
->dc_link
->aux_mode
) {
1961 drm_dp_cec_unset_edid(
1962 &aconnector
->dm_dp_aux
.aux
);
1966 (struct edid
*)sink
->dc_edid
.raw_edid
;
1968 drm_connector_update_edid_property(connector
,
1971 if (aconnector
->dc_link
->aux_mode
)
1972 drm_dp_cec_set_edid(&aconnector
->dm_dp_aux
.aux
,
1976 amdgpu_dm_update_freesync_caps(connector
, aconnector
->edid
);
1977 update_connector_ext_caps(aconnector
);
1979 drm_dp_cec_unset_edid(&aconnector
->dm_dp_aux
.aux
);
1980 amdgpu_dm_update_freesync_caps(connector
, NULL
);
1981 drm_connector_update_edid_property(connector
, NULL
);
1982 aconnector
->num_modes
= 0;
1983 dc_sink_release(aconnector
->dc_sink
);
1984 aconnector
->dc_sink
= NULL
;
1985 aconnector
->edid
= NULL
;
1986 #ifdef CONFIG_DRM_AMD_DC_HDCP
1987 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
1988 if (connector
->state
->content_protection
== DRM_MODE_CONTENT_PROTECTION_ENABLED
)
1989 connector
->state
->content_protection
= DRM_MODE_CONTENT_PROTECTION_DESIRED
;
1993 mutex_unlock(&dev
->mode_config
.mutex
);
1996 dc_sink_release(sink
);
1999 static void handle_hpd_irq(void *param
)
2001 struct amdgpu_dm_connector
*aconnector
= (struct amdgpu_dm_connector
*)param
;
2002 struct drm_connector
*connector
= &aconnector
->base
;
2003 struct drm_device
*dev
= connector
->dev
;
2004 enum dc_connection_type new_connection_type
= dc_connection_none
;
2005 #ifdef CONFIG_DRM_AMD_DC_HDCP
2006 struct amdgpu_device
*adev
= dev
->dev_private
;
2010 * In case of failure or MST no need to update connector status or notify the OS
2011 * since (for MST case) MST does this in its own context.
2013 mutex_lock(&aconnector
->hpd_lock
);
2015 #ifdef CONFIG_DRM_AMD_DC_HDCP
2016 if (adev
->dm
.hdcp_workqueue
)
2017 hdcp_reset_display(adev
->dm
.hdcp_workqueue
, aconnector
->dc_link
->link_index
);
2019 if (aconnector
->fake_enable
)
2020 aconnector
->fake_enable
= false;
2022 if (!dc_link_detect_sink(aconnector
->dc_link
, &new_connection_type
))
2023 DRM_ERROR("KMS: Failed to detect connector\n");
2025 if (aconnector
->base
.force
&& new_connection_type
== dc_connection_none
) {
2026 emulated_link_detect(aconnector
->dc_link
);
2029 drm_modeset_lock_all(dev
);
2030 dm_restore_drm_connector_state(dev
, connector
);
2031 drm_modeset_unlock_all(dev
);
2033 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
)
2034 drm_kms_helper_hotplug_event(dev
);
2036 } else if (dc_link_detect(aconnector
->dc_link
, DETECT_REASON_HPD
)) {
2037 amdgpu_dm_update_connector_after_detect(aconnector
);
2040 drm_modeset_lock_all(dev
);
2041 dm_restore_drm_connector_state(dev
, connector
);
2042 drm_modeset_unlock_all(dev
);
2044 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
)
2045 drm_kms_helper_hotplug_event(dev
);
2047 mutex_unlock(&aconnector
->hpd_lock
);
2051 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector
*aconnector
)
2053 uint8_t esi
[DP_PSR_ERROR_STATUS
- DP_SINK_COUNT_ESI
] = { 0 };
2055 bool new_irq_handled
= false;
2057 int dpcd_bytes_to_read
;
2059 const int max_process_count
= 30;
2060 int process_count
= 0;
2062 const struct dc_link_status
*link_status
= dc_link_get_status(aconnector
->dc_link
);
2064 if (link_status
->dpcd_caps
->dpcd_rev
.raw
< 0x12) {
2065 dpcd_bytes_to_read
= DP_LANE0_1_STATUS
- DP_SINK_COUNT
;
2066 /* DPCD 0x200 - 0x201 for downstream IRQ */
2067 dpcd_addr
= DP_SINK_COUNT
;
2069 dpcd_bytes_to_read
= DP_PSR_ERROR_STATUS
- DP_SINK_COUNT_ESI
;
2070 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2071 dpcd_addr
= DP_SINK_COUNT_ESI
;
2074 dret
= drm_dp_dpcd_read(
2075 &aconnector
->dm_dp_aux
.aux
,
2078 dpcd_bytes_to_read
);
2080 while (dret
== dpcd_bytes_to_read
&&
2081 process_count
< max_process_count
) {
2087 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi
[0], esi
[1], esi
[2]);
2088 /* handle HPD short pulse irq */
2089 if (aconnector
->mst_mgr
.mst_state
)
2091 &aconnector
->mst_mgr
,
2095 if (new_irq_handled
) {
2096 /* ACK at DPCD to notify down stream */
2097 const int ack_dpcd_bytes_to_write
=
2098 dpcd_bytes_to_read
- 1;
2100 for (retry
= 0; retry
< 3; retry
++) {
2103 wret
= drm_dp_dpcd_write(
2104 &aconnector
->dm_dp_aux
.aux
,
2107 ack_dpcd_bytes_to_write
);
2108 if (wret
== ack_dpcd_bytes_to_write
)
2112 /* check if there is new irq to be handled */
2113 dret
= drm_dp_dpcd_read(
2114 &aconnector
->dm_dp_aux
.aux
,
2117 dpcd_bytes_to_read
);
2119 new_irq_handled
= false;
2125 if (process_count
== max_process_count
)
2126 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2129 static void handle_hpd_rx_irq(void *param
)
2131 struct amdgpu_dm_connector
*aconnector
= (struct amdgpu_dm_connector
*)param
;
2132 struct drm_connector
*connector
= &aconnector
->base
;
2133 struct drm_device
*dev
= connector
->dev
;
2134 struct dc_link
*dc_link
= aconnector
->dc_link
;
2135 bool is_mst_root_connector
= aconnector
->mst_mgr
.mst_state
;
2136 enum dc_connection_type new_connection_type
= dc_connection_none
;
2137 #ifdef CONFIG_DRM_AMD_DC_HDCP
2138 union hpd_irq_data hpd_irq_data
;
2139 struct amdgpu_device
*adev
= dev
->dev_private
;
2141 memset(&hpd_irq_data
, 0, sizeof(hpd_irq_data
));
2145 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2146 * conflict, after implement i2c helper, this mutex should be
2149 if (dc_link
->type
!= dc_connection_mst_branch
)
2150 mutex_lock(&aconnector
->hpd_lock
);
2153 #ifdef CONFIG_DRM_AMD_DC_HDCP
2154 if (dc_link_handle_hpd_rx_irq(dc_link
, &hpd_irq_data
, NULL
) &&
2156 if (dc_link_handle_hpd_rx_irq(dc_link
, NULL
, NULL
) &&
2158 !is_mst_root_connector
) {
2159 /* Downstream Port status changed. */
2160 if (!dc_link_detect_sink(dc_link
, &new_connection_type
))
2161 DRM_ERROR("KMS: Failed to detect connector\n");
2163 if (aconnector
->base
.force
&& new_connection_type
== dc_connection_none
) {
2164 emulated_link_detect(dc_link
);
2166 if (aconnector
->fake_enable
)
2167 aconnector
->fake_enable
= false;
2169 amdgpu_dm_update_connector_after_detect(aconnector
);
2172 drm_modeset_lock_all(dev
);
2173 dm_restore_drm_connector_state(dev
, connector
);
2174 drm_modeset_unlock_all(dev
);
2176 drm_kms_helper_hotplug_event(dev
);
2177 } else if (dc_link_detect(dc_link
, DETECT_REASON_HPDRX
)) {
2179 if (aconnector
->fake_enable
)
2180 aconnector
->fake_enable
= false;
2182 amdgpu_dm_update_connector_after_detect(aconnector
);
2185 drm_modeset_lock_all(dev
);
2186 dm_restore_drm_connector_state(dev
, connector
);
2187 drm_modeset_unlock_all(dev
);
2189 drm_kms_helper_hotplug_event(dev
);
2192 #ifdef CONFIG_DRM_AMD_DC_HDCP
2193 if (hpd_irq_data
.bytes
.device_service_irq
.bits
.CP_IRQ
) {
2194 if (adev
->dm
.hdcp_workqueue
)
2195 hdcp_handle_cpirq(adev
->dm
.hdcp_workqueue
, aconnector
->base
.index
);
2198 if ((dc_link
->cur_link_settings
.lane_count
!= LANE_COUNT_UNKNOWN
) ||
2199 (dc_link
->type
== dc_connection_mst_branch
))
2200 dm_handle_hpd_rx_irq(aconnector
);
2202 if (dc_link
->type
!= dc_connection_mst_branch
) {
2203 drm_dp_cec_irq(&aconnector
->dm_dp_aux
.aux
);
2204 mutex_unlock(&aconnector
->hpd_lock
);
2208 static void register_hpd_handlers(struct amdgpu_device
*adev
)
2210 struct drm_device
*dev
= adev
->ddev
;
2211 struct drm_connector
*connector
;
2212 struct amdgpu_dm_connector
*aconnector
;
2213 const struct dc_link
*dc_link
;
2214 struct dc_interrupt_params int_params
= {0};
2216 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
2217 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
2219 list_for_each_entry(connector
,
2220 &dev
->mode_config
.connector_list
, head
) {
2222 aconnector
= to_amdgpu_dm_connector(connector
);
2223 dc_link
= aconnector
->dc_link
;
2225 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd
) {
2226 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
2227 int_params
.irq_source
= dc_link
->irq_source_hpd
;
2229 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
2231 (void *) aconnector
);
2234 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd_rx
) {
2236 /* Also register for DP short pulse (hpd_rx). */
2237 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
2238 int_params
.irq_source
= dc_link
->irq_source_hpd_rx
;
2240 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
2242 (void *) aconnector
);
2247 /* Register IRQ sources and initialize IRQ callbacks */
2248 static int dce110_register_irq_handlers(struct amdgpu_device
*adev
)
2250 struct dc
*dc
= adev
->dm
.dc
;
2251 struct common_irq_params
*c_irq_params
;
2252 struct dc_interrupt_params int_params
= {0};
2255 unsigned client_id
= AMDGPU_IRQ_CLIENTID_LEGACY
;
2257 if (adev
->asic_type
>= CHIP_VEGA10
)
2258 client_id
= SOC15_IH_CLIENTID_DCE
;
2260 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
2261 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
2264 * Actions of amdgpu_irq_add_id():
2265 * 1. Register a set() function with base driver.
2266 * Base driver will call set() function to enable/disable an
2267 * interrupt in DC hardware.
2268 * 2. Register amdgpu_dm_irq_handler().
2269 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2270 * coming from DC hardware.
2271 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2272 * for acknowledging and handling. */
2274 /* Use VBLANK interrupt */
2275 for (i
= VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0
; i
<= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0
; i
++) {
2276 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->crtc_irq
);
2278 DRM_ERROR("Failed to add crtc irq id!\n");
2282 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
2283 int_params
.irq_source
=
2284 dc_interrupt_to_irq_source(dc
, i
, 0);
2286 c_irq_params
= &adev
->dm
.vblank_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VBLANK1
];
2288 c_irq_params
->adev
= adev
;
2289 c_irq_params
->irq_src
= int_params
.irq_source
;
2291 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
2292 dm_crtc_high_irq
, c_irq_params
);
2295 /* Use VUPDATE interrupt */
2296 for (i
= VISLANDS30_IV_SRCID_D1_V_UPDATE_INT
; i
<= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT
; i
+= 2) {
2297 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->vupdate_irq
);
2299 DRM_ERROR("Failed to add vupdate irq id!\n");
2303 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
2304 int_params
.irq_source
=
2305 dc_interrupt_to_irq_source(dc
, i
, 0);
2307 c_irq_params
= &adev
->dm
.vupdate_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VUPDATE1
];
2309 c_irq_params
->adev
= adev
;
2310 c_irq_params
->irq_src
= int_params
.irq_source
;
2312 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
2313 dm_vupdate_high_irq
, c_irq_params
);
2316 /* Use GRPH_PFLIP interrupt */
2317 for (i
= VISLANDS30_IV_SRCID_D1_GRPH_PFLIP
;
2318 i
<= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP
; i
+= 2) {
2319 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->pageflip_irq
);
2321 DRM_ERROR("Failed to add page flip irq id!\n");
2325 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
2326 int_params
.irq_source
=
2327 dc_interrupt_to_irq_source(dc
, i
, 0);
2329 c_irq_params
= &adev
->dm
.pflip_params
[int_params
.irq_source
- DC_IRQ_SOURCE_PFLIP_FIRST
];
2331 c_irq_params
->adev
= adev
;
2332 c_irq_params
->irq_src
= int_params
.irq_source
;
2334 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
2335 dm_pflip_high_irq
, c_irq_params
);
2340 r
= amdgpu_irq_add_id(adev
, client_id
,
2341 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A
, &adev
->hpd_irq
);
2343 DRM_ERROR("Failed to add hpd irq id!\n");
2347 register_hpd_handlers(adev
);
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(
			adev, &int_params, dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
	 * to trigger at end of each vblank, regardless of state of the lock,
	 * matching DCE behaviour.
	 */
	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
			i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);

		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif
2468 * Acquires the lock for the atomic state object and returns
2469 * the new atomic state.
2471 * This should only be called during atomic check.
2473 static int dm_atomic_get_state(struct drm_atomic_state
*state
,
2474 struct dm_atomic_state
**dm_state
)
2476 struct drm_device
*dev
= state
->dev
;
2477 struct amdgpu_device
*adev
= dev
->dev_private
;
2478 struct amdgpu_display_manager
*dm
= &adev
->dm
;
2479 struct drm_private_state
*priv_state
;
2484 priv_state
= drm_atomic_get_private_obj_state(state
, &dm
->atomic_obj
);
2485 if (IS_ERR(priv_state
))
2486 return PTR_ERR(priv_state
);
2488 *dm_state
= to_dm_atomic_state(priv_state
);
2493 struct dm_atomic_state
*
2494 dm_atomic_get_new_state(struct drm_atomic_state
*state
)
2496 struct drm_device
*dev
= state
->dev
;
2497 struct amdgpu_device
*adev
= dev
->dev_private
;
2498 struct amdgpu_display_manager
*dm
= &adev
->dm
;
2499 struct drm_private_obj
*obj
;
2500 struct drm_private_state
*new_obj_state
;
2503 for_each_new_private_obj_in_state(state
, obj
, new_obj_state
, i
) {
2504 if (obj
->funcs
== dm
->atomic_obj
.funcs
)
2505 return to_dm_atomic_state(new_obj_state
);
2511 struct dm_atomic_state
*
2512 dm_atomic_get_old_state(struct drm_atomic_state
*state
)
2514 struct drm_device
*dev
= state
->dev
;
2515 struct amdgpu_device
*adev
= dev
->dev_private
;
2516 struct amdgpu_display_manager
*dm
= &adev
->dm
;
2517 struct drm_private_obj
*obj
;
2518 struct drm_private_state
*old_obj_state
;
2521 for_each_old_private_obj_in_state(state
, obj
, old_obj_state
, i
) {
2522 if (obj
->funcs
== dm
->atomic_obj
.funcs
)
2523 return to_dm_atomic_state(old_obj_state
);
2529 static struct drm_private_state
*
2530 dm_atomic_duplicate_state(struct drm_private_obj
*obj
)
2532 struct dm_atomic_state
*old_state
, *new_state
;
2534 new_state
= kzalloc(sizeof(*new_state
), GFP_KERNEL
);
2538 __drm_atomic_helper_private_obj_duplicate_state(obj
, &new_state
->base
);
2540 old_state
= to_dm_atomic_state(obj
->state
);
2542 if (old_state
&& old_state
->context
)
2543 new_state
->context
= dc_copy_state(old_state
->context
);
2545 if (!new_state
->context
) {
2550 return &new_state
->base
;
2553 static void dm_atomic_destroy_state(struct drm_private_obj
*obj
,
2554 struct drm_private_state
*state
)
2556 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
2558 if (dm_state
&& dm_state
->context
)
2559 dc_release_state(dm_state
->context
);
2564 static struct drm_private_state_funcs dm_atomic_state_funcs
= {
2565 .atomic_duplicate_state
= dm_atomic_duplicate_state
,
2566 .atomic_destroy_state
= dm_atomic_destroy_state
,
2569 static int amdgpu_dm_mode_config_init(struct amdgpu_device
*adev
)
2571 struct dm_atomic_state
*state
;
2574 adev
->mode_info
.mode_config_initialized
= true;
2576 adev
->ddev
->mode_config
.funcs
= (void *)&amdgpu_dm_mode_funcs
;
2577 adev
->ddev
->mode_config
.helper_private
= &amdgpu_dm_mode_config_helperfuncs
;
2579 adev
->ddev
->mode_config
.max_width
= 16384;
2580 adev
->ddev
->mode_config
.max_height
= 16384;
2582 adev
->ddev
->mode_config
.preferred_depth
= 24;
2583 adev
->ddev
->mode_config
.prefer_shadow
= 1;
2584 /* indicates support for immediate flip */
2585 adev
->ddev
->mode_config
.async_page_flip
= true;
2587 adev
->ddev
->mode_config
.fb_base
= adev
->gmc
.aper_base
;
2589 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
2593 state
->context
= dc_create_state(adev
->dm
.dc
);
2594 if (!state
->context
) {
2599 dc_resource_state_copy_construct_current(adev
->dm
.dc
, state
->context
);
2601 drm_atomic_private_obj_init(adev
->ddev
,
2602 &adev
->dm
.atomic_obj
,
2604 &dm_atomic_state_funcs
);
2606 r
= amdgpu_display_modeset_create_props(adev
);
2610 r
= amdgpu_dm_audio_init(adev
);
2617 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2618 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2619 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2621 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2622 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2624 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager
*dm
)
2626 #if defined(CONFIG_ACPI)
2627 struct amdgpu_dm_backlight_caps caps
;
2629 if (dm
->backlight_caps
.caps_valid
)
2632 amdgpu_acpi_get_backlight_caps(dm
->adev
, &caps
);
2633 if (caps
.caps_valid
) {
2634 dm
->backlight_caps
.caps_valid
= true;
2635 if (caps
.aux_support
)
2637 dm
->backlight_caps
.min_input_signal
= caps
.min_input_signal
;
2638 dm
->backlight_caps
.max_input_signal
= caps
.max_input_signal
;
2640 dm
->backlight_caps
.min_input_signal
=
2641 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT
;
2642 dm
->backlight_caps
.max_input_signal
=
2643 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT
;
2646 if (dm
->backlight_caps
.aux_support
)
2649 dm
->backlight_caps
.min_input_signal
= AMDGPU_DM_DEFAULT_MIN_BACKLIGHT
;
2650 dm
->backlight_caps
.max_input_signal
= AMDGPU_DM_DEFAULT_MAX_BACKLIGHT
;
2654 static int set_backlight_via_aux(struct dc_link
*link
, uint32_t brightness
)
2661 rc
= dc_link_set_backlight_level_nits(link
, true, brightness
,
2662 AUX_BL_DEFAULT_TRANSITION_TIME_MS
);
2667 static u32
convert_brightness(const struct amdgpu_dm_backlight_caps
*caps
,
2668 const uint32_t user_brightness
)
2670 u32 min
, max
, conversion_pace
;
2671 u32 brightness
= user_brightness
;
2676 if (!caps
->aux_support
) {
2677 max
= caps
->max_input_signal
;
2678 min
= caps
->min_input_signal
;
2680 * The brightness input is in the range 0-255
2681 * It needs to be rescaled to be between the
2682 * requested min and max input signal
2683 * It also needs to be scaled up by 0x101 to
2684 * match the DC interface which has a range of
2687 conversion_pace
= 0x101;
2692 / AMDGPU_MAX_BL_LEVEL
2693 + min
* conversion_pace
;
2696 * We are doing a linear interpolation here, which is OK but
2697 * does not provide the optimal result. We probably want
2698 * something close to the Perceptual Quantizer (PQ) curve.
2700 max
= caps
->aux_max_input_signal
;
2701 min
= caps
->aux_min_input_signal
;
2703 brightness
= (AMDGPU_MAX_BL_LEVEL
- user_brightness
) * min
2704 + user_brightness
* max
;
2705 // Multiple the value by 1000 since we use millinits
2707 brightness
= DIV_ROUND_CLOSEST(brightness
, AMDGPU_MAX_BL_LEVEL
);
2714 static int amdgpu_dm_backlight_update_status(struct backlight_device
*bd
)
2716 struct amdgpu_display_manager
*dm
= bl_get_data(bd
);
2717 struct amdgpu_dm_backlight_caps caps
;
2718 struct dc_link
*link
= NULL
;
2722 amdgpu_dm_update_backlight_caps(dm
);
2723 caps
= dm
->backlight_caps
;
2725 link
= (struct dc_link
*)dm
->backlight_link
;
2727 brightness
= convert_brightness(&caps
, bd
->props
.brightness
);
2728 // Change brightness based on AUX property
2729 if (caps
.aux_support
)
2730 return set_backlight_via_aux(link
, brightness
);
2732 rc
= dc_link_set_backlight_level(dm
->backlight_link
, brightness
, 0);
2737 static int amdgpu_dm_backlight_get_brightness(struct backlight_device
*bd
)
2739 struct amdgpu_display_manager
*dm
= bl_get_data(bd
);
2740 int ret
= dc_link_get_backlight_level(dm
->backlight_link
);
2742 if (ret
== DC_ERROR_UNEXPECTED
)
2743 return bd
->props
.brightness
;
2747 static const struct backlight_ops amdgpu_dm_backlight_ops
= {
2748 .options
= BL_CORE_SUSPENDRESUME
,
2749 .get_brightness
= amdgpu_dm_backlight_get_brightness
,
2750 .update_status
= amdgpu_dm_backlight_update_status
,
2754 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager
*dm
)
2757 struct backlight_properties props
= { 0 };
2759 amdgpu_dm_update_backlight_caps(dm
);
2761 props
.max_brightness
= AMDGPU_MAX_BL_LEVEL
;
2762 props
.brightness
= AMDGPU_MAX_BL_LEVEL
;
2763 props
.type
= BACKLIGHT_RAW
;
2765 snprintf(bl_name
, sizeof(bl_name
), "amdgpu_bl%d",
2766 dm
->adev
->ddev
->primary
->index
);
2768 dm
->backlight_dev
= backlight_device_register(bl_name
,
2769 dm
->adev
->ddev
->dev
,
2771 &amdgpu_dm_backlight_ops
,
2774 if (IS_ERR(dm
->backlight_dev
))
2775 DRM_ERROR("DM: Backlight registration failed!\n");
2777 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name
);
2782 static int initialize_plane(struct amdgpu_display_manager
*dm
,
2783 struct amdgpu_mode_info
*mode_info
, int plane_id
,
2784 enum drm_plane_type plane_type
,
2785 const struct dc_plane_cap
*plane_cap
)
2787 struct drm_plane
*plane
;
2788 unsigned long possible_crtcs
;
2791 plane
= kzalloc(sizeof(struct drm_plane
), GFP_KERNEL
);
2793 DRM_ERROR("KMS: Failed to allocate plane\n");
2796 plane
->type
= plane_type
;
2799 * HACK: IGT tests expect that the primary plane for a CRTC
2800 * can only have one possible CRTC. Only expose support for
2801 * any CRTC if they're not going to be used as a primary plane
2802 * for a CRTC - like overlay or underlay planes.
2804 possible_crtcs
= 1 << plane_id
;
2805 if (plane_id
>= dm
->dc
->caps
.max_streams
)
2806 possible_crtcs
= 0xff;
2808 ret
= amdgpu_dm_plane_init(dm
, plane
, possible_crtcs
, plane_cap
);
2811 DRM_ERROR("KMS: Failed to initialize plane\n");
2817 mode_info
->planes
[plane_id
] = plane
;
/* Register a backlight device for @link when it is a connected
 * eDP/LVDS panel; remembers the link for later brightness updates.
 */
static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Event if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better then a black screen.
		 */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev)
			dm->backlight_link = link;
	}
#endif
}
2846 * In this architecture, the association
2847 * connector -> encoder -> crtc
2848 * id not really requried. The crtc and connector will hold the
2849 * display_index as an abstraction to use with DAL component
2851 * Returns 0 on success
2853 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device
*adev
)
2855 struct amdgpu_display_manager
*dm
= &adev
->dm
;
2857 struct amdgpu_dm_connector
*aconnector
= NULL
;
2858 struct amdgpu_encoder
*aencoder
= NULL
;
2859 struct amdgpu_mode_info
*mode_info
= &adev
->mode_info
;
2861 int32_t primary_planes
;
2862 enum dc_connection_type new_connection_type
= dc_connection_none
;
2863 const struct dc_plane_cap
*plane
;
2865 link_cnt
= dm
->dc
->caps
.max_links
;
2866 if (amdgpu_dm_mode_config_init(dm
->adev
)) {
2867 DRM_ERROR("DM: Failed to initialize mode config\n");
2871 /* There is one primary plane per CRTC */
2872 primary_planes
= dm
->dc
->caps
.max_streams
;
2873 ASSERT(primary_planes
<= AMDGPU_MAX_PLANES
);
2876 * Initialize primary planes, implicit planes for legacy IOCTLS.
2877 * Order is reversed to match iteration order in atomic check.
2879 for (i
= (primary_planes
- 1); i
>= 0; i
--) {
2880 plane
= &dm
->dc
->caps
.planes
[i
];
2882 if (initialize_plane(dm
, mode_info
, i
,
2883 DRM_PLANE_TYPE_PRIMARY
, plane
)) {
2884 DRM_ERROR("KMS: Failed to initialize primary plane\n");
2890 * Initialize overlay planes, index starting after primary planes.
2891 * These planes have a higher DRM index than the primary planes since
2892 * they should be considered as having a higher z-order.
2893 * Order is reversed to match iteration order in atomic check.
2895 * Only support DCN for now, and only expose one so we don't encourage
2896 * userspace to use up all the pipes.
2898 for (i
= 0; i
< dm
->dc
->caps
.max_planes
; ++i
) {
2899 struct dc_plane_cap
*plane
= &dm
->dc
->caps
.planes
[i
];
2901 if (plane
->type
!= DC_PLANE_TYPE_DCN_UNIVERSAL
)
2904 if (!plane
->blends_with_above
|| !plane
->blends_with_below
)
2907 if (!plane
->pixel_format_support
.argb8888
)
2910 if (initialize_plane(dm
, NULL
, primary_planes
+ i
,
2911 DRM_PLANE_TYPE_OVERLAY
, plane
)) {
2912 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2916 /* Only create one overlay plane. */
2920 for (i
= 0; i
< dm
->dc
->caps
.max_streams
; i
++)
2921 if (amdgpu_dm_crtc_init(dm
, mode_info
->planes
[i
], i
)) {
2922 DRM_ERROR("KMS: Failed to initialize crtc\n");
2926 dm
->display_indexes_num
= dm
->dc
->caps
.max_streams
;
2928 /* loops over all connectors on the board */
2929 for (i
= 0; i
< link_cnt
; i
++) {
2930 struct dc_link
*link
= NULL
;
2932 if (i
> AMDGPU_DM_MAX_DISPLAY_INDEX
) {
2934 "KMS: Cannot support more than %d display indexes\n",
2935 AMDGPU_DM_MAX_DISPLAY_INDEX
);
2939 aconnector
= kzalloc(sizeof(*aconnector
), GFP_KERNEL
);
2943 aencoder
= kzalloc(sizeof(*aencoder
), GFP_KERNEL
);
2947 if (amdgpu_dm_encoder_init(dm
->ddev
, aencoder
, i
)) {
2948 DRM_ERROR("KMS: Failed to initialize encoder\n");
2952 if (amdgpu_dm_connector_init(dm
, aconnector
, i
, aencoder
)) {
2953 DRM_ERROR("KMS: Failed to initialize connector\n");
2957 link
= dc_get_link_at_index(dm
->dc
, i
);
2959 if (!dc_link_detect_sink(link
, &new_connection_type
))
2960 DRM_ERROR("KMS: Failed to detect connector\n");
2962 if (aconnector
->base
.force
&& new_connection_type
== dc_connection_none
) {
2963 emulated_link_detect(link
);
2964 amdgpu_dm_update_connector_after_detect(aconnector
);
2966 } else if (dc_link_detect(link
, DETECT_REASON_BOOT
)) {
2967 amdgpu_dm_update_connector_after_detect(aconnector
);
2968 register_backlight_device(dm
, link
);
2969 if (amdgpu_dc_feature_mask
& DC_PSR_MASK
)
2970 amdgpu_dm_set_psr_caps(link
);
2976 /* Software is initialized. Now we can register interrupt handlers. */
2977 switch (adev
->asic_type
) {
2987 case CHIP_POLARIS11
:
2988 case CHIP_POLARIS10
:
2989 case CHIP_POLARIS12
:
2994 if (dce110_register_irq_handlers(dm
->adev
)) {
2995 DRM_ERROR("DM: Failed to initialize IRQ\n");
2999 #if defined(CONFIG_DRM_AMD_DC_DCN)
3005 if (dcn10_register_irq_handlers(dm
->adev
)) {
3006 DRM_ERROR("DM: Failed to initialize IRQ\n");
3012 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev
->asic_type
);
3016 if (adev
->asic_type
!= CHIP_CARRIZO
&& adev
->asic_type
!= CHIP_STONEY
)
3017 dm
->dc
->debug
.disable_stutter
= amdgpu_pp_feature_mask
& PP_STUTTER_MODE
? false : true;
3019 /* No userspace support. */
3020 dm
->dc
->debug
.disable_tri_buf
= true;
3030 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager
*dm
)
3032 drm_mode_config_cleanup(dm
->ddev
);
3033 drm_atomic_private_obj_fini(&dm
->atomic_obj
);
3037 /******************************************************************************
3038 * amdgpu_display_funcs functions
3039 *****************************************************************************/
/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}
3053 static const struct amdgpu_display_funcs dm_display_funcs
= {
3054 .bandwidth_update
= dm_bandwidth_update
, /* called unconditionally */
3055 .vblank_get_counter
= dm_vblank_get_counter
,/* called unconditionally */
3056 .backlight_set_level
= NULL
, /* never called for DC */
3057 .backlight_get_level
= NULL
, /* never called for DC */
3058 .hpd_sense
= NULL
,/* called unconditionally */
3059 .hpd_set_polarity
= NULL
, /* called unconditionally */
3060 .hpd_get_gpio_reg
= NULL
, /* VBIOS parsing. DAL does it. */
3061 .page_flip_get_scanoutpos
=
3062 dm_crtc_get_scanoutpos
,/* called unconditionally */
3063 .add_encoder
= NULL
, /* VBIOS parsing. DAL does it. */
3064 .add_connector
= NULL
, /* VBIOS parsing. DAL does it. */
#if defined(CONFIG_DEBUG_KERNEL_DC)
/* Debug-only sysfs hook: writing a non-zero value simulates a resume
 * (followed by a hotplug event), zero simulates a suspend.
 * NOTE(review): the suspend/resume calls were reconstructed from the
 * upstream driver - confirm against the tree this file belongs to.
 */
static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	int ret;
	int s3_state;
	struct drm_device *drm_dev = dev_get_drvdata(device);
	struct amdgpu_device *adev = drm_dev->dev_private;

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			drm_kms_helper_hotplug_event(adev->ddev);
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif
3096 static int dm_early_init(void *handle
)
3098 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
3100 switch (adev
->asic_type
) {
3103 adev
->mode_info
.num_crtc
= 6;
3104 adev
->mode_info
.num_hpd
= 6;
3105 adev
->mode_info
.num_dig
= 6;
3108 adev
->mode_info
.num_crtc
= 4;
3109 adev
->mode_info
.num_hpd
= 6;
3110 adev
->mode_info
.num_dig
= 7;
3114 adev
->mode_info
.num_crtc
= 2;
3115 adev
->mode_info
.num_hpd
= 6;
3116 adev
->mode_info
.num_dig
= 6;
3120 adev
->mode_info
.num_crtc
= 6;
3121 adev
->mode_info
.num_hpd
= 6;
3122 adev
->mode_info
.num_dig
= 7;
3125 adev
->mode_info
.num_crtc
= 3;
3126 adev
->mode_info
.num_hpd
= 6;
3127 adev
->mode_info
.num_dig
= 9;
3130 adev
->mode_info
.num_crtc
= 2;
3131 adev
->mode_info
.num_hpd
= 6;
3132 adev
->mode_info
.num_dig
= 9;
3134 case CHIP_POLARIS11
:
3135 case CHIP_POLARIS12
:
3136 adev
->mode_info
.num_crtc
= 5;
3137 adev
->mode_info
.num_hpd
= 5;
3138 adev
->mode_info
.num_dig
= 5;
3140 case CHIP_POLARIS10
:
3142 adev
->mode_info
.num_crtc
= 6;
3143 adev
->mode_info
.num_hpd
= 6;
3144 adev
->mode_info
.num_dig
= 6;
3149 adev
->mode_info
.num_crtc
= 6;
3150 adev
->mode_info
.num_hpd
= 6;
3151 adev
->mode_info
.num_dig
= 6;
3153 #if defined(CONFIG_DRM_AMD_DC_DCN)
3155 adev
->mode_info
.num_crtc
= 4;
3156 adev
->mode_info
.num_hpd
= 4;
3157 adev
->mode_info
.num_dig
= 4;
3162 adev
->mode_info
.num_crtc
= 6;
3163 adev
->mode_info
.num_hpd
= 6;
3164 adev
->mode_info
.num_dig
= 6;
3167 adev
->mode_info
.num_crtc
= 5;
3168 adev
->mode_info
.num_hpd
= 5;
3169 adev
->mode_info
.num_dig
= 5;
3172 adev
->mode_info
.num_crtc
= 4;
3173 adev
->mode_info
.num_hpd
= 4;
3174 adev
->mode_info
.num_dig
= 4;
3177 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev
->asic_type
);
3181 amdgpu_dm_set_irq_funcs(adev
);
3183 if (adev
->mode_info
.funcs
== NULL
)
3184 adev
->mode_info
.funcs
= &dm_display_funcs
;
3187 * Note: Do NOT change adev->audio_endpt_rreg and
3188 * adev->audio_endpt_wreg because they are initialised in
3189 * amdgpu_device_init()
3191 #if defined(CONFIG_DEBUG_KERNEL_DC)
3194 &dev_attr_s3_debug
);
3200 static bool modeset_required(struct drm_crtc_state
*crtc_state
,
3201 struct dc_stream_state
*new_stream
,
3202 struct dc_stream_state
*old_stream
)
3204 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
3207 if (!crtc_state
->enable
)
3210 return crtc_state
->active
;
3213 static bool modereset_required(struct drm_crtc_state
*crtc_state
)
3215 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
3218 return !crtc_state
->enable
|| !crtc_state
->active
;
/* drm_encoder_funcs.destroy: release DRM encoder state and free it. */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
3227 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs
= {
3228 .destroy
= amdgpu_dm_encoder_destroy
,
3232 static int fill_dc_scaling_info(const struct drm_plane_state
*state
,
3233 struct dc_scaling_info
*scaling_info
)
3235 int scale_w
, scale_h
;
3237 memset(scaling_info
, 0, sizeof(*scaling_info
));
3239 /* Source is fixed 16.16 but we ignore mantissa for now... */
3240 scaling_info
->src_rect
.x
= state
->src_x
>> 16;
3241 scaling_info
->src_rect
.y
= state
->src_y
>> 16;
3243 scaling_info
->src_rect
.width
= state
->src_w
>> 16;
3244 if (scaling_info
->src_rect
.width
== 0)
3247 scaling_info
->src_rect
.height
= state
->src_h
>> 16;
3248 if (scaling_info
->src_rect
.height
== 0)
3251 scaling_info
->dst_rect
.x
= state
->crtc_x
;
3252 scaling_info
->dst_rect
.y
= state
->crtc_y
;
3254 if (state
->crtc_w
== 0)
3257 scaling_info
->dst_rect
.width
= state
->crtc_w
;
3259 if (state
->crtc_h
== 0)
3262 scaling_info
->dst_rect
.height
= state
->crtc_h
;
3264 /* DRM doesn't specify clipping on destination output. */
3265 scaling_info
->clip_rect
= scaling_info
->dst_rect
;
3267 /* TODO: Validate scaling per-format with DC plane caps */
3268 scale_w
= scaling_info
->dst_rect
.width
* 1000 /
3269 scaling_info
->src_rect
.width
;
3271 if (scale_w
< 250 || scale_w
> 16000)
3274 scale_h
= scaling_info
->dst_rect
.height
* 1000 /
3275 scaling_info
->src_rect
.height
;
3277 if (scale_h
< 250 || scale_h
> 16000)
3281 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3282 * assume reasonable defaults based on the format.
3288 static int get_fb_info(const struct amdgpu_framebuffer
*amdgpu_fb
,
3289 uint64_t *tiling_flags
)
3291 struct amdgpu_bo
*rbo
= gem_to_amdgpu_bo(amdgpu_fb
->base
.obj
[0]);
3292 int r
= amdgpu_bo_reserve(rbo
, false);
3295 /* Don't show error message when returning -ERESTARTSYS */
3296 if (r
!= -ERESTARTSYS
)
3297 DRM_ERROR("Unable to reserve buffer: %d\n", r
);
3302 amdgpu_bo_get_tiling_flags(rbo
, tiling_flags
);
3304 amdgpu_bo_unreserve(rbo
);
3309 static inline uint64_t get_dcc_address(uint64_t address
, uint64_t tiling_flags
)
3311 uint32_t offset
= AMDGPU_TILING_GET(tiling_flags
, DCC_OFFSET_256B
);
3313 return offset
? (address
+ offset
* 256) : 0;
3317 fill_plane_dcc_attributes(struct amdgpu_device
*adev
,
3318 const struct amdgpu_framebuffer
*afb
,
3319 const enum surface_pixel_format format
,
3320 const enum dc_rotation_angle rotation
,
3321 const struct plane_size
*plane_size
,
3322 const union dc_tiling_info
*tiling_info
,
3323 const uint64_t info
,
3324 struct dc_plane_dcc_param
*dcc
,
3325 struct dc_plane_address
*address
,
3326 bool force_disable_dcc
)
3328 struct dc
*dc
= adev
->dm
.dc
;
3329 struct dc_dcc_surface_param input
;
3330 struct dc_surface_dcc_cap output
;
3331 uint32_t offset
= AMDGPU_TILING_GET(info
, DCC_OFFSET_256B
);
3332 uint32_t i64b
= AMDGPU_TILING_GET(info
, DCC_INDEPENDENT_64B
) != 0;
3333 uint64_t dcc_address
;
3335 memset(&input
, 0, sizeof(input
));
3336 memset(&output
, 0, sizeof(output
));
3338 if (force_disable_dcc
)
3344 if (format
>= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
)
3347 if (!dc
->cap_funcs
.get_dcc_compression_cap
)
3350 input
.format
= format
;
3351 input
.surface_size
.width
= plane_size
->surface_size
.width
;
3352 input
.surface_size
.height
= plane_size
->surface_size
.height
;
3353 input
.swizzle_mode
= tiling_info
->gfx9
.swizzle
;
3355 if (rotation
== ROTATION_ANGLE_0
|| rotation
== ROTATION_ANGLE_180
)
3356 input
.scan
= SCAN_DIRECTION_HORIZONTAL
;
3357 else if (rotation
== ROTATION_ANGLE_90
|| rotation
== ROTATION_ANGLE_270
)
3358 input
.scan
= SCAN_DIRECTION_VERTICAL
;
3360 if (!dc
->cap_funcs
.get_dcc_compression_cap(dc
, &input
, &output
))
3363 if (!output
.capable
)
3366 if (i64b
== 0 && output
.grph
.rgb
.independent_64b_blks
!= 0)
3371 AMDGPU_TILING_GET(info
, DCC_PITCH_MAX
) + 1;
3372 dcc
->independent_64b_blks
= i64b
;
3374 dcc_address
= get_dcc_address(afb
->address
, info
);
3375 address
->grph
.meta_addr
.low_part
= lower_32_bits(dcc_address
);
3376 address
->grph
.meta_addr
.high_part
= upper_32_bits(dcc_address
);
3382 fill_plane_buffer_attributes(struct amdgpu_device
*adev
,
3383 const struct amdgpu_framebuffer
*afb
,
3384 const enum surface_pixel_format format
,
3385 const enum dc_rotation_angle rotation
,
3386 const uint64_t tiling_flags
,
3387 union dc_tiling_info
*tiling_info
,
3388 struct plane_size
*plane_size
,
3389 struct dc_plane_dcc_param
*dcc
,
3390 struct dc_plane_address
*address
,
3391 bool force_disable_dcc
)
3393 const struct drm_framebuffer
*fb
= &afb
->base
;
3396 memset(tiling_info
, 0, sizeof(*tiling_info
));
3397 memset(plane_size
, 0, sizeof(*plane_size
));
3398 memset(dcc
, 0, sizeof(*dcc
));
3399 memset(address
, 0, sizeof(*address
));
3401 if (format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
3402 plane_size
->surface_size
.x
= 0;
3403 plane_size
->surface_size
.y
= 0;
3404 plane_size
->surface_size
.width
= fb
->width
;
3405 plane_size
->surface_size
.height
= fb
->height
;
3406 plane_size
->surface_pitch
=
3407 fb
->pitches
[0] / fb
->format
->cpp
[0];
3409 address
->type
= PLN_ADDR_TYPE_GRAPHICS
;
3410 address
->grph
.addr
.low_part
= lower_32_bits(afb
->address
);
3411 address
->grph
.addr
.high_part
= upper_32_bits(afb
->address
);
3412 } else if (format
< SURFACE_PIXEL_FORMAT_INVALID
) {
3413 uint64_t chroma_addr
= afb
->address
+ fb
->offsets
[1];
3415 plane_size
->surface_size
.x
= 0;
3416 plane_size
->surface_size
.y
= 0;
3417 plane_size
->surface_size
.width
= fb
->width
;
3418 plane_size
->surface_size
.height
= fb
->height
;
3419 plane_size
->surface_pitch
=
3420 fb
->pitches
[0] / fb
->format
->cpp
[0];
3422 plane_size
->chroma_size
.x
= 0;
3423 plane_size
->chroma_size
.y
= 0;
3424 /* TODO: set these based on surface format */
3425 plane_size
->chroma_size
.width
= fb
->width
/ 2;
3426 plane_size
->chroma_size
.height
= fb
->height
/ 2;
3428 plane_size
->chroma_pitch
=
3429 fb
->pitches
[1] / fb
->format
->cpp
[1];
3431 address
->type
= PLN_ADDR_TYPE_VIDEO_PROGRESSIVE
;
3432 address
->video_progressive
.luma_addr
.low_part
=
3433 lower_32_bits(afb
->address
);
3434 address
->video_progressive
.luma_addr
.high_part
=
3435 upper_32_bits(afb
->address
);
3436 address
->video_progressive
.chroma_addr
.low_part
=
3437 lower_32_bits(chroma_addr
);
3438 address
->video_progressive
.chroma_addr
.high_part
=
3439 upper_32_bits(chroma_addr
);
3442 /* Fill GFX8 params */
3443 if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
) == DC_ARRAY_2D_TILED_THIN1
) {
3444 unsigned int bankw
, bankh
, mtaspect
, tile_split
, num_banks
;
3446 bankw
= AMDGPU_TILING_GET(tiling_flags
, BANK_WIDTH
);
3447 bankh
= AMDGPU_TILING_GET(tiling_flags
, BANK_HEIGHT
);
3448 mtaspect
= AMDGPU_TILING_GET(tiling_flags
, MACRO_TILE_ASPECT
);
3449 tile_split
= AMDGPU_TILING_GET(tiling_flags
, TILE_SPLIT
);
3450 num_banks
= AMDGPU_TILING_GET(tiling_flags
, NUM_BANKS
);
3452 /* XXX fix me for VI */
3453 tiling_info
->gfx8
.num_banks
= num_banks
;
3454 tiling_info
->gfx8
.array_mode
=
3455 DC_ARRAY_2D_TILED_THIN1
;
3456 tiling_info
->gfx8
.tile_split
= tile_split
;
3457 tiling_info
->gfx8
.bank_width
= bankw
;
3458 tiling_info
->gfx8
.bank_height
= bankh
;
3459 tiling_info
->gfx8
.tile_aspect
= mtaspect
;
3460 tiling_info
->gfx8
.tile_mode
=
3461 DC_ADDR_SURF_MICRO_TILING_DISPLAY
;
3462 } else if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
)
3463 == DC_ARRAY_1D_TILED_THIN1
) {
3464 tiling_info
->gfx8
.array_mode
= DC_ARRAY_1D_TILED_THIN1
;
3467 tiling_info
->gfx8
.pipe_config
=
3468 AMDGPU_TILING_GET(tiling_flags
, PIPE_CONFIG
);
3470 if (adev
->asic_type
== CHIP_VEGA10
||
3471 adev
->asic_type
== CHIP_VEGA12
||
3472 adev
->asic_type
== CHIP_VEGA20
||
3473 adev
->asic_type
== CHIP_NAVI10
||
3474 adev
->asic_type
== CHIP_NAVI14
||
3475 adev
->asic_type
== CHIP_NAVI12
||
3476 adev
->asic_type
== CHIP_RENOIR
||
3477 adev
->asic_type
== CHIP_RAVEN
) {
3478 /* Fill GFX9 params */
3479 tiling_info
->gfx9
.num_pipes
=
3480 adev
->gfx
.config
.gb_addr_config_fields
.num_pipes
;
3481 tiling_info
->gfx9
.num_banks
=
3482 adev
->gfx
.config
.gb_addr_config_fields
.num_banks
;
3483 tiling_info
->gfx9
.pipe_interleave
=
3484 adev
->gfx
.config
.gb_addr_config_fields
.pipe_interleave_size
;
3485 tiling_info
->gfx9
.num_shader_engines
=
3486 adev
->gfx
.config
.gb_addr_config_fields
.num_se
;
3487 tiling_info
->gfx9
.max_compressed_frags
=
3488 adev
->gfx
.config
.gb_addr_config_fields
.max_compress_frags
;
3489 tiling_info
->gfx9
.num_rb_per_se
=
3490 adev
->gfx
.config
.gb_addr_config_fields
.num_rb_per_se
;
3491 tiling_info
->gfx9
.swizzle
=
3492 AMDGPU_TILING_GET(tiling_flags
, SWIZZLE_MODE
);
3493 tiling_info
->gfx9
.shaderEnable
= 1;
3495 ret
= fill_plane_dcc_attributes(adev
, afb
, format
, rotation
,
3496 plane_size
, tiling_info
,
3497 tiling_flags
, dcc
, address
,
3507 fill_blending_from_plane_state(const struct drm_plane_state
*plane_state
,
3508 bool *per_pixel_alpha
, bool *global_alpha
,
3509 int *global_alpha_value
)
3511 *per_pixel_alpha
= false;
3512 *global_alpha
= false;
3513 *global_alpha_value
= 0xff;
3515 if (plane_state
->plane
->type
!= DRM_PLANE_TYPE_OVERLAY
)
3518 if (plane_state
->pixel_blend_mode
== DRM_MODE_BLEND_PREMULTI
) {
3519 static const uint32_t alpha_formats
[] = {
3520 DRM_FORMAT_ARGB8888
,
3521 DRM_FORMAT_RGBA8888
,
3522 DRM_FORMAT_ABGR8888
,
3524 uint32_t format
= plane_state
->fb
->format
->format
;
3527 for (i
= 0; i
< ARRAY_SIZE(alpha_formats
); ++i
) {
3528 if (format
== alpha_formats
[i
]) {
3529 *per_pixel_alpha
= true;
3535 if (plane_state
->alpha
< 0xffff) {
3536 *global_alpha
= true;
3537 *global_alpha_value
= plane_state
->alpha
>> 8;
3542 fill_plane_color_attributes(const struct drm_plane_state
*plane_state
,
3543 const enum surface_pixel_format format
,
3544 enum dc_color_space
*color_space
)
3548 *color_space
= COLOR_SPACE_SRGB
;
3550 /* DRM color properties only affect non-RGB formats. */
3551 if (format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
)
3554 full_range
= (plane_state
->color_range
== DRM_COLOR_YCBCR_FULL_RANGE
);
3556 switch (plane_state
->color_encoding
) {
3557 case DRM_COLOR_YCBCR_BT601
:
3559 *color_space
= COLOR_SPACE_YCBCR601
;
3561 *color_space
= COLOR_SPACE_YCBCR601_LIMITED
;
3564 case DRM_COLOR_YCBCR_BT709
:
3566 *color_space
= COLOR_SPACE_YCBCR709
;
3568 *color_space
= COLOR_SPACE_YCBCR709_LIMITED
;
3571 case DRM_COLOR_YCBCR_BT2020
:
3573 *color_space
= COLOR_SPACE_2020_YCBCR
;
3586 fill_dc_plane_info_and_addr(struct amdgpu_device
*adev
,
3587 const struct drm_plane_state
*plane_state
,
3588 const uint64_t tiling_flags
,
3589 struct dc_plane_info
*plane_info
,
3590 struct dc_plane_address
*address
,
3591 bool force_disable_dcc
)
3593 const struct drm_framebuffer
*fb
= plane_state
->fb
;
3594 const struct amdgpu_framebuffer
*afb
=
3595 to_amdgpu_framebuffer(plane_state
->fb
);
3596 struct drm_format_name_buf format_name
;
3599 memset(plane_info
, 0, sizeof(*plane_info
));
3601 switch (fb
->format
->format
) {
3603 plane_info
->format
=
3604 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS
;
3606 case DRM_FORMAT_RGB565
:
3607 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_RGB565
;
3609 case DRM_FORMAT_XRGB8888
:
3610 case DRM_FORMAT_ARGB8888
:
3611 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888
;
3613 case DRM_FORMAT_XRGB2101010
:
3614 case DRM_FORMAT_ARGB2101010
:
3615 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010
;
3617 case DRM_FORMAT_XBGR2101010
:
3618 case DRM_FORMAT_ABGR2101010
:
3619 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
;
3621 case DRM_FORMAT_XBGR8888
:
3622 case DRM_FORMAT_ABGR8888
:
3623 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_ABGR8888
;
3625 case DRM_FORMAT_NV21
:
3626 plane_info
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr
;
3628 case DRM_FORMAT_NV12
:
3629 plane_info
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb
;
3631 case DRM_FORMAT_P010
:
3632 plane_info
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb
;
3636 "Unsupported screen format %s\n",
3637 drm_get_format_name(fb
->format
->format
, &format_name
));
3641 switch (plane_state
->rotation
& DRM_MODE_ROTATE_MASK
) {
3642 case DRM_MODE_ROTATE_0
:
3643 plane_info
->rotation
= ROTATION_ANGLE_0
;
3645 case DRM_MODE_ROTATE_90
:
3646 plane_info
->rotation
= ROTATION_ANGLE_90
;
3648 case DRM_MODE_ROTATE_180
:
3649 plane_info
->rotation
= ROTATION_ANGLE_180
;
3651 case DRM_MODE_ROTATE_270
:
3652 plane_info
->rotation
= ROTATION_ANGLE_270
;
3655 plane_info
->rotation
= ROTATION_ANGLE_0
;
3659 plane_info
->visible
= true;
3660 plane_info
->stereo_format
= PLANE_STEREO_FORMAT_NONE
;
3662 plane_info
->layer_index
= 0;
3664 ret
= fill_plane_color_attributes(plane_state
, plane_info
->format
,
3665 &plane_info
->color_space
);
3669 ret
= fill_plane_buffer_attributes(adev
, afb
, plane_info
->format
,
3670 plane_info
->rotation
, tiling_flags
,
3671 &plane_info
->tiling_info
,
3672 &plane_info
->plane_size
,
3673 &plane_info
->dcc
, address
,
3678 fill_blending_from_plane_state(
3679 plane_state
, &plane_info
->per_pixel_alpha
,
3680 &plane_info
->global_alpha
, &plane_info
->global_alpha_value
);
3685 static int fill_dc_plane_attributes(struct amdgpu_device
*adev
,
3686 struct dc_plane_state
*dc_plane_state
,
3687 struct drm_plane_state
*plane_state
,
3688 struct drm_crtc_state
*crtc_state
)
3690 struct dm_crtc_state
*dm_crtc_state
= to_dm_crtc_state(crtc_state
);
3691 const struct amdgpu_framebuffer
*amdgpu_fb
=
3692 to_amdgpu_framebuffer(plane_state
->fb
);
3693 struct dc_scaling_info scaling_info
;
3694 struct dc_plane_info plane_info
;
3695 uint64_t tiling_flags
;
3697 bool force_disable_dcc
= false;
3699 ret
= fill_dc_scaling_info(plane_state
, &scaling_info
);
3703 dc_plane_state
->src_rect
= scaling_info
.src_rect
;
3704 dc_plane_state
->dst_rect
= scaling_info
.dst_rect
;
3705 dc_plane_state
->clip_rect
= scaling_info
.clip_rect
;
3706 dc_plane_state
->scaling_quality
= scaling_info
.scaling_quality
;
3708 ret
= get_fb_info(amdgpu_fb
, &tiling_flags
);
3712 force_disable_dcc
= adev
->asic_type
== CHIP_RAVEN
&& adev
->in_suspend
;
3713 ret
= fill_dc_plane_info_and_addr(adev
, plane_state
, tiling_flags
,
3715 &dc_plane_state
->address
,
3720 dc_plane_state
->format
= plane_info
.format
;
3721 dc_plane_state
->color_space
= plane_info
.color_space
;
3722 dc_plane_state
->format
= plane_info
.format
;
3723 dc_plane_state
->plane_size
= plane_info
.plane_size
;
3724 dc_plane_state
->rotation
= plane_info
.rotation
;
3725 dc_plane_state
->horizontal_mirror
= plane_info
.horizontal_mirror
;
3726 dc_plane_state
->stereo_format
= plane_info
.stereo_format
;
3727 dc_plane_state
->tiling_info
= plane_info
.tiling_info
;
3728 dc_plane_state
->visible
= plane_info
.visible
;
3729 dc_plane_state
->per_pixel_alpha
= plane_info
.per_pixel_alpha
;
3730 dc_plane_state
->global_alpha
= plane_info
.global_alpha
;
3731 dc_plane_state
->global_alpha_value
= plane_info
.global_alpha_value
;
3732 dc_plane_state
->dcc
= plane_info
.dcc
;
3733 dc_plane_state
->layer_index
= plane_info
.layer_index
; // Always returns 0
3736 * Always set input transfer function, since plane state is refreshed
3739 ret
= amdgpu_dm_update_plane_color_mgmt(dm_crtc_state
, dc_plane_state
);
3746 static void update_stream_scaling_settings(const struct drm_display_mode
*mode
,
3747 const struct dm_connector_state
*dm_state
,
3748 struct dc_stream_state
*stream
)
3750 enum amdgpu_rmx_type rmx_type
;
3752 struct rect src
= { 0 }; /* viewport in composition space*/
3753 struct rect dst
= { 0 }; /* stream addressable area */
3755 /* no mode. nothing to be done */
3759 /* Full screen scaling by default */
3760 src
.width
= mode
->hdisplay
;
3761 src
.height
= mode
->vdisplay
;
3762 dst
.width
= stream
->timing
.h_addressable
;
3763 dst
.height
= stream
->timing
.v_addressable
;
3766 rmx_type
= dm_state
->scaling
;
3767 if (rmx_type
== RMX_ASPECT
|| rmx_type
== RMX_OFF
) {
3768 if (src
.width
* dst
.height
<
3769 src
.height
* dst
.width
) {
3770 /* height needs less upscaling/more downscaling */
3771 dst
.width
= src
.width
*
3772 dst
.height
/ src
.height
;
3774 /* width needs less upscaling/more downscaling */
3775 dst
.height
= src
.height
*
3776 dst
.width
/ src
.width
;
3778 } else if (rmx_type
== RMX_CENTER
) {
3782 dst
.x
= (stream
->timing
.h_addressable
- dst
.width
) / 2;
3783 dst
.y
= (stream
->timing
.v_addressable
- dst
.height
) / 2;
3785 if (dm_state
->underscan_enable
) {
3786 dst
.x
+= dm_state
->underscan_hborder
/ 2;
3787 dst
.y
+= dm_state
->underscan_vborder
/ 2;
3788 dst
.width
-= dm_state
->underscan_hborder
;
3789 dst
.height
-= dm_state
->underscan_vborder
;
3796 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
3797 dst
.x
, dst
.y
, dst
.width
, dst
.height
);
3801 static enum dc_color_depth
3802 convert_color_depth_from_display_info(const struct drm_connector
*connector
,
3803 const struct drm_connector_state
*state
,
3811 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
3812 if (connector
->display_info
.hdmi
.y420_dc_modes
& DRM_EDID_YCBCR420_DC_48
)
3814 else if (connector
->display_info
.hdmi
.y420_dc_modes
& DRM_EDID_YCBCR420_DC_36
)
3816 else if (connector
->display_info
.hdmi
.y420_dc_modes
& DRM_EDID_YCBCR420_DC_30
)
3819 bpc
= (uint8_t)connector
->display_info
.bpc
;
3820 /* Assume 8 bpc by default if no bpc is specified. */
3821 bpc
= bpc
? bpc
: 8;
3825 state
= connector
->state
;
3829 * Cap display bpc based on the user requested value.
3831 * The value for state->max_bpc may not correctly updated
3832 * depending on when the connector gets added to the state
3833 * or if this was called outside of atomic check, so it
3834 * can't be used directly.
3836 bpc
= min(bpc
, state
->max_requested_bpc
);
3838 /* Round down to the nearest even number. */
3839 bpc
= bpc
- (bpc
& 1);
3845 * Temporary Work around, DRM doesn't parse color depth for
3846 * EDID revision before 1.4
3847 * TODO: Fix edid parsing
3849 return COLOR_DEPTH_888
;
3851 return COLOR_DEPTH_666
;
3853 return COLOR_DEPTH_888
;
3855 return COLOR_DEPTH_101010
;
3857 return COLOR_DEPTH_121212
;
3859 return COLOR_DEPTH_141414
;
3861 return COLOR_DEPTH_161616
;
3863 return COLOR_DEPTH_UNDEFINED
;
3867 static enum dc_aspect_ratio
3868 get_aspect_ratio(const struct drm_display_mode
*mode_in
)
3870 /* 1-1 mapping, since both enums follow the HDMI spec. */
3871 return (enum dc_aspect_ratio
) mode_in
->picture_aspect_ratio
;
3874 static enum dc_color_space
3875 get_output_color_space(const struct dc_crtc_timing
*dc_crtc_timing
)
3877 enum dc_color_space color_space
= COLOR_SPACE_SRGB
;
3879 switch (dc_crtc_timing
->pixel_encoding
) {
3880 case PIXEL_ENCODING_YCBCR422
:
3881 case PIXEL_ENCODING_YCBCR444
:
3882 case PIXEL_ENCODING_YCBCR420
:
3885 * 27030khz is the separation point between HDTV and SDTV
3886 * according to HDMI spec, we use YCbCr709 and YCbCr601
3889 if (dc_crtc_timing
->pix_clk_100hz
> 270300) {
3890 if (dc_crtc_timing
->flags
.Y_ONLY
)
3892 COLOR_SPACE_YCBCR709_LIMITED
;
3894 color_space
= COLOR_SPACE_YCBCR709
;
3896 if (dc_crtc_timing
->flags
.Y_ONLY
)
3898 COLOR_SPACE_YCBCR601_LIMITED
;
3900 color_space
= COLOR_SPACE_YCBCR601
;
3905 case PIXEL_ENCODING_RGB
:
3906 color_space
= COLOR_SPACE_SRGB
;
3917 static bool adjust_colour_depth_from_display_info(
3918 struct dc_crtc_timing
*timing_out
,
3919 const struct drm_display_info
*info
)
3921 enum dc_color_depth depth
= timing_out
->display_color_depth
;
3924 normalized_clk
= timing_out
->pix_clk_100hz
/ 10;
3925 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3926 if (timing_out
->pixel_encoding
== PIXEL_ENCODING_YCBCR420
)
3927 normalized_clk
/= 2;
3928 /* Adjusting pix clock following on HDMI spec based on colour depth */
3930 case COLOR_DEPTH_888
:
3932 case COLOR_DEPTH_101010
:
3933 normalized_clk
= (normalized_clk
* 30) / 24;
3935 case COLOR_DEPTH_121212
:
3936 normalized_clk
= (normalized_clk
* 36) / 24;
3938 case COLOR_DEPTH_161616
:
3939 normalized_clk
= (normalized_clk
* 48) / 24;
3942 /* The above depths are the only ones valid for HDMI. */
3945 if (normalized_clk
<= info
->max_tmds_clock
) {
3946 timing_out
->display_color_depth
= depth
;
3949 } while (--depth
> COLOR_DEPTH_666
);
3953 static void fill_stream_properties_from_drm_display_mode(
3954 struct dc_stream_state
*stream
,
3955 const struct drm_display_mode
*mode_in
,
3956 const struct drm_connector
*connector
,
3957 const struct drm_connector_state
*connector_state
,
3958 const struct dc_stream_state
*old_stream
)
3960 struct dc_crtc_timing
*timing_out
= &stream
->timing
;
3961 const struct drm_display_info
*info
= &connector
->display_info
;
3962 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
3963 struct hdmi_vendor_infoframe hv_frame
;
3964 struct hdmi_avi_infoframe avi_frame
;
3966 memset(&hv_frame
, 0, sizeof(hv_frame
));
3967 memset(&avi_frame
, 0, sizeof(avi_frame
));
3969 timing_out
->h_border_left
= 0;
3970 timing_out
->h_border_right
= 0;
3971 timing_out
->v_border_top
= 0;
3972 timing_out
->v_border_bottom
= 0;
3973 /* TODO: un-hardcode */
3974 if (drm_mode_is_420_only(info
, mode_in
)
3975 && stream
->signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
3976 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR420
;
3977 else if (drm_mode_is_420_also(info
, mode_in
)
3978 && aconnector
->force_yuv420_output
)
3979 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR420
;
3980 else if ((connector
->display_info
.color_formats
& DRM_COLOR_FORMAT_YCRCB444
)
3981 && stream
->signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
3982 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR444
;
3984 timing_out
->pixel_encoding
= PIXEL_ENCODING_RGB
;
3986 timing_out
->timing_3d_format
= TIMING_3D_FORMAT_NONE
;
3987 timing_out
->display_color_depth
= convert_color_depth_from_display_info(
3988 connector
, connector_state
,
3989 (timing_out
->pixel_encoding
== PIXEL_ENCODING_YCBCR420
));
3990 timing_out
->scan_type
= SCANNING_TYPE_NODATA
;
3991 timing_out
->hdmi_vic
= 0;
3994 timing_out
->vic
= old_stream
->timing
.vic
;
3995 timing_out
->flags
.HSYNC_POSITIVE_POLARITY
= old_stream
->timing
.flags
.HSYNC_POSITIVE_POLARITY
;
3996 timing_out
->flags
.VSYNC_POSITIVE_POLARITY
= old_stream
->timing
.flags
.VSYNC_POSITIVE_POLARITY
;
3998 timing_out
->vic
= drm_match_cea_mode(mode_in
);
3999 if (mode_in
->flags
& DRM_MODE_FLAG_PHSYNC
)
4000 timing_out
->flags
.HSYNC_POSITIVE_POLARITY
= 1;
4001 if (mode_in
->flags
& DRM_MODE_FLAG_PVSYNC
)
4002 timing_out
->flags
.VSYNC_POSITIVE_POLARITY
= 1;
4005 if (stream
->signal
== SIGNAL_TYPE_HDMI_TYPE_A
) {
4006 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame
, (struct drm_connector
*)connector
, mode_in
);
4007 timing_out
->vic
= avi_frame
.video_code
;
4008 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame
, (struct drm_connector
*)connector
, mode_in
);
4009 timing_out
->hdmi_vic
= hv_frame
.vic
;
4012 timing_out
->h_addressable
= mode_in
->crtc_hdisplay
;
4013 timing_out
->h_total
= mode_in
->crtc_htotal
;
4014 timing_out
->h_sync_width
=
4015 mode_in
->crtc_hsync_end
- mode_in
->crtc_hsync_start
;
4016 timing_out
->h_front_porch
=
4017 mode_in
->crtc_hsync_start
- mode_in
->crtc_hdisplay
;
4018 timing_out
->v_total
= mode_in
->crtc_vtotal
;
4019 timing_out
->v_addressable
= mode_in
->crtc_vdisplay
;
4020 timing_out
->v_front_porch
=
4021 mode_in
->crtc_vsync_start
- mode_in
->crtc_vdisplay
;
4022 timing_out
->v_sync_width
=
4023 mode_in
->crtc_vsync_end
- mode_in
->crtc_vsync_start
;
4024 timing_out
->pix_clk_100hz
= mode_in
->crtc_clock
* 10;
4025 timing_out
->aspect_ratio
= get_aspect_ratio(mode_in
);
4027 stream
->output_color_space
= get_output_color_space(timing_out
);
4029 stream
->out_transfer_func
->type
= TF_TYPE_PREDEFINED
;
4030 stream
->out_transfer_func
->tf
= TRANSFER_FUNCTION_SRGB
;
4031 if (stream
->signal
== SIGNAL_TYPE_HDMI_TYPE_A
) {
4032 if (!adjust_colour_depth_from_display_info(timing_out
, info
) &&
4033 drm_mode_is_420_also(info
, mode_in
) &&
4034 timing_out
->pixel_encoding
!= PIXEL_ENCODING_YCBCR420
) {
4035 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR420
;
4036 adjust_colour_depth_from_display_info(timing_out
, info
);
4041 static void fill_audio_info(struct audio_info
*audio_info
,
4042 const struct drm_connector
*drm_connector
,
4043 const struct dc_sink
*dc_sink
)
4046 int cea_revision
= 0;
4047 const struct dc_edid_caps
*edid_caps
= &dc_sink
->edid_caps
;
4049 audio_info
->manufacture_id
= edid_caps
->manufacturer_id
;
4050 audio_info
->product_id
= edid_caps
->product_id
;
4052 cea_revision
= drm_connector
->display_info
.cea_rev
;
4054 strscpy(audio_info
->display_name
,
4055 edid_caps
->display_name
,
4056 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS
);
4058 if (cea_revision
>= 3) {
4059 audio_info
->mode_count
= edid_caps
->audio_mode_count
;
4061 for (i
= 0; i
< audio_info
->mode_count
; ++i
) {
4062 audio_info
->modes
[i
].format_code
=
4063 (enum audio_format_code
)
4064 (edid_caps
->audio_modes
[i
].format_code
);
4065 audio_info
->modes
[i
].channel_count
=
4066 edid_caps
->audio_modes
[i
].channel_count
;
4067 audio_info
->modes
[i
].sample_rates
.all
=
4068 edid_caps
->audio_modes
[i
].sample_rate
;
4069 audio_info
->modes
[i
].sample_size
=
4070 edid_caps
->audio_modes
[i
].sample_size
;
4074 audio_info
->flags
.all
= edid_caps
->speaker_flags
;
4076 /* TODO: We only check for the progressive mode, check for interlace mode too */
4077 if (drm_connector
->latency_present
[0]) {
4078 audio_info
->video_latency
= drm_connector
->video_latency
[0];
4079 audio_info
->audio_latency
= drm_connector
->audio_latency
[0];
4082 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4087 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode
*src_mode
,
4088 struct drm_display_mode
*dst_mode
)
4090 dst_mode
->crtc_hdisplay
= src_mode
->crtc_hdisplay
;
4091 dst_mode
->crtc_vdisplay
= src_mode
->crtc_vdisplay
;
4092 dst_mode
->crtc_clock
= src_mode
->crtc_clock
;
4093 dst_mode
->crtc_hblank_start
= src_mode
->crtc_hblank_start
;
4094 dst_mode
->crtc_hblank_end
= src_mode
->crtc_hblank_end
;
4095 dst_mode
->crtc_hsync_start
= src_mode
->crtc_hsync_start
;
4096 dst_mode
->crtc_hsync_end
= src_mode
->crtc_hsync_end
;
4097 dst_mode
->crtc_htotal
= src_mode
->crtc_htotal
;
4098 dst_mode
->crtc_hskew
= src_mode
->crtc_hskew
;
4099 dst_mode
->crtc_vblank_start
= src_mode
->crtc_vblank_start
;
4100 dst_mode
->crtc_vblank_end
= src_mode
->crtc_vblank_end
;
4101 dst_mode
->crtc_vsync_start
= src_mode
->crtc_vsync_start
;
4102 dst_mode
->crtc_vsync_end
= src_mode
->crtc_vsync_end
;
4103 dst_mode
->crtc_vtotal
= src_mode
->crtc_vtotal
;
4107 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode
*drm_mode
,
4108 const struct drm_display_mode
*native_mode
,
4111 if (scale_enabled
) {
4112 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
4113 } else if (native_mode
->clock
== drm_mode
->clock
&&
4114 native_mode
->htotal
== drm_mode
->htotal
&&
4115 native_mode
->vtotal
== drm_mode
->vtotal
) {
4116 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
4118 /* no scaling nor amdgpu inserted, no need to patch */
4122 static struct dc_sink
*
4123 create_fake_sink(struct amdgpu_dm_connector
*aconnector
)
4125 struct dc_sink_init_data sink_init_data
= { 0 };
4126 struct dc_sink
*sink
= NULL
;
4127 sink_init_data
.link
= aconnector
->dc_link
;
4128 sink_init_data
.sink_signal
= aconnector
->dc_link
->connector_signal
;
4130 sink
= dc_sink_create(&sink_init_data
);
4132 DRM_ERROR("Failed to create sink!\n");
4135 sink
->sink_signal
= SIGNAL_TYPE_VIRTUAL
;
4140 static void set_multisync_trigger_params(
4141 struct dc_stream_state
*stream
)
4143 if (stream
->triggered_crtc_reset
.enabled
) {
4144 stream
->triggered_crtc_reset
.event
= CRTC_EVENT_VSYNC_RISING
;
4145 stream
->triggered_crtc_reset
.delay
= TRIGGER_DELAY_NEXT_LINE
;
4149 static void set_master_stream(struct dc_stream_state
*stream_set
[],
4152 int j
, highest_rfr
= 0, master_stream
= 0;
4154 for (j
= 0; j
< stream_count
; j
++) {
4155 if (stream_set
[j
] && stream_set
[j
]->triggered_crtc_reset
.enabled
) {
4156 int refresh_rate
= 0;
4158 refresh_rate
= (stream_set
[j
]->timing
.pix_clk_100hz
*100)/
4159 (stream_set
[j
]->timing
.h_total
*stream_set
[j
]->timing
.v_total
);
4160 if (refresh_rate
> highest_rfr
) {
4161 highest_rfr
= refresh_rate
;
4166 for (j
= 0; j
< stream_count
; j
++) {
4168 stream_set
[j
]->triggered_crtc_reset
.event_source
= stream_set
[master_stream
];
4172 static void dm_enable_per_frame_crtc_master_sync(struct dc_state
*context
)
4176 if (context
->stream_count
< 2)
4178 for (i
= 0; i
< context
->stream_count
; i
++) {
4179 if (!context
->streams
[i
])
4182 * TODO: add a function to read AMD VSDB bits and set
4183 * crtc_sync_master.multi_sync_enabled flag
4184 * For now it's set to false
4186 set_multisync_trigger_params(context
->streams
[i
]);
4188 set_master_stream(context
->streams
, context
->stream_count
);
4191 static struct dc_stream_state
*
4192 create_stream_for_sink(struct amdgpu_dm_connector
*aconnector
,
4193 const struct drm_display_mode
*drm_mode
,
4194 const struct dm_connector_state
*dm_state
,
4195 const struct dc_stream_state
*old_stream
)
4197 struct drm_display_mode
*preferred_mode
= NULL
;
4198 struct drm_connector
*drm_connector
;
4199 const struct drm_connector_state
*con_state
=
4200 dm_state
? &dm_state
->base
: NULL
;
4201 struct dc_stream_state
*stream
= NULL
;
4202 struct drm_display_mode mode
= *drm_mode
;
4203 bool native_mode_found
= false;
4204 bool scale
= dm_state
? (dm_state
->scaling
!= RMX_OFF
) : false;
4206 int preferred_refresh
= 0;
4207 #if defined(CONFIG_DRM_AMD_DC_DCN)
4208 struct dsc_dec_dpcd_caps dsc_caps
;
4210 uint32_t link_bandwidth_kbps
;
4212 struct dc_sink
*sink
= NULL
;
4213 if (aconnector
== NULL
) {
4214 DRM_ERROR("aconnector is NULL!\n");
4218 drm_connector
= &aconnector
->base
;
4220 if (!aconnector
->dc_sink
) {
4221 sink
= create_fake_sink(aconnector
);
4225 sink
= aconnector
->dc_sink
;
4226 dc_sink_retain(sink
);
4229 stream
= dc_create_stream_for_sink(sink
);
4231 if (stream
== NULL
) {
4232 DRM_ERROR("Failed to create stream for sink!\n");
4236 stream
->dm_stream_context
= aconnector
;
4238 stream
->timing
.flags
.LTE_340MCSC_SCRAMBLE
=
4239 drm_connector
->display_info
.hdmi
.scdc
.scrambling
.low_rates
;
4241 list_for_each_entry(preferred_mode
, &aconnector
->base
.modes
, head
) {
4242 /* Search for preferred mode */
4243 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
) {
4244 native_mode_found
= true;
4248 if (!native_mode_found
)
4249 preferred_mode
= list_first_entry_or_null(
4250 &aconnector
->base
.modes
,
4251 struct drm_display_mode
,
4254 mode_refresh
= drm_mode_vrefresh(&mode
);
4256 if (preferred_mode
== NULL
) {
4258 * This may not be an error, the use case is when we have no
4259 * usermode calls to reset and set mode upon hotplug. In this
4260 * case, we call set mode ourselves to restore the previous mode
4261 * and the modelist may not be filled in in time.
4263 DRM_DEBUG_DRIVER("No preferred mode found\n");
4265 decide_crtc_timing_for_drm_display_mode(
4266 &mode
, preferred_mode
,
4267 dm_state
? (dm_state
->scaling
!= RMX_OFF
) : false);
4268 preferred_refresh
= drm_mode_vrefresh(preferred_mode
);
4272 drm_mode_set_crtcinfo(&mode
, 0);
4275 * If scaling is enabled and refresh rate didn't change
4276 * we copy the vic and polarities of the old timings
4278 if (!scale
|| mode_refresh
!= preferred_refresh
)
4279 fill_stream_properties_from_drm_display_mode(stream
,
4280 &mode
, &aconnector
->base
, con_state
, NULL
);
4282 fill_stream_properties_from_drm_display_mode(stream
,
4283 &mode
, &aconnector
->base
, con_state
, old_stream
);
4285 stream
->timing
.flags
.DSC
= 0;
4287 if (aconnector
->dc_link
&& sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT
) {
4288 #if defined(CONFIG_DRM_AMD_DC_DCN)
4289 dc_dsc_parse_dsc_dpcd(aconnector
->dc_link
->ctx
->dc
,
4290 aconnector
->dc_link
->dpcd_caps
.dsc_caps
.dsc_basic_caps
.raw
,
4291 aconnector
->dc_link
->dpcd_caps
.dsc_caps
.dsc_ext_caps
.raw
,
4294 link_bandwidth_kbps
= dc_link_bandwidth_kbps(aconnector
->dc_link
,
4295 dc_link_get_link_cap(aconnector
->dc_link
));
4297 #if defined(CONFIG_DRM_AMD_DC_DCN)
4298 if (dsc_caps
.is_dsc_supported
)
4299 if (dc_dsc_compute_config(aconnector
->dc_link
->ctx
->dc
->res_pool
->dscs
[0],
4301 aconnector
->dc_link
->ctx
->dc
->debug
.dsc_min_slice_height_override
,
4302 link_bandwidth_kbps
,
4304 &stream
->timing
.dsc_cfg
))
4305 stream
->timing
.flags
.DSC
= 1;
4309 update_stream_scaling_settings(&mode
, dm_state
, stream
);
4312 &stream
->audio_info
,
4316 update_stream_signal(stream
, sink
);
4318 if (stream
->signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
4319 mod_build_hf_vsif_infopacket(stream
, &stream
->vsp_infopacket
, false, false);
4320 if (stream
->link
->psr_feature_enabled
) {
4321 struct dc
*core_dc
= stream
->link
->ctx
->dc
;
4323 if (dc_is_dmcu_initialized(core_dc
)) {
4324 struct dmcu
*dmcu
= core_dc
->res_pool
->dmcu
;
4326 stream
->psr_version
= dmcu
->dmcu_version
.psr_version
;
4329 // should decide stream support vsc sdp colorimetry capability
4330 // before building vsc info packet
4332 stream
->use_vsc_sdp_for_colorimetry
= false;
4333 if (aconnector
->dc_sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT_MST
) {
4334 stream
->use_vsc_sdp_for_colorimetry
=
4335 aconnector
->dc_sink
->is_vsc_sdp_colorimetry_supported
;
4337 if (stream
->link
->dpcd_caps
.dpcd_rev
.raw
>= 0x14 &&
4338 stream
->link
->dpcd_caps
.dprx_feature
.bits
.VSC_SDP_COLORIMETRY_SUPPORTED
) {
4339 stream
->use_vsc_sdp_for_colorimetry
= true;
4342 mod_build_vsc_infopacket(stream
, &stream
->vsc_infopacket
);
4346 dc_sink_release(sink
);
/* Tear down and free an amdgpu DM CRTC. */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
4357 static void dm_crtc_destroy_state(struct drm_crtc
*crtc
,
4358 struct drm_crtc_state
*state
)
4360 struct dm_crtc_state
*cur
= to_dm_crtc_state(state
);
4362 /* TODO Destroy dc_stream objects are stream object is flattened */
4364 dc_stream_release(cur
->stream
);
4367 __drm_atomic_helper_crtc_destroy_state(state
);
4373 static void dm_crtc_reset_state(struct drm_crtc
*crtc
)
4375 struct dm_crtc_state
*state
;
4378 dm_crtc_destroy_state(crtc
, crtc
->state
);
4380 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
4381 if (WARN_ON(!state
))
4384 crtc
->state
= &state
->base
;
4385 crtc
->state
->crtc
= crtc
;
4389 static struct drm_crtc_state
*
4390 dm_crtc_duplicate_state(struct drm_crtc
*crtc
)
4392 struct dm_crtc_state
*state
, *cur
;
4394 cur
= to_dm_crtc_state(crtc
->state
);
4396 if (WARN_ON(!crtc
->state
))
4399 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
4403 __drm_atomic_helper_crtc_duplicate_state(crtc
, &state
->base
);
4406 state
->stream
= cur
->stream
;
4407 dc_stream_retain(state
->stream
);
4410 state
->active_planes
= cur
->active_planes
;
4411 state
->interrupts_enabled
= cur
->interrupts_enabled
;
4412 state
->vrr_params
= cur
->vrr_params
;
4413 state
->vrr_infopacket
= cur
->vrr_infopacket
;
4414 state
->abm_level
= cur
->abm_level
;
4415 state
->vrr_supported
= cur
->vrr_supported
;
4416 state
->freesync_config
= cur
->freesync_config
;
4417 state
->crc_src
= cur
->crc_src
;
4418 state
->cm_has_degamma
= cur
->cm_has_degamma
;
4419 state
->cm_is_degamma_srgb
= cur
->cm_is_degamma_srgb
;
4421 /* TODO Duplicate dc_stream after objects are stream object is flattened */
4423 return &state
->base
;
4426 static inline int dm_set_vupdate_irq(struct drm_crtc
*crtc
, bool enable
)
4428 enum dc_irq_source irq_source
;
4429 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4430 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
4433 irq_source
= IRQ_TYPE_VUPDATE
+ acrtc
->otg_inst
;
4435 rc
= dc_interrupt_set(adev
->dm
.dc
, irq_source
, enable
) ? 0 : -EBUSY
;
4437 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4438 acrtc
->crtc_id
, enable
? "en" : "dis", rc
);
4442 static inline int dm_set_vblank(struct drm_crtc
*crtc
, bool enable
)
4444 enum dc_irq_source irq_source
;
4445 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4446 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
4447 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(crtc
->state
);
4451 /* vblank irq on -> Only need vupdate irq in vrr mode */
4452 if (amdgpu_dm_vrr_active(acrtc_state
))
4453 rc
= dm_set_vupdate_irq(crtc
, true);
4455 /* vblank irq off -> vupdate irq off */
4456 rc
= dm_set_vupdate_irq(crtc
, false);
4462 irq_source
= IRQ_TYPE_VBLANK
+ acrtc
->otg_inst
;
4463 return dc_interrupt_set(adev
->dm
.dc
, irq_source
, enable
) ? 0 : -EBUSY
;
4466 static int dm_enable_vblank(struct drm_crtc
*crtc
)
4468 return dm_set_vblank(crtc
, true);
4471 static void dm_disable_vblank(struct drm_crtc
*crtc
)
4473 dm_set_vblank(crtc
, false);
4476 /* Implemented only the options currently availible for the driver */
4477 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs
= {
4478 .reset
= dm_crtc_reset_state
,
4479 .destroy
= amdgpu_dm_crtc_destroy
,
4480 .gamma_set
= drm_atomic_helper_legacy_gamma_set
,
4481 .set_config
= drm_atomic_helper_set_config
,
4482 .page_flip
= drm_atomic_helper_page_flip
,
4483 .atomic_duplicate_state
= dm_crtc_duplicate_state
,
4484 .atomic_destroy_state
= dm_crtc_destroy_state
,
4485 .set_crc_source
= amdgpu_dm_crtc_set_crc_source
,
4486 .verify_crc_source
= amdgpu_dm_crtc_verify_crc_source
,
4487 .get_crc_sources
= amdgpu_dm_crtc_get_crc_sources
,
4488 .get_vblank_counter
= amdgpu_get_vblank_counter_kms
,
4489 .enable_vblank
= dm_enable_vblank
,
4490 .disable_vblank
= dm_disable_vblank
,
4491 .get_vblank_timestamp
= drm_crtc_vblank_helper_get_vblank_timestamp
,
4494 static enum drm_connector_status
4495 amdgpu_dm_connector_detect(struct drm_connector
*connector
, bool force
)
4498 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
4502 * 1. This interface is NOT called in context of HPD irq.
4503 * 2. This interface *is called* in context of user-mode ioctl. Which
4504 * makes it a bad place for *any* MST-related activity.
4507 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
&&
4508 !aconnector
->fake_enable
)
4509 connected
= (aconnector
->dc_sink
!= NULL
);
4511 connected
= (aconnector
->base
.force
== DRM_FORCE_ON
);
4513 return (connected
? connector_status_connected
:
4514 connector_status_disconnected
);
4517 int amdgpu_dm_connector_atomic_set_property(struct drm_connector
*connector
,
4518 struct drm_connector_state
*connector_state
,
4519 struct drm_property
*property
,
4522 struct drm_device
*dev
= connector
->dev
;
4523 struct amdgpu_device
*adev
= dev
->dev_private
;
4524 struct dm_connector_state
*dm_old_state
=
4525 to_dm_connector_state(connector
->state
);
4526 struct dm_connector_state
*dm_new_state
=
4527 to_dm_connector_state(connector_state
);
4531 if (property
== dev
->mode_config
.scaling_mode_property
) {
4532 enum amdgpu_rmx_type rmx_type
;
4535 case DRM_MODE_SCALE_CENTER
:
4536 rmx_type
= RMX_CENTER
;
4538 case DRM_MODE_SCALE_ASPECT
:
4539 rmx_type
= RMX_ASPECT
;
4541 case DRM_MODE_SCALE_FULLSCREEN
:
4542 rmx_type
= RMX_FULL
;
4544 case DRM_MODE_SCALE_NONE
:
4550 if (dm_old_state
->scaling
== rmx_type
)
4553 dm_new_state
->scaling
= rmx_type
;
4555 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
4556 dm_new_state
->underscan_hborder
= val
;
4558 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
4559 dm_new_state
->underscan_vborder
= val
;
4561 } else if (property
== adev
->mode_info
.underscan_property
) {
4562 dm_new_state
->underscan_enable
= val
;
4564 } else if (property
== adev
->mode_info
.abm_level_property
) {
4565 dm_new_state
->abm_level
= val
;
4572 int amdgpu_dm_connector_atomic_get_property(struct drm_connector
*connector
,
4573 const struct drm_connector_state
*state
,
4574 struct drm_property
*property
,
4577 struct drm_device
*dev
= connector
->dev
;
4578 struct amdgpu_device
*adev
= dev
->dev_private
;
4579 struct dm_connector_state
*dm_state
=
4580 to_dm_connector_state(state
);
4583 if (property
== dev
->mode_config
.scaling_mode_property
) {
4584 switch (dm_state
->scaling
) {
4586 *val
= DRM_MODE_SCALE_CENTER
;
4589 *val
= DRM_MODE_SCALE_ASPECT
;
4592 *val
= DRM_MODE_SCALE_FULLSCREEN
;
4596 *val
= DRM_MODE_SCALE_NONE
;
4600 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
4601 *val
= dm_state
->underscan_hborder
;
4603 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
4604 *val
= dm_state
->underscan_vborder
;
4606 } else if (property
== adev
->mode_info
.underscan_property
) {
4607 *val
= dm_state
->underscan_enable
;
4609 } else if (property
== adev
->mode_info
.abm_level_property
) {
4610 *val
= dm_state
->abm_level
;
4617 static void amdgpu_dm_connector_unregister(struct drm_connector
*connector
)
4619 struct amdgpu_dm_connector
*amdgpu_dm_connector
= to_amdgpu_dm_connector(connector
);
4621 drm_dp_aux_unregister(&amdgpu_dm_connector
->dm_dp_aux
.aux
);
4624 static void amdgpu_dm_connector_destroy(struct drm_connector
*connector
)
4626 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
4627 const struct dc_link
*link
= aconnector
->dc_link
;
4628 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
4629 struct amdgpu_display_manager
*dm
= &adev
->dm
;
4631 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4632 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4634 if ((link
->connector_signal
& (SIGNAL_TYPE_EDP
| SIGNAL_TYPE_LVDS
)) &&
4635 link
->type
!= dc_connection_none
&&
4636 dm
->backlight_dev
) {
4637 backlight_device_unregister(dm
->backlight_dev
);
4638 dm
->backlight_dev
= NULL
;
4642 if (aconnector
->dc_em_sink
)
4643 dc_sink_release(aconnector
->dc_em_sink
);
4644 aconnector
->dc_em_sink
= NULL
;
4645 if (aconnector
->dc_sink
)
4646 dc_sink_release(aconnector
->dc_sink
);
4647 aconnector
->dc_sink
= NULL
;
4649 drm_dp_cec_unregister_connector(&aconnector
->dm_dp_aux
.aux
);
4650 drm_connector_unregister(connector
);
4651 drm_connector_cleanup(connector
);
4652 if (aconnector
->i2c
) {
4653 i2c_del_adapter(&aconnector
->i2c
->base
);
4654 kfree(aconnector
->i2c
);
4656 kfree(aconnector
->dm_dp_aux
.aux
.name
);
4661 void amdgpu_dm_connector_funcs_reset(struct drm_connector
*connector
)
4663 struct dm_connector_state
*state
=
4664 to_dm_connector_state(connector
->state
);
4666 if (connector
->state
)
4667 __drm_atomic_helper_connector_destroy_state(connector
->state
);
4671 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
4674 state
->scaling
= RMX_OFF
;
4675 state
->underscan_enable
= false;
4676 state
->underscan_hborder
= 0;
4677 state
->underscan_vborder
= 0;
4678 state
->base
.max_requested_bpc
= 8;
4679 state
->vcpi_slots
= 0;
4681 if (connector
->connector_type
== DRM_MODE_CONNECTOR_eDP
)
4682 state
->abm_level
= amdgpu_dm_abm_level
;
4684 __drm_atomic_helper_connector_reset(connector
, &state
->base
);
4688 struct drm_connector_state
*
4689 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector
*connector
)
4691 struct dm_connector_state
*state
=
4692 to_dm_connector_state(connector
->state
);
4694 struct dm_connector_state
*new_state
=
4695 kmemdup(state
, sizeof(*state
), GFP_KERNEL
);
4700 __drm_atomic_helper_connector_duplicate_state(connector
, &new_state
->base
);
4702 new_state
->freesync_capable
= state
->freesync_capable
;
4703 new_state
->abm_level
= state
->abm_level
;
4704 new_state
->scaling
= state
->scaling
;
4705 new_state
->underscan_enable
= state
->underscan_enable
;
4706 new_state
->underscan_hborder
= state
->underscan_hborder
;
4707 new_state
->underscan_vborder
= state
->underscan_vborder
;
4708 new_state
->vcpi_slots
= state
->vcpi_slots
;
4709 new_state
->pbn
= state
->pbn
;
4710 return &new_state
->base
;
4714 amdgpu_dm_connector_late_register(struct drm_connector
*connector
)
4716 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
4717 to_amdgpu_dm_connector(connector
);
4720 if ((connector
->connector_type
== DRM_MODE_CONNECTOR_DisplayPort
) ||
4721 (connector
->connector_type
== DRM_MODE_CONNECTOR_eDP
)) {
4722 amdgpu_dm_connector
->dm_dp_aux
.aux
.dev
= connector
->kdev
;
4723 r
= drm_dp_aux_register(&amdgpu_dm_connector
->dm_dp_aux
.aux
);
4728 #if defined(CONFIG_DEBUG_FS)
4729 connector_debugfs_init(amdgpu_dm_connector
);
4735 static const struct drm_connector_funcs amdgpu_dm_connector_funcs
= {
4736 .reset
= amdgpu_dm_connector_funcs_reset
,
4737 .detect
= amdgpu_dm_connector_detect
,
4738 .fill_modes
= drm_helper_probe_single_connector_modes
,
4739 .destroy
= amdgpu_dm_connector_destroy
,
4740 .atomic_duplicate_state
= amdgpu_dm_connector_atomic_duplicate_state
,
4741 .atomic_destroy_state
= drm_atomic_helper_connector_destroy_state
,
4742 .atomic_set_property
= amdgpu_dm_connector_atomic_set_property
,
4743 .atomic_get_property
= amdgpu_dm_connector_atomic_get_property
,
4744 .late_register
= amdgpu_dm_connector_late_register
,
4745 .early_unregister
= amdgpu_dm_connector_unregister
/* Thin helper-funcs adapter around amdgpu_dm_connector_get_modes(). */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
4753 static void create_eml_sink(struct amdgpu_dm_connector
*aconnector
)
4755 struct dc_sink_init_data init_params
= {
4756 .link
= aconnector
->dc_link
,
4757 .sink_signal
= SIGNAL_TYPE_VIRTUAL
4761 if (!aconnector
->base
.edid_blob_ptr
) {
4762 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
4763 aconnector
->base
.name
);
4765 aconnector
->base
.force
= DRM_FORCE_OFF
;
4766 aconnector
->base
.override_edid
= false;
4770 edid
= (struct edid
*) aconnector
->base
.edid_blob_ptr
->data
;
4772 aconnector
->edid
= edid
;
4774 aconnector
->dc_em_sink
= dc_link_add_remote_sink(
4775 aconnector
->dc_link
,
4777 (edid
->extensions
+ 1) * EDID_LENGTH
,
4780 if (aconnector
->base
.force
== DRM_FORCE_ON
) {
4781 aconnector
->dc_sink
= aconnector
->dc_link
->local_sink
?
4782 aconnector
->dc_link
->local_sink
:
4783 aconnector
->dc_em_sink
;
4784 dc_sink_retain(aconnector
->dc_sink
);
4788 static void handle_edid_mgmt(struct amdgpu_dm_connector
*aconnector
)
4790 struct dc_link
*link
= (struct dc_link
*)aconnector
->dc_link
;
4793 * In case of headless boot with force on for DP managed connector
4794 * Those settings have to be != 0 to get initial modeset
4796 if (link
->connector_signal
== SIGNAL_TYPE_DISPLAY_PORT
) {
4797 link
->verified_link_cap
.lane_count
= LANE_COUNT_FOUR
;
4798 link
->verified_link_cap
.link_rate
= LINK_RATE_HIGH2
;
4802 aconnector
->base
.override_edid
= true;
4803 create_eml_sink(aconnector
);
4806 enum drm_mode_status
amdgpu_dm_connector_mode_valid(struct drm_connector
*connector
,
4807 struct drm_display_mode
*mode
)
4809 int result
= MODE_ERROR
;
4810 struct dc_sink
*dc_sink
;
4811 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
4812 /* TODO: Unhardcode stream count */
4813 struct dc_stream_state
*stream
;
4814 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
4815 enum dc_status dc_result
= DC_OK
;
4817 if ((mode
->flags
& DRM_MODE_FLAG_INTERLACE
) ||
4818 (mode
->flags
& DRM_MODE_FLAG_DBLSCAN
))
4822 * Only run this the first time mode_valid is called to initilialize
4825 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
&&
4826 !aconnector
->dc_em_sink
)
4827 handle_edid_mgmt(aconnector
);
4829 dc_sink
= to_amdgpu_dm_connector(connector
)->dc_sink
;
4831 if (dc_sink
== NULL
) {
4832 DRM_ERROR("dc_sink is NULL!\n");
4836 stream
= create_stream_for_sink(aconnector
, mode
, NULL
, NULL
);
4837 if (stream
== NULL
) {
4838 DRM_ERROR("Failed to create stream for sink!\n");
4842 dc_result
= dc_validate_stream(adev
->dm
.dc
, stream
);
4844 if (dc_result
== DC_OK
)
4847 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
4853 dc_stream_release(stream
);
4856 /* TODO: error handling*/
4860 static int fill_hdr_info_packet(const struct drm_connector_state
*state
,
4861 struct dc_info_packet
*out
)
4863 struct hdmi_drm_infoframe frame
;
4864 unsigned char buf
[30]; /* 26 + 4 */
4868 memset(out
, 0, sizeof(*out
));
4870 if (!state
->hdr_output_metadata
)
4873 ret
= drm_hdmi_infoframe_set_hdr_metadata(&frame
, state
);
4877 len
= hdmi_drm_infoframe_pack_only(&frame
, buf
, sizeof(buf
));
4881 /* Static metadata is a fixed 26 bytes + 4 byte header. */
4885 /* Prepare the infopacket for DC. */
4886 switch (state
->connector
->connector_type
) {
4887 case DRM_MODE_CONNECTOR_HDMIA
:
4888 out
->hb0
= 0x87; /* type */
4889 out
->hb1
= 0x01; /* version */
4890 out
->hb2
= 0x1A; /* length */
4891 out
->sb
[0] = buf
[3]; /* checksum */
4895 case DRM_MODE_CONNECTOR_DisplayPort
:
4896 case DRM_MODE_CONNECTOR_eDP
:
4897 out
->hb0
= 0x00; /* sdp id, zero */
4898 out
->hb1
= 0x87; /* type */
4899 out
->hb2
= 0x1D; /* payload len - 1 */
4900 out
->hb3
= (0x13 << 2); /* sdp version */
4901 out
->sb
[0] = 0x01; /* version */
4902 out
->sb
[1] = 0x1A; /* length */
4910 memcpy(&out
->sb
[i
], &buf
[4], 26);
4913 print_hex_dump(KERN_DEBUG
, "HDR SB:", DUMP_PREFIX_NONE
, 16, 1, out
->sb
,
4914 sizeof(out
->sb
), false);
4920 is_hdr_metadata_different(const struct drm_connector_state
*old_state
,
4921 const struct drm_connector_state
*new_state
)
4923 struct drm_property_blob
*old_blob
= old_state
->hdr_output_metadata
;
4924 struct drm_property_blob
*new_blob
= new_state
->hdr_output_metadata
;
4926 if (old_blob
!= new_blob
) {
4927 if (old_blob
&& new_blob
&&
4928 old_blob
->length
== new_blob
->length
)
4929 return memcmp(old_blob
->data
, new_blob
->data
,
4939 amdgpu_dm_connector_atomic_check(struct drm_connector
*conn
,
4940 struct drm_atomic_state
*state
)
4942 struct drm_connector_state
*new_con_state
=
4943 drm_atomic_get_new_connector_state(state
, conn
);
4944 struct drm_connector_state
*old_con_state
=
4945 drm_atomic_get_old_connector_state(state
, conn
);
4946 struct drm_crtc
*crtc
= new_con_state
->crtc
;
4947 struct drm_crtc_state
*new_crtc_state
;
4953 if (is_hdr_metadata_different(old_con_state
, new_con_state
)) {
4954 struct dc_info_packet hdr_infopacket
;
4956 ret
= fill_hdr_info_packet(new_con_state
, &hdr_infopacket
);
4960 new_crtc_state
= drm_atomic_get_crtc_state(state
, crtc
);
4961 if (IS_ERR(new_crtc_state
))
4962 return PTR_ERR(new_crtc_state
);
4965 * DC considers the stream backends changed if the
4966 * static metadata changes. Forcing the modeset also
4967 * gives a simple way for userspace to switch from
4968 * 8bpc to 10bpc when setting the metadata to enter
4971 * Changing the static metadata after it's been
4972 * set is permissible, however. So only force a
4973 * modeset if we're entering or exiting HDR.
4975 new_crtc_state
->mode_changed
=
4976 !old_con_state
->hdr_output_metadata
||
4977 !new_con_state
->hdr_output_metadata
;
4983 static const struct drm_connector_helper_funcs
4984 amdgpu_dm_connector_helper_funcs
= {
4986 * If hotplugging a second bigger display in FB Con mode, bigger resolution
4987 * modes will be filtered by drm_mode_validate_size(), and those modes
4988 * are missing after user start lightdm. So we need to renew modes list.
4989 * in get_modes call back, not just return the modes count
4991 .get_modes
= get_modes
,
4992 .mode_valid
= amdgpu_dm_connector_mode_valid
,
4993 .atomic_check
= amdgpu_dm_connector_atomic_check
,
/* Intentionally empty: DC handles CRTC disable through atomic commit. */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
5000 static bool does_crtc_have_active_cursor(struct drm_crtc_state
*new_crtc_state
)
5002 struct drm_device
*dev
= new_crtc_state
->crtc
->dev
;
5003 struct drm_plane
*plane
;
5005 drm_for_each_plane_mask(plane
, dev
, new_crtc_state
->plane_mask
) {
5006 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
5013 static int count_crtc_active_planes(struct drm_crtc_state
*new_crtc_state
)
5015 struct drm_atomic_state
*state
= new_crtc_state
->state
;
5016 struct drm_plane
*plane
;
5019 drm_for_each_plane_mask(plane
, state
->dev
, new_crtc_state
->plane_mask
) {
5020 struct drm_plane_state
*new_plane_state
;
5022 /* Cursor planes are "fake". */
5023 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
5026 new_plane_state
= drm_atomic_get_new_plane_state(state
, plane
);
5028 if (!new_plane_state
) {
5030 * The plane is enable on the CRTC and hasn't changed
5031 * state. This means that it previously passed
5032 * validation and is therefore enabled.
5038 /* We need a framebuffer to be considered enabled. */
5039 num_active
+= (new_plane_state
->fb
!= NULL
);
5046 * Sets whether interrupts should be enabled on a specific CRTC.
5047 * We require that the stream be enabled and that there exist active
5048 * DC planes on the stream.
5051 dm_update_crtc_interrupt_state(struct drm_crtc
*crtc
,
5052 struct drm_crtc_state
*new_crtc_state
)
5054 struct dm_crtc_state
*dm_new_crtc_state
=
5055 to_dm_crtc_state(new_crtc_state
);
5057 dm_new_crtc_state
->active_planes
= 0;
5058 dm_new_crtc_state
->interrupts_enabled
= false;
5060 if (!dm_new_crtc_state
->stream
)
5063 dm_new_crtc_state
->active_planes
=
5064 count_crtc_active_planes(new_crtc_state
);
5066 dm_new_crtc_state
->interrupts_enabled
=
5067 dm_new_crtc_state
->active_planes
> 0;
5070 static int dm_crtc_helper_atomic_check(struct drm_crtc
*crtc
,
5071 struct drm_crtc_state
*state
)
5073 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
5074 struct dc
*dc
= adev
->dm
.dc
;
5075 struct dm_crtc_state
*dm_crtc_state
= to_dm_crtc_state(state
);
5079 * Update interrupt state for the CRTC. This needs to happen whenever
5080 * the CRTC has changed or whenever any of its planes have changed.
5081 * Atomic check satisfies both of these requirements since the CRTC
5082 * is added to the state by DRM during drm_atomic_helper_check_planes.
5084 dm_update_crtc_interrupt_state(crtc
, state
);
5086 if (unlikely(!dm_crtc_state
->stream
&&
5087 modeset_required(state
, NULL
, dm_crtc_state
->stream
))) {
5092 /* In some use cases, like reset, no stream is attached */
5093 if (!dm_crtc_state
->stream
)
5097 * We want at least one hardware plane enabled to use
5098 * the stream with a cursor enabled.
5100 if (state
->enable
&& state
->active
&&
5101 does_crtc_have_active_cursor(state
) &&
5102 dm_crtc_state
->active_planes
== 0)
5105 if (dc_validate_stream(dc
, dm_crtc_state
->stream
) == DC_OK
)
5111 static bool dm_crtc_helper_mode_fixup(struct drm_crtc
*crtc
,
5112 const struct drm_display_mode
*mode
,
5113 struct drm_display_mode
*adjusted_mode
)
5118 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs
= {
5119 .disable
= dm_crtc_helper_disable
,
5120 .atomic_check
= dm_crtc_helper_atomic_check
,
5121 .mode_fixup
= dm_crtc_helper_mode_fixup
,
5122 .get_scanout_position
= amdgpu_crtc_get_scanout_position
,
/* Intentionally empty: encoders are managed entirely by DC. */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}
5130 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth
)
5132 switch (display_color_depth
) {
5133 case COLOR_DEPTH_666
:
5135 case COLOR_DEPTH_888
:
5137 case COLOR_DEPTH_101010
:
5139 case COLOR_DEPTH_121212
:
5141 case COLOR_DEPTH_141414
:
5143 case COLOR_DEPTH_161616
:
5151 static int dm_encoder_helper_atomic_check(struct drm_encoder
*encoder
,
5152 struct drm_crtc_state
*crtc_state
,
5153 struct drm_connector_state
*conn_state
)
5155 struct drm_atomic_state
*state
= crtc_state
->state
;
5156 struct drm_connector
*connector
= conn_state
->connector
;
5157 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
5158 struct dm_connector_state
*dm_new_connector_state
= to_dm_connector_state(conn_state
);
5159 const struct drm_display_mode
*adjusted_mode
= &crtc_state
->adjusted_mode
;
5160 struct drm_dp_mst_topology_mgr
*mst_mgr
;
5161 struct drm_dp_mst_port
*mst_port
;
5162 enum dc_color_depth color_depth
;
5164 bool is_y420
= false;
5166 if (!aconnector
->port
|| !aconnector
->dc_sink
)
5169 mst_port
= aconnector
->port
;
5170 mst_mgr
= &aconnector
->mst_port
->mst_mgr
;
5172 if (!crtc_state
->connectors_changed
&& !crtc_state
->mode_changed
)
5175 if (!state
->duplicated
) {
5176 is_y420
= drm_mode_is_420_also(&connector
->display_info
, adjusted_mode
) &&
5177 aconnector
->force_yuv420_output
;
5178 color_depth
= convert_color_depth_from_display_info(connector
, conn_state
,
5180 bpp
= convert_dc_color_depth_into_bpc(color_depth
) * 3;
5181 clock
= adjusted_mode
->clock
;
5182 dm_new_connector_state
->pbn
= drm_dp_calc_pbn_mode(clock
, bpp
, false);
5184 dm_new_connector_state
->vcpi_slots
= drm_dp_atomic_find_vcpi_slots(state
,
5187 dm_new_connector_state
->pbn
,
5189 if (dm_new_connector_state
->vcpi_slots
< 0) {
5190 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state
->vcpi_slots
);
5191 return dm_new_connector_state
->vcpi_slots
;
5196 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs
= {
5197 .disable
= dm_encoder_helper_disable
,
5198 .atomic_check
= dm_encoder_helper_atomic_check
#if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * Walk all MST connectors in the atomic state, find the matching DC
 * stream, and enable/disable DSC VCPI accounting in the MST topology
 * based on whether the stream's timing has DSC enabled. Returns 0 or
 * a negative error from the DSC enable call.
 */
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
					    struct dc_state *dc_state)
{
	struct dc_stream_state *stream = NULL;
	struct drm_connector *connector;
	struct drm_connector_state *new_con_state, *old_con_state;
	struct amdgpu_dm_connector *aconnector;
	struct dm_connector_state *dm_conn_state;
	int i, j, clock, bpp;
	int vcpi, pbn_div, pbn = 0;

	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {

		aconnector = to_amdgpu_dm_connector(connector);

		if (!aconnector->port)
			continue;

		if (!new_con_state || !new_con_state->crtc)
			continue;

		dm_conn_state = to_dm_connector_state(new_con_state);

		/* Locate the DC stream backing this connector. */
		for (j = 0; j < dc_state->stream_count; j++) {
			stream = dc_state->streams[j];
			if (!stream)
				continue;

			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
				break;

			stream = NULL;
		}

		if (!stream)
			continue;

		if (stream->timing.flags.DSC != 1) {
			/* DSC off: keep the plain PBN reservation. */
			drm_dp_mst_atomic_enable_dsc(state,
						     aconnector->port,
						     dm_conn_state->pbn,
						     0,
						     false);
			continue;
		}

		pbn_div = dm_mst_get_pbn_divider(stream->link);
		bpp = stream->timing.dsc_cfg.bits_per_pixel;
		clock = stream->timing.pix_clk_100hz / 10;
		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
		vcpi = drm_dp_mst_atomic_enable_dsc(state,
						    aconnector->port,
						    pbn, pbn_div,
						    true);
		if (vcpi < 0)
			return vcpi;

		dm_conn_state->pbn = pbn;
		dm_conn_state->vcpi_slots = vcpi;
	}
	return 0;
}
#endif
5266 static void dm_drm_plane_reset(struct drm_plane
*plane
)
5268 struct dm_plane_state
*amdgpu_state
= NULL
;
5271 plane
->funcs
->atomic_destroy_state(plane
, plane
->state
);
5273 amdgpu_state
= kzalloc(sizeof(*amdgpu_state
), GFP_KERNEL
);
5274 WARN_ON(amdgpu_state
== NULL
);
5277 __drm_atomic_helper_plane_reset(plane
, &amdgpu_state
->base
);
5280 static struct drm_plane_state
*
5281 dm_drm_plane_duplicate_state(struct drm_plane
*plane
)
5283 struct dm_plane_state
*dm_plane_state
, *old_dm_plane_state
;
5285 old_dm_plane_state
= to_dm_plane_state(plane
->state
);
5286 dm_plane_state
= kzalloc(sizeof(*dm_plane_state
), GFP_KERNEL
);
5287 if (!dm_plane_state
)
5290 __drm_atomic_helper_plane_duplicate_state(plane
, &dm_plane_state
->base
);
5292 if (old_dm_plane_state
->dc_state
) {
5293 dm_plane_state
->dc_state
= old_dm_plane_state
->dc_state
;
5294 dc_plane_state_retain(dm_plane_state
->dc_state
);
5297 return &dm_plane_state
->base
;
5300 void dm_drm_plane_destroy_state(struct drm_plane
*plane
,
5301 struct drm_plane_state
*state
)
5303 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(state
);
5305 if (dm_plane_state
->dc_state
)
5306 dc_plane_state_release(dm_plane_state
->dc_state
);
5308 drm_atomic_helper_plane_destroy_state(plane
, state
);
5311 static const struct drm_plane_funcs dm_plane_funcs
= {
5312 .update_plane
= drm_atomic_helper_update_plane
,
5313 .disable_plane
= drm_atomic_helper_disable_plane
,
5314 .destroy
= drm_primary_helper_destroy
,
5315 .reset
= dm_drm_plane_reset
,
5316 .atomic_duplicate_state
= dm_drm_plane_duplicate_state
,
5317 .atomic_destroy_state
= dm_drm_plane_destroy_state
,
5320 static int dm_plane_helper_prepare_fb(struct drm_plane
*plane
,
5321 struct drm_plane_state
*new_state
)
5323 struct amdgpu_framebuffer
*afb
;
5324 struct drm_gem_object
*obj
;
5325 struct amdgpu_device
*adev
;
5326 struct amdgpu_bo
*rbo
;
5327 struct dm_plane_state
*dm_plane_state_new
, *dm_plane_state_old
;
5328 struct list_head list
;
5329 struct ttm_validate_buffer tv
;
5330 struct ww_acquire_ctx ticket
;
5331 uint64_t tiling_flags
;
5334 bool force_disable_dcc
= false;
5336 dm_plane_state_old
= to_dm_plane_state(plane
->state
);
5337 dm_plane_state_new
= to_dm_plane_state(new_state
);
5339 if (!new_state
->fb
) {
5340 DRM_DEBUG_DRIVER("No FB bound\n");
5344 afb
= to_amdgpu_framebuffer(new_state
->fb
);
5345 obj
= new_state
->fb
->obj
[0];
5346 rbo
= gem_to_amdgpu_bo(obj
);
5347 adev
= amdgpu_ttm_adev(rbo
->tbo
.bdev
);
5348 INIT_LIST_HEAD(&list
);
5352 list_add(&tv
.head
, &list
);
5354 r
= ttm_eu_reserve_buffers(&ticket
, &list
, false, NULL
);
5356 dev_err(adev
->dev
, "fail to reserve bo (%d)\n", r
);
5360 if (plane
->type
!= DRM_PLANE_TYPE_CURSOR
)
5361 domain
= amdgpu_display_supported_domains(adev
, rbo
->flags
);
5363 domain
= AMDGPU_GEM_DOMAIN_VRAM
;
5365 r
= amdgpu_bo_pin(rbo
, domain
);
5366 if (unlikely(r
!= 0)) {
5367 if (r
!= -ERESTARTSYS
)
5368 DRM_ERROR("Failed to pin framebuffer with error %d\n", r
);
5369 ttm_eu_backoff_reservation(&ticket
, &list
);
5373 r
= amdgpu_ttm_alloc_gart(&rbo
->tbo
);
5374 if (unlikely(r
!= 0)) {
5375 amdgpu_bo_unpin(rbo
);
5376 ttm_eu_backoff_reservation(&ticket
, &list
);
5377 DRM_ERROR("%p bind failed\n", rbo
);
5381 amdgpu_bo_get_tiling_flags(rbo
, &tiling_flags
);
5383 ttm_eu_backoff_reservation(&ticket
, &list
);
5385 afb
->address
= amdgpu_bo_gpu_offset(rbo
);
5389 if (dm_plane_state_new
->dc_state
&&
5390 dm_plane_state_old
->dc_state
!= dm_plane_state_new
->dc_state
) {
5391 struct dc_plane_state
*plane_state
= dm_plane_state_new
->dc_state
;
5393 force_disable_dcc
= adev
->asic_type
== CHIP_RAVEN
&& adev
->in_suspend
;
5394 fill_plane_buffer_attributes(
5395 adev
, afb
, plane_state
->format
, plane_state
->rotation
,
5396 tiling_flags
, &plane_state
->tiling_info
,
5397 &plane_state
->plane_size
, &plane_state
->dcc
,
5398 &plane_state
->address
,
5405 static void dm_plane_helper_cleanup_fb(struct drm_plane
*plane
,
5406 struct drm_plane_state
*old_state
)
5408 struct amdgpu_bo
*rbo
;
5414 rbo
= gem_to_amdgpu_bo(old_state
->fb
->obj
[0]);
5415 r
= amdgpu_bo_reserve(rbo
, false);
5417 DRM_ERROR("failed to reserve rbo before unpin\n");
5421 amdgpu_bo_unpin(rbo
);
5422 amdgpu_bo_unreserve(rbo
);
5423 amdgpu_bo_unref(&rbo
);
5426 static int dm_plane_atomic_check(struct drm_plane
*plane
,
5427 struct drm_plane_state
*state
)
5429 struct amdgpu_device
*adev
= plane
->dev
->dev_private
;
5430 struct dc
*dc
= adev
->dm
.dc
;
5431 struct dm_plane_state
*dm_plane_state
;
5432 struct dc_scaling_info scaling_info
;
5435 dm_plane_state
= to_dm_plane_state(state
);
5437 if (!dm_plane_state
->dc_state
)
5440 ret
= fill_dc_scaling_info(state
, &scaling_info
);
5444 if (dc_validate_plane(dc
, dm_plane_state
->dc_state
) == DC_OK
)
5450 static int dm_plane_atomic_async_check(struct drm_plane
*plane
,
5451 struct drm_plane_state
*new_plane_state
)
5453 /* Only support async updates on cursor planes. */
5454 if (plane
->type
!= DRM_PLANE_TYPE_CURSOR
)
5460 static void dm_plane_atomic_async_update(struct drm_plane
*plane
,
5461 struct drm_plane_state
*new_state
)
5463 struct drm_plane_state
*old_state
=
5464 drm_atomic_get_old_plane_state(new_state
->state
, plane
);
5466 swap(plane
->state
->fb
, new_state
->fb
);
5468 plane
->state
->src_x
= new_state
->src_x
;
5469 plane
->state
->src_y
= new_state
->src_y
;
5470 plane
->state
->src_w
= new_state
->src_w
;
5471 plane
->state
->src_h
= new_state
->src_h
;
5472 plane
->state
->crtc_x
= new_state
->crtc_x
;
5473 plane
->state
->crtc_y
= new_state
->crtc_y
;
5474 plane
->state
->crtc_w
= new_state
->crtc_w
;
5475 plane
->state
->crtc_h
= new_state
->crtc_h
;
5477 handle_cursor_update(plane
, old_state
);
5480 static const struct drm_plane_helper_funcs dm_plane_helper_funcs
= {
5481 .prepare_fb
= dm_plane_helper_prepare_fb
,
5482 .cleanup_fb
= dm_plane_helper_cleanup_fb
,
5483 .atomic_check
= dm_plane_atomic_check
,
5484 .atomic_async_check
= dm_plane_atomic_async_check
,
5485 .atomic_async_update
= dm_plane_atomic_async_update
5489 * TODO: these are currently initialized to rgb formats only.
5490 * For future use cases we should either initialize them dynamically based on
5491 * plane capabilities, or initialize this array to all formats, so internal drm
5492 * check will succeed, and let DC implement proper check
5494 static const uint32_t rgb_formats
[] = {
5495 DRM_FORMAT_XRGB8888
,
5496 DRM_FORMAT_ARGB8888
,
5497 DRM_FORMAT_RGBA8888
,
5498 DRM_FORMAT_XRGB2101010
,
5499 DRM_FORMAT_XBGR2101010
,
5500 DRM_FORMAT_ARGB2101010
,
5501 DRM_FORMAT_ABGR2101010
,
5502 DRM_FORMAT_XBGR8888
,
5503 DRM_FORMAT_ABGR8888
,
5507 static const uint32_t overlay_formats
[] = {
5508 DRM_FORMAT_XRGB8888
,
5509 DRM_FORMAT_ARGB8888
,
5510 DRM_FORMAT_RGBA8888
,
5511 DRM_FORMAT_XBGR8888
,
5512 DRM_FORMAT_ABGR8888
,
5516 static const u32 cursor_formats
[] = {
5520 static int get_plane_formats(const struct drm_plane
*plane
,
5521 const struct dc_plane_cap
*plane_cap
,
5522 uint32_t *formats
, int max_formats
)
5524 int i
, num_formats
= 0;
5527 * TODO: Query support for each group of formats directly from
5528 * DC plane caps. This will require adding more formats to the
5532 switch (plane
->type
) {
5533 case DRM_PLANE_TYPE_PRIMARY
:
5534 for (i
= 0; i
< ARRAY_SIZE(rgb_formats
); ++i
) {
5535 if (num_formats
>= max_formats
)
5538 formats
[num_formats
++] = rgb_formats
[i
];
5541 if (plane_cap
&& plane_cap
->pixel_format_support
.nv12
)
5542 formats
[num_formats
++] = DRM_FORMAT_NV12
;
5543 if (plane_cap
&& plane_cap
->pixel_format_support
.p010
)
5544 formats
[num_formats
++] = DRM_FORMAT_P010
;
5547 case DRM_PLANE_TYPE_OVERLAY
:
5548 for (i
= 0; i
< ARRAY_SIZE(overlay_formats
); ++i
) {
5549 if (num_formats
>= max_formats
)
5552 formats
[num_formats
++] = overlay_formats
[i
];
5556 case DRM_PLANE_TYPE_CURSOR
:
5557 for (i
= 0; i
< ARRAY_SIZE(cursor_formats
); ++i
) {
5558 if (num_formats
>= max_formats
)
5561 formats
[num_formats
++] = cursor_formats
[i
];
5569 static int amdgpu_dm_plane_init(struct amdgpu_display_manager
*dm
,
5570 struct drm_plane
*plane
,
5571 unsigned long possible_crtcs
,
5572 const struct dc_plane_cap
*plane_cap
)
5574 uint32_t formats
[32];
5578 num_formats
= get_plane_formats(plane
, plane_cap
, formats
,
5579 ARRAY_SIZE(formats
));
5581 res
= drm_universal_plane_init(dm
->adev
->ddev
, plane
, possible_crtcs
,
5582 &dm_plane_funcs
, formats
, num_formats
,
5583 NULL
, plane
->type
, NULL
);
5587 if (plane
->type
== DRM_PLANE_TYPE_OVERLAY
&&
5588 plane_cap
&& plane_cap
->per_pixel_alpha
) {
5589 unsigned int blend_caps
= BIT(DRM_MODE_BLEND_PIXEL_NONE
) |
5590 BIT(DRM_MODE_BLEND_PREMULTI
);
5592 drm_plane_create_alpha_property(plane
);
5593 drm_plane_create_blend_mode_property(plane
, blend_caps
);
5596 if (plane
->type
== DRM_PLANE_TYPE_PRIMARY
&&
5598 (plane_cap
->pixel_format_support
.nv12
||
5599 plane_cap
->pixel_format_support
.p010
)) {
5600 /* This only affects YUV formats. */
5601 drm_plane_create_color_properties(
5603 BIT(DRM_COLOR_YCBCR_BT601
) |
5604 BIT(DRM_COLOR_YCBCR_BT709
) |
5605 BIT(DRM_COLOR_YCBCR_BT2020
),
5606 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE
) |
5607 BIT(DRM_COLOR_YCBCR_FULL_RANGE
),
5608 DRM_COLOR_YCBCR_BT709
, DRM_COLOR_YCBCR_LIMITED_RANGE
);
5611 drm_plane_helper_add(plane
, &dm_plane_helper_funcs
);
5613 /* Create (reset) the plane state */
5614 if (plane
->funcs
->reset
)
5615 plane
->funcs
->reset(plane
);
5620 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager
*dm
,
5621 struct drm_plane
*plane
,
5622 uint32_t crtc_index
)
5624 struct amdgpu_crtc
*acrtc
= NULL
;
5625 struct drm_plane
*cursor_plane
;
5629 cursor_plane
= kzalloc(sizeof(*cursor_plane
), GFP_KERNEL
);
5633 cursor_plane
->type
= DRM_PLANE_TYPE_CURSOR
;
5634 res
= amdgpu_dm_plane_init(dm
, cursor_plane
, 0, NULL
);
5636 acrtc
= kzalloc(sizeof(struct amdgpu_crtc
), GFP_KERNEL
);
5640 res
= drm_crtc_init_with_planes(
5645 &amdgpu_dm_crtc_funcs
, NULL
);
5650 drm_crtc_helper_add(&acrtc
->base
, &amdgpu_dm_crtc_helper_funcs
);
5652 /* Create (reset) the plane state */
5653 if (acrtc
->base
.funcs
->reset
)
5654 acrtc
->base
.funcs
->reset(&acrtc
->base
);
5656 acrtc
->max_cursor_width
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
5657 acrtc
->max_cursor_height
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
5659 acrtc
->crtc_id
= crtc_index
;
5660 acrtc
->base
.enabled
= false;
5661 acrtc
->otg_inst
= -1;
5663 dm
->adev
->mode_info
.crtcs
[crtc_index
] = acrtc
;
5664 drm_crtc_enable_color_mgmt(&acrtc
->base
, MAX_COLOR_LUT_ENTRIES
,
5665 true, MAX_COLOR_LUT_ENTRIES
);
5666 drm_mode_crtc_set_gamma_size(&acrtc
->base
, MAX_COLOR_LEGACY_LUT_ENTRIES
);
5672 kfree(cursor_plane
);
5677 static int to_drm_connector_type(enum signal_type st
)
5680 case SIGNAL_TYPE_HDMI_TYPE_A
:
5681 return DRM_MODE_CONNECTOR_HDMIA
;
5682 case SIGNAL_TYPE_EDP
:
5683 return DRM_MODE_CONNECTOR_eDP
;
5684 case SIGNAL_TYPE_LVDS
:
5685 return DRM_MODE_CONNECTOR_LVDS
;
5686 case SIGNAL_TYPE_RGB
:
5687 return DRM_MODE_CONNECTOR_VGA
;
5688 case SIGNAL_TYPE_DISPLAY_PORT
:
5689 case SIGNAL_TYPE_DISPLAY_PORT_MST
:
5690 return DRM_MODE_CONNECTOR_DisplayPort
;
5691 case SIGNAL_TYPE_DVI_DUAL_LINK
:
5692 case SIGNAL_TYPE_DVI_SINGLE_LINK
:
5693 return DRM_MODE_CONNECTOR_DVID
;
5694 case SIGNAL_TYPE_VIRTUAL
:
5695 return DRM_MODE_CONNECTOR_VIRTUAL
;
5698 return DRM_MODE_CONNECTOR_Unknown
;
5702 static struct drm_encoder
*amdgpu_dm_connector_to_encoder(struct drm_connector
*connector
)
5704 struct drm_encoder
*encoder
;
5706 /* There is only one encoder per connector */
5707 drm_connector_for_each_possible_encoder(connector
, encoder
)
5713 static void amdgpu_dm_get_native_mode(struct drm_connector
*connector
)
5715 struct drm_encoder
*encoder
;
5716 struct amdgpu_encoder
*amdgpu_encoder
;
5718 encoder
= amdgpu_dm_connector_to_encoder(connector
);
5720 if (encoder
== NULL
)
5723 amdgpu_encoder
= to_amdgpu_encoder(encoder
);
5725 amdgpu_encoder
->native_mode
.clock
= 0;
5727 if (!list_empty(&connector
->probed_modes
)) {
5728 struct drm_display_mode
*preferred_mode
= NULL
;
5730 list_for_each_entry(preferred_mode
,
5731 &connector
->probed_modes
,
5733 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
)
5734 amdgpu_encoder
->native_mode
= *preferred_mode
;
5742 static struct drm_display_mode
*
5743 amdgpu_dm_create_common_mode(struct drm_encoder
*encoder
,
5745 int hdisplay
, int vdisplay
)
5747 struct drm_device
*dev
= encoder
->dev
;
5748 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
5749 struct drm_display_mode
*mode
= NULL
;
5750 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
5752 mode
= drm_mode_duplicate(dev
, native_mode
);
5757 mode
->hdisplay
= hdisplay
;
5758 mode
->vdisplay
= vdisplay
;
5759 mode
->type
&= ~DRM_MODE_TYPE_PREFERRED
;
5760 strscpy(mode
->name
, name
, DRM_DISPLAY_MODE_LEN
);
5766 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder
*encoder
,
5767 struct drm_connector
*connector
)
5769 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
5770 struct drm_display_mode
*mode
= NULL
;
5771 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
5772 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
5773 to_amdgpu_dm_connector(connector
);
5777 char name
[DRM_DISPLAY_MODE_LEN
];
5780 } common_modes
[] = {
5781 { "640x480", 640, 480},
5782 { "800x600", 800, 600},
5783 { "1024x768", 1024, 768},
5784 { "1280x720", 1280, 720},
5785 { "1280x800", 1280, 800},
5786 {"1280x1024", 1280, 1024},
5787 { "1440x900", 1440, 900},
5788 {"1680x1050", 1680, 1050},
5789 {"1600x1200", 1600, 1200},
5790 {"1920x1080", 1920, 1080},
5791 {"1920x1200", 1920, 1200}
5794 n
= ARRAY_SIZE(common_modes
);
5796 for (i
= 0; i
< n
; i
++) {
5797 struct drm_display_mode
*curmode
= NULL
;
5798 bool mode_existed
= false;
5800 if (common_modes
[i
].w
> native_mode
->hdisplay
||
5801 common_modes
[i
].h
> native_mode
->vdisplay
||
5802 (common_modes
[i
].w
== native_mode
->hdisplay
&&
5803 common_modes
[i
].h
== native_mode
->vdisplay
))
5806 list_for_each_entry(curmode
, &connector
->probed_modes
, head
) {
5807 if (common_modes
[i
].w
== curmode
->hdisplay
&&
5808 common_modes
[i
].h
== curmode
->vdisplay
) {
5809 mode_existed
= true;
5817 mode
= amdgpu_dm_create_common_mode(encoder
,
5818 common_modes
[i
].name
, common_modes
[i
].w
,
5820 drm_mode_probed_add(connector
, mode
);
5821 amdgpu_dm_connector
->num_modes
++;
5825 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector
*connector
,
5828 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
5829 to_amdgpu_dm_connector(connector
);
5832 /* empty probed_modes */
5833 INIT_LIST_HEAD(&connector
->probed_modes
);
5834 amdgpu_dm_connector
->num_modes
=
5835 drm_add_edid_modes(connector
, edid
);
5837 /* sorting the probed modes before calling function
5838 * amdgpu_dm_get_native_mode() since EDID can have
5839 * more than one preferred mode. The modes that are
5840 * later in the probed mode list could be of higher
5841 * and preferred resolution. For example, 3840x2160
5842 * resolution in base EDID preferred timing and 4096x2160
5843 * preferred resolution in DID extension block later.
5845 drm_mode_sort(&connector
->probed_modes
);
5846 amdgpu_dm_get_native_mode(connector
);
5848 amdgpu_dm_connector
->num_modes
= 0;
5852 static int amdgpu_dm_connector_get_modes(struct drm_connector
*connector
)
5854 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
5855 to_amdgpu_dm_connector(connector
);
5856 struct drm_encoder
*encoder
;
5857 struct edid
*edid
= amdgpu_dm_connector
->edid
;
5859 encoder
= amdgpu_dm_connector_to_encoder(connector
);
5861 if (!edid
|| !drm_edid_is_valid(edid
)) {
5862 amdgpu_dm_connector
->num_modes
=
5863 drm_add_modes_noedid(connector
, 640, 480);
5865 amdgpu_dm_connector_ddc_get_modes(connector
, edid
);
5866 amdgpu_dm_connector_add_common_modes(encoder
, connector
);
5868 amdgpu_dm_fbc_init(connector
);
5870 return amdgpu_dm_connector
->num_modes
;
5873 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager
*dm
,
5874 struct amdgpu_dm_connector
*aconnector
,
5876 struct dc_link
*link
,
5879 struct amdgpu_device
*adev
= dm
->ddev
->dev_private
;
5882 * Some of the properties below require access to state, like bpc.
5883 * Allocate some default initial connector state with our reset helper.
5885 if (aconnector
->base
.funcs
->reset
)
5886 aconnector
->base
.funcs
->reset(&aconnector
->base
);
5888 aconnector
->connector_id
= link_index
;
5889 aconnector
->dc_link
= link
;
5890 aconnector
->base
.interlace_allowed
= false;
5891 aconnector
->base
.doublescan_allowed
= false;
5892 aconnector
->base
.stereo_allowed
= false;
5893 aconnector
->base
.dpms
= DRM_MODE_DPMS_OFF
;
5894 aconnector
->hpd
.hpd
= AMDGPU_HPD_NONE
; /* not used */
5895 aconnector
->audio_inst
= -1;
5896 mutex_init(&aconnector
->hpd_lock
);
5899 * configure support HPD hot plug connector_>polled default value is 0
5900 * which means HPD hot plug not supported
5902 switch (connector_type
) {
5903 case DRM_MODE_CONNECTOR_HDMIA
:
5904 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
5905 aconnector
->base
.ycbcr_420_allowed
=
5906 link
->link_enc
->features
.hdmi_ycbcr420_supported
? true : false;
5908 case DRM_MODE_CONNECTOR_DisplayPort
:
5909 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
5910 aconnector
->base
.ycbcr_420_allowed
=
5911 link
->link_enc
->features
.dp_ycbcr420_supported
? true : false;
5913 case DRM_MODE_CONNECTOR_DVID
:
5914 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
5920 drm_object_attach_property(&aconnector
->base
.base
,
5921 dm
->ddev
->mode_config
.scaling_mode_property
,
5922 DRM_MODE_SCALE_NONE
);
5924 drm_object_attach_property(&aconnector
->base
.base
,
5925 adev
->mode_info
.underscan_property
,
5927 drm_object_attach_property(&aconnector
->base
.base
,
5928 adev
->mode_info
.underscan_hborder_property
,
5930 drm_object_attach_property(&aconnector
->base
.base
,
5931 adev
->mode_info
.underscan_vborder_property
,
5934 if (!aconnector
->mst_port
)
5935 drm_connector_attach_max_bpc_property(&aconnector
->base
, 8, 16);
5937 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
5938 aconnector
->base
.state
->max_bpc
= (connector_type
== DRM_MODE_CONNECTOR_eDP
) ? 16 : 8;
5939 aconnector
->base
.state
->max_requested_bpc
= aconnector
->base
.state
->max_bpc
;
5941 if (connector_type
== DRM_MODE_CONNECTOR_eDP
&&
5942 dc_is_dmcu_initialized(adev
->dm
.dc
)) {
5943 drm_object_attach_property(&aconnector
->base
.base
,
5944 adev
->mode_info
.abm_level_property
, 0);
5947 if (connector_type
== DRM_MODE_CONNECTOR_HDMIA
||
5948 connector_type
== DRM_MODE_CONNECTOR_DisplayPort
||
5949 connector_type
== DRM_MODE_CONNECTOR_eDP
) {
5950 drm_object_attach_property(
5951 &aconnector
->base
.base
,
5952 dm
->ddev
->mode_config
.hdr_output_metadata_property
, 0);
5954 if (!aconnector
->mst_port
)
5955 drm_connector_attach_vrr_capable_property(&aconnector
->base
);
5957 #ifdef CONFIG_DRM_AMD_DC_HDCP
5958 if (adev
->dm
.hdcp_workqueue
)
5959 drm_connector_attach_content_protection_property(&aconnector
->base
, true);
5964 static int amdgpu_dm_i2c_xfer(struct i2c_adapter
*i2c_adap
,
5965 struct i2c_msg
*msgs
, int num
)
5967 struct amdgpu_i2c_adapter
*i2c
= i2c_get_adapdata(i2c_adap
);
5968 struct ddc_service
*ddc_service
= i2c
->ddc_service
;
5969 struct i2c_command cmd
;
5973 cmd
.payloads
= kcalloc(num
, sizeof(struct i2c_payload
), GFP_KERNEL
);
5978 cmd
.number_of_payloads
= num
;
5979 cmd
.engine
= I2C_COMMAND_ENGINE_DEFAULT
;
5982 for (i
= 0; i
< num
; i
++) {
5983 cmd
.payloads
[i
].write
= !(msgs
[i
].flags
& I2C_M_RD
);
5984 cmd
.payloads
[i
].address
= msgs
[i
].addr
;
5985 cmd
.payloads
[i
].length
= msgs
[i
].len
;
5986 cmd
.payloads
[i
].data
= msgs
[i
].buf
;
5990 ddc_service
->ctx
->dc
,
5991 ddc_service
->ddc_pin
->hw_info
.ddc_channel
,
5995 kfree(cmd
.payloads
);
5999 static u32
amdgpu_dm_i2c_func(struct i2c_adapter
*adap
)
6001 return I2C_FUNC_I2C
| I2C_FUNC_SMBUS_EMUL
;
6004 static const struct i2c_algorithm amdgpu_dm_i2c_algo
= {
6005 .master_xfer
= amdgpu_dm_i2c_xfer
,
6006 .functionality
= amdgpu_dm_i2c_func
,
6009 static struct amdgpu_i2c_adapter
*
6010 create_i2c(struct ddc_service
*ddc_service
,
6014 struct amdgpu_device
*adev
= ddc_service
->ctx
->driver_context
;
6015 struct amdgpu_i2c_adapter
*i2c
;
6017 i2c
= kzalloc(sizeof(struct amdgpu_i2c_adapter
), GFP_KERNEL
);
6020 i2c
->base
.owner
= THIS_MODULE
;
6021 i2c
->base
.class = I2C_CLASS_DDC
;
6022 i2c
->base
.dev
.parent
= &adev
->pdev
->dev
;
6023 i2c
->base
.algo
= &amdgpu_dm_i2c_algo
;
6024 snprintf(i2c
->base
.name
, sizeof(i2c
->base
.name
), "AMDGPU DM i2c hw bus %d", link_index
);
6025 i2c_set_adapdata(&i2c
->base
, i2c
);
6026 i2c
->ddc_service
= ddc_service
;
6027 i2c
->ddc_service
->ddc_pin
->hw_info
.ddc_channel
= link_index
;
6034 * Note: this function assumes that dc_link_detect() was called for the
6035 * dc_link which will be represented by this aconnector.
6037 static int amdgpu_dm_connector_init(struct amdgpu_display_manager
*dm
,
6038 struct amdgpu_dm_connector
*aconnector
,
6039 uint32_t link_index
,
6040 struct amdgpu_encoder
*aencoder
)
6044 struct dc
*dc
= dm
->dc
;
6045 struct dc_link
*link
= dc_get_link_at_index(dc
, link_index
);
6046 struct amdgpu_i2c_adapter
*i2c
;
6048 link
->priv
= aconnector
;
6050 DRM_DEBUG_DRIVER("%s()\n", __func__
);
6052 i2c
= create_i2c(link
->ddc
, link
->link_index
, &res
);
6054 DRM_ERROR("Failed to create i2c adapter data\n");
6058 aconnector
->i2c
= i2c
;
6059 res
= i2c_add_adapter(&i2c
->base
);
6062 DRM_ERROR("Failed to register hw i2c %d\n", link
->link_index
);
6066 connector_type
= to_drm_connector_type(link
->connector_signal
);
6068 res
= drm_connector_init_with_ddc(
6071 &amdgpu_dm_connector_funcs
,
6076 DRM_ERROR("connector_init failed\n");
6077 aconnector
->connector_id
= -1;
6081 drm_connector_helper_add(
6083 &amdgpu_dm_connector_helper_funcs
);
6085 amdgpu_dm_connector_init_helper(
6092 drm_connector_attach_encoder(
6093 &aconnector
->base
, &aencoder
->base
);
6095 if (connector_type
== DRM_MODE_CONNECTOR_DisplayPort
6096 || connector_type
== DRM_MODE_CONNECTOR_eDP
)
6097 amdgpu_dm_initialize_dp_connector(dm
, aconnector
, link
->link_index
);
6102 aconnector
->i2c
= NULL
;
6107 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device
*adev
)
6109 switch (adev
->mode_info
.num_crtc
) {
6126 static int amdgpu_dm_encoder_init(struct drm_device
*dev
,
6127 struct amdgpu_encoder
*aencoder
,
6128 uint32_t link_index
)
6130 struct amdgpu_device
*adev
= dev
->dev_private
;
6132 int res
= drm_encoder_init(dev
,
6134 &amdgpu_dm_encoder_funcs
,
6135 DRM_MODE_ENCODER_TMDS
,
6138 aencoder
->base
.possible_crtcs
= amdgpu_dm_get_encoder_crtc_mask(adev
);
6141 aencoder
->encoder_id
= link_index
;
6143 aencoder
->encoder_id
= -1;
6145 drm_encoder_helper_add(&aencoder
->base
, &amdgpu_dm_encoder_helper_funcs
);
6150 static void manage_dm_interrupts(struct amdgpu_device
*adev
,
6151 struct amdgpu_crtc
*acrtc
,
6155 * this is not correct translation but will work as soon as VBLANK
6156 * constant is the same as PFLIP
6159 amdgpu_display_crtc_idx_to_irq_type(
6164 drm_crtc_vblank_on(&acrtc
->base
);
6167 &adev
->pageflip_irq
,
6173 &adev
->pageflip_irq
,
6175 drm_crtc_vblank_off(&acrtc
->base
);
6180 is_scaling_state_different(const struct dm_connector_state
*dm_state
,
6181 const struct dm_connector_state
*old_dm_state
)
6183 if (dm_state
->scaling
!= old_dm_state
->scaling
)
6185 if (!dm_state
->underscan_enable
&& old_dm_state
->underscan_enable
) {
6186 if (old_dm_state
->underscan_hborder
!= 0 && old_dm_state
->underscan_vborder
!= 0)
6188 } else if (dm_state
->underscan_enable
&& !old_dm_state
->underscan_enable
) {
6189 if (dm_state
->underscan_hborder
!= 0 && dm_state
->underscan_vborder
!= 0)
6191 } else if (dm_state
->underscan_hborder
!= old_dm_state
->underscan_hborder
||
6192 dm_state
->underscan_vborder
!= old_dm_state
->underscan_vborder
)
#ifdef CONFIG_DRM_AMD_DC_HDCP
/*
 * Decide whether the HDCP content-protection state changed in a way
 * that requires (re)starting or stopping HDCP on this connector.
 * Mutates *state in the content-type-change, re-enable, and S3-resume
 * special cases so later commit logic sees a consistent request.
 *
 * NOTE(review): exact return values on the fall-through paths were
 * partially elided in the reviewed extract — confirm against upstream.
 */
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/* Content type changed while CP is requested: restart as DESIRED. */
	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re enabled, ignore this */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
	 * hot-plug, headless s3, dpms
	 */
	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
	    aconnector->dc_sink != NULL)
		return true;

	if (old_state->content_protection == state->content_protection)
		return false;

	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		return true;

	return false;
}
#endif
6239 static void remove_stream(struct amdgpu_device
*adev
,
6240 struct amdgpu_crtc
*acrtc
,
6241 struct dc_stream_state
*stream
)
6243 /* this is the update mode case */
6245 acrtc
->otg_inst
= -1;
6246 acrtc
->enabled
= false;
6249 static int get_cursor_position(struct drm_plane
*plane
, struct drm_crtc
*crtc
,
6250 struct dc_cursor_position
*position
)
6252 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
6254 int xorigin
= 0, yorigin
= 0;
6256 position
->enable
= false;
6260 if (!crtc
|| !plane
->state
->fb
)
6263 if ((plane
->state
->crtc_w
> amdgpu_crtc
->max_cursor_width
) ||
6264 (plane
->state
->crtc_h
> amdgpu_crtc
->max_cursor_height
)) {
6265 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6267 plane
->state
->crtc_w
,
6268 plane
->state
->crtc_h
);
6272 x
= plane
->state
->crtc_x
;
6273 y
= plane
->state
->crtc_y
;
6275 if (x
<= -amdgpu_crtc
->max_cursor_width
||
6276 y
<= -amdgpu_crtc
->max_cursor_height
)
6280 xorigin
= min(-x
, amdgpu_crtc
->max_cursor_width
- 1);
6284 yorigin
= min(-y
, amdgpu_crtc
->max_cursor_height
- 1);
6287 position
->enable
= true;
6288 position
->translate_by_source
= true;
6291 position
->x_hotspot
= xorigin
;
6292 position
->y_hotspot
= yorigin
;
6297 static void handle_cursor_update(struct drm_plane
*plane
,
6298 struct drm_plane_state
*old_plane_state
)
6300 struct amdgpu_device
*adev
= plane
->dev
->dev_private
;
6301 struct amdgpu_framebuffer
*afb
= to_amdgpu_framebuffer(plane
->state
->fb
);
6302 struct drm_crtc
*crtc
= afb
? plane
->state
->crtc
: old_plane_state
->crtc
;
6303 struct dm_crtc_state
*crtc_state
= crtc
? to_dm_crtc_state(crtc
->state
) : NULL
;
6304 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
6305 uint64_t address
= afb
? afb
->address
: 0;
6306 struct dc_cursor_position position
;
6307 struct dc_cursor_attributes attributes
;
6310 if (!plane
->state
->fb
&& !old_plane_state
->fb
)
6313 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6315 amdgpu_crtc
->crtc_id
,
6316 plane
->state
->crtc_w
,
6317 plane
->state
->crtc_h
);
6319 ret
= get_cursor_position(plane
, crtc
, &position
);
6323 if (!position
.enable
) {
6324 /* turn off cursor */
6325 if (crtc_state
&& crtc_state
->stream
) {
6326 mutex_lock(&adev
->dm
.dc_lock
);
6327 dc_stream_set_cursor_position(crtc_state
->stream
,
6329 mutex_unlock(&adev
->dm
.dc_lock
);
6334 amdgpu_crtc
->cursor_width
= plane
->state
->crtc_w
;
6335 amdgpu_crtc
->cursor_height
= plane
->state
->crtc_h
;
6337 memset(&attributes
, 0, sizeof(attributes
));
6338 attributes
.address
.high_part
= upper_32_bits(address
);
6339 attributes
.address
.low_part
= lower_32_bits(address
);
6340 attributes
.width
= plane
->state
->crtc_w
;
6341 attributes
.height
= plane
->state
->crtc_h
;
6342 attributes
.color_format
= CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA
;
6343 attributes
.rotation_angle
= 0;
6344 attributes
.attribute_flags
.value
= 0;
6346 attributes
.pitch
= attributes
.width
;
6348 if (crtc_state
->stream
) {
6349 mutex_lock(&adev
->dm
.dc_lock
);
6350 if (!dc_stream_set_cursor_attributes(crtc_state
->stream
,
6352 DRM_ERROR("DC failed to set cursor attributes\n");
6354 if (!dc_stream_set_cursor_position(crtc_state
->stream
,
6356 DRM_ERROR("DC failed to set cursor position\n");
6357 mutex_unlock(&adev
->dm
.dc_lock
);
6361 static void prepare_flip_isr(struct amdgpu_crtc
*acrtc
)
6364 assert_spin_locked(&acrtc
->base
.dev
->event_lock
);
6365 WARN_ON(acrtc
->event
);
6367 acrtc
->event
= acrtc
->base
.state
->event
;
6369 /* Set the flip status */
6370 acrtc
->pflip_status
= AMDGPU_FLIP_SUBMITTED
;
6372 /* Mark this event as consumed */
6373 acrtc
->base
.state
->event
= NULL
;
6375 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6379 static void update_freesync_state_on_stream(
6380 struct amdgpu_display_manager
*dm
,
6381 struct dm_crtc_state
*new_crtc_state
,
6382 struct dc_stream_state
*new_stream
,
6383 struct dc_plane_state
*surface
,
6384 u32 flip_timestamp_in_us
)
6386 struct mod_vrr_params vrr_params
;
6387 struct dc_info_packet vrr_infopacket
= {0};
6388 struct amdgpu_device
*adev
= dm
->adev
;
6389 unsigned long flags
;
6395 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6396 * For now it's sufficient to just guard against these conditions.
6399 if (!new_stream
->timing
.h_total
|| !new_stream
->timing
.v_total
)
6402 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
6403 vrr_params
= new_crtc_state
->vrr_params
;
6406 mod_freesync_handle_preflip(
6407 dm
->freesync_module
,
6410 flip_timestamp_in_us
,
6413 if (adev
->family
< AMDGPU_FAMILY_AI
&&
6414 amdgpu_dm_vrr_active(new_crtc_state
)) {
6415 mod_freesync_handle_v_update(dm
->freesync_module
,
6416 new_stream
, &vrr_params
);
6418 /* Need to call this before the frame ends. */
6419 dc_stream_adjust_vmin_vmax(dm
->dc
,
6420 new_crtc_state
->stream
,
6421 &vrr_params
.adjust
);
6425 mod_freesync_build_vrr_infopacket(
6426 dm
->freesync_module
,
6430 TRANSFER_FUNC_UNKNOWN
,
6433 new_crtc_state
->freesync_timing_changed
|=
6434 (memcmp(&new_crtc_state
->vrr_params
.adjust
,
6436 sizeof(vrr_params
.adjust
)) != 0);
6438 new_crtc_state
->freesync_vrr_info_changed
|=
6439 (memcmp(&new_crtc_state
->vrr_infopacket
,
6441 sizeof(vrr_infopacket
)) != 0);
6443 new_crtc_state
->vrr_params
= vrr_params
;
6444 new_crtc_state
->vrr_infopacket
= vrr_infopacket
;
6446 new_stream
->adjust
= new_crtc_state
->vrr_params
.adjust
;
6447 new_stream
->vrr_infopacket
= vrr_infopacket
;
6449 if (new_crtc_state
->freesync_vrr_info_changed
)
6450 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6451 new_crtc_state
->base
.crtc
->base
.id
,
6452 (int)new_crtc_state
->base
.vrr_enabled
,
6453 (int)vrr_params
.state
);
6455 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
6458 static void pre_update_freesync_state_on_stream(
6459 struct amdgpu_display_manager
*dm
,
6460 struct dm_crtc_state
*new_crtc_state
)
6462 struct dc_stream_state
*new_stream
= new_crtc_state
->stream
;
6463 struct mod_vrr_params vrr_params
;
6464 struct mod_freesync_config config
= new_crtc_state
->freesync_config
;
6465 struct amdgpu_device
*adev
= dm
->adev
;
6466 unsigned long flags
;
6472 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6473 * For now it's sufficient to just guard against these conditions.
6475 if (!new_stream
->timing
.h_total
|| !new_stream
->timing
.v_total
)
6478 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
6479 vrr_params
= new_crtc_state
->vrr_params
;
6481 if (new_crtc_state
->vrr_supported
&&
6482 config
.min_refresh_in_uhz
&&
6483 config
.max_refresh_in_uhz
) {
6484 config
.state
= new_crtc_state
->base
.vrr_enabled
?
6485 VRR_STATE_ACTIVE_VARIABLE
:
6488 config
.state
= VRR_STATE_UNSUPPORTED
;
6491 mod_freesync_build_vrr_params(dm
->freesync_module
,
6493 &config
, &vrr_params
);
6495 new_crtc_state
->freesync_timing_changed
|=
6496 (memcmp(&new_crtc_state
->vrr_params
.adjust
,
6498 sizeof(vrr_params
.adjust
)) != 0);
6500 new_crtc_state
->vrr_params
= vrr_params
;
6501 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
6504 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state
*old_state
,
6505 struct dm_crtc_state
*new_state
)
6507 bool old_vrr_active
= amdgpu_dm_vrr_active(old_state
);
6508 bool new_vrr_active
= amdgpu_dm_vrr_active(new_state
);
6510 if (!old_vrr_active
&& new_vrr_active
) {
6511 /* Transition VRR inactive -> active:
6512 * While VRR is active, we must not disable vblank irq, as a
6513 * reenable after disable would compute bogus vblank/pflip
6514 * timestamps if it likely happened inside display front-porch.
6516 * We also need vupdate irq for the actual core vblank handling
6519 dm_set_vupdate_irq(new_state
->base
.crtc
, true);
6520 drm_crtc_vblank_get(new_state
->base
.crtc
);
6521 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6522 __func__
, new_state
->base
.crtc
->base
.id
);
6523 } else if (old_vrr_active
&& !new_vrr_active
) {
6524 /* Transition VRR active -> inactive:
6525 * Allow vblank irq disable again for fixed refresh rate.
6527 dm_set_vupdate_irq(new_state
->base
.crtc
, false);
6528 drm_crtc_vblank_put(new_state
->base
.crtc
);
6529 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6530 __func__
, new_state
->base
.crtc
->base
.id
);
6534 static void amdgpu_dm_commit_cursors(struct drm_atomic_state
*state
)
6536 struct drm_plane
*plane
;
6537 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
6541 * TODO: Make this per-stream so we don't issue redundant updates for
6542 * commits with multiple streams.
6544 for_each_oldnew_plane_in_state(state
, plane
, old_plane_state
,
6546 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
6547 handle_cursor_update(plane
, old_plane_state
);
6550 static void amdgpu_dm_commit_planes(struct drm_atomic_state
*state
,
6551 struct dc_state
*dc_state
,
6552 struct drm_device
*dev
,
6553 struct amdgpu_display_manager
*dm
,
6554 struct drm_crtc
*pcrtc
,
6555 bool wait_for_vblank
)
6558 uint64_t timestamp_ns
;
6559 struct drm_plane
*plane
;
6560 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
6561 struct amdgpu_crtc
*acrtc_attach
= to_amdgpu_crtc(pcrtc
);
6562 struct drm_crtc_state
*new_pcrtc_state
=
6563 drm_atomic_get_new_crtc_state(state
, pcrtc
);
6564 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(new_pcrtc_state
);
6565 struct dm_crtc_state
*dm_old_crtc_state
=
6566 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state
, pcrtc
));
6567 int planes_count
= 0, vpos
, hpos
;
6569 unsigned long flags
;
6570 struct amdgpu_bo
*abo
;
6571 uint64_t tiling_flags
;
6572 uint32_t target_vblank
, last_flip_vblank
;
6573 bool vrr_active
= amdgpu_dm_vrr_active(acrtc_state
);
6574 bool pflip_present
= false;
6576 struct dc_surface_update surface_updates
[MAX_SURFACES
];
6577 struct dc_plane_info plane_infos
[MAX_SURFACES
];
6578 struct dc_scaling_info scaling_infos
[MAX_SURFACES
];
6579 struct dc_flip_addrs flip_addrs
[MAX_SURFACES
];
6580 struct dc_stream_update stream_update
;
6583 bundle
= kzalloc(sizeof(*bundle
), GFP_KERNEL
);
6586 dm_error("Failed to allocate update bundle\n");
6591 * Disable the cursor first if we're disabling all the planes.
6592 * It'll remain on the screen after the planes are re-enabled
6595 if (acrtc_state
->active_planes
== 0)
6596 amdgpu_dm_commit_cursors(state
);
6598 /* update planes when needed */
6599 for_each_oldnew_plane_in_state(state
, plane
, old_plane_state
, new_plane_state
, i
) {
6600 struct drm_crtc
*crtc
= new_plane_state
->crtc
;
6601 struct drm_crtc_state
*new_crtc_state
;
6602 struct drm_framebuffer
*fb
= new_plane_state
->fb
;
6603 bool plane_needs_flip
;
6604 struct dc_plane_state
*dc_plane
;
6605 struct dm_plane_state
*dm_new_plane_state
= to_dm_plane_state(new_plane_state
);
6607 /* Cursor plane is handled after stream updates */
6608 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
6611 if (!fb
|| !crtc
|| pcrtc
!= crtc
)
6614 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, crtc
);
6615 if (!new_crtc_state
->active
)
6618 dc_plane
= dm_new_plane_state
->dc_state
;
6620 bundle
->surface_updates
[planes_count
].surface
= dc_plane
;
6621 if (new_pcrtc_state
->color_mgmt_changed
) {
6622 bundle
->surface_updates
[planes_count
].gamma
= dc_plane
->gamma_correction
;
6623 bundle
->surface_updates
[planes_count
].in_transfer_func
= dc_plane
->in_transfer_func
;
6626 fill_dc_scaling_info(new_plane_state
,
6627 &bundle
->scaling_infos
[planes_count
]);
6629 bundle
->surface_updates
[planes_count
].scaling_info
=
6630 &bundle
->scaling_infos
[planes_count
];
6632 plane_needs_flip
= old_plane_state
->fb
&& new_plane_state
->fb
;
6634 pflip_present
= pflip_present
|| plane_needs_flip
;
6636 if (!plane_needs_flip
) {
6641 abo
= gem_to_amdgpu_bo(fb
->obj
[0]);
6644 * Wait for all fences on this FB. Do limited wait to avoid
6645 * deadlock during GPU reset when this fence will not signal
6646 * but we hold reservation lock for the BO.
6648 r
= dma_resv_wait_timeout_rcu(abo
->tbo
.base
.resv
, true,
6650 msecs_to_jiffies(5000));
6651 if (unlikely(r
<= 0))
6652 DRM_ERROR("Waiting for fences timed out!");
6655 * TODO This might fail and hence better not used, wait
6656 * explicitly on fences instead
6657 * and in general should be called for
6658 * blocking commit to as per framework helpers
6660 r
= amdgpu_bo_reserve(abo
, true);
6661 if (unlikely(r
!= 0))
6662 DRM_ERROR("failed to reserve buffer before flip\n");
6664 amdgpu_bo_get_tiling_flags(abo
, &tiling_flags
);
6666 amdgpu_bo_unreserve(abo
);
6668 fill_dc_plane_info_and_addr(
6669 dm
->adev
, new_plane_state
, tiling_flags
,
6670 &bundle
->plane_infos
[planes_count
],
6671 &bundle
->flip_addrs
[planes_count
].address
,
6674 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6675 new_plane_state
->plane
->index
,
6676 bundle
->plane_infos
[planes_count
].dcc
.enable
);
6678 bundle
->surface_updates
[planes_count
].plane_info
=
6679 &bundle
->plane_infos
[planes_count
];
6682 * Only allow immediate flips for fast updates that don't
6683 * change FB pitch, DCC state, rotation or mirroing.
6685 bundle
->flip_addrs
[planes_count
].flip_immediate
=
6686 crtc
->state
->async_flip
&&
6687 acrtc_state
->update_type
== UPDATE_TYPE_FAST
;
6689 timestamp_ns
= ktime_get_ns();
6690 bundle
->flip_addrs
[planes_count
].flip_timestamp_in_us
= div_u64(timestamp_ns
, 1000);
6691 bundle
->surface_updates
[planes_count
].flip_addr
= &bundle
->flip_addrs
[planes_count
];
6692 bundle
->surface_updates
[planes_count
].surface
= dc_plane
;
6694 if (!bundle
->surface_updates
[planes_count
].surface
) {
6695 DRM_ERROR("No surface for CRTC: id=%d\n",
6696 acrtc_attach
->crtc_id
);
6700 if (plane
== pcrtc
->primary
)
6701 update_freesync_state_on_stream(
6704 acrtc_state
->stream
,
6706 bundle
->flip_addrs
[planes_count
].flip_timestamp_in_us
);
6708 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6710 bundle
->flip_addrs
[planes_count
].address
.grph
.addr
.high_part
,
6711 bundle
->flip_addrs
[planes_count
].address
.grph
.addr
.low_part
);
6717 if (pflip_present
) {
6719 /* Use old throttling in non-vrr fixed refresh rate mode
6720 * to keep flip scheduling based on target vblank counts
6721 * working in a backwards compatible way, e.g., for
6722 * clients using the GLX_OML_sync_control extension or
6723 * DRI3/Present extension with defined target_msc.
6725 last_flip_vblank
= amdgpu_get_vblank_counter_kms(pcrtc
);
6728 /* For variable refresh rate mode only:
6729 * Get vblank of last completed flip to avoid > 1 vrr
6730 * flips per video frame by use of throttling, but allow
6731 * flip programming anywhere in the possibly large
6732 * variable vrr vblank interval for fine-grained flip
6733 * timing control and more opportunity to avoid stutter
6734 * on late submission of flips.
6736 spin_lock_irqsave(&pcrtc
->dev
->event_lock
, flags
);
6737 last_flip_vblank
= acrtc_attach
->last_flip_vblank
;
6738 spin_unlock_irqrestore(&pcrtc
->dev
->event_lock
, flags
);
6741 target_vblank
= last_flip_vblank
+ wait_for_vblank
;
6744 * Wait until we're out of the vertical blank period before the one
6745 * targeted by the flip
6747 while ((acrtc_attach
->enabled
&&
6748 (amdgpu_display_get_crtc_scanoutpos(dm
->ddev
, acrtc_attach
->crtc_id
,
6749 0, &vpos
, &hpos
, NULL
,
6750 NULL
, &pcrtc
->hwmode
)
6751 & (DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_IN_VBLANK
)) ==
6752 (DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_IN_VBLANK
) &&
6753 (int)(target_vblank
-
6754 amdgpu_get_vblank_counter_kms(pcrtc
)) > 0)) {
6755 usleep_range(1000, 1100);
6758 if (acrtc_attach
->base
.state
->event
) {
6759 drm_crtc_vblank_get(pcrtc
);
6761 spin_lock_irqsave(&pcrtc
->dev
->event_lock
, flags
);
6763 WARN_ON(acrtc_attach
->pflip_status
!= AMDGPU_FLIP_NONE
);
6764 prepare_flip_isr(acrtc_attach
);
6766 spin_unlock_irqrestore(&pcrtc
->dev
->event_lock
, flags
);
6769 if (acrtc_state
->stream
) {
6770 if (acrtc_state
->freesync_vrr_info_changed
)
6771 bundle
->stream_update
.vrr_infopacket
=
6772 &acrtc_state
->stream
->vrr_infopacket
;
6776 /* Update the planes if changed or disable if we don't have any. */
6777 if ((planes_count
|| acrtc_state
->active_planes
== 0) &&
6778 acrtc_state
->stream
) {
6779 bundle
->stream_update
.stream
= acrtc_state
->stream
;
6780 if (new_pcrtc_state
->mode_changed
) {
6781 bundle
->stream_update
.src
= acrtc_state
->stream
->src
;
6782 bundle
->stream_update
.dst
= acrtc_state
->stream
->dst
;
6785 if (new_pcrtc_state
->color_mgmt_changed
) {
6787 * TODO: This isn't fully correct since we've actually
6788 * already modified the stream in place.
6790 bundle
->stream_update
.gamut_remap
=
6791 &acrtc_state
->stream
->gamut_remap_matrix
;
6792 bundle
->stream_update
.output_csc_transform
=
6793 &acrtc_state
->stream
->csc_color_matrix
;
6794 bundle
->stream_update
.out_transfer_func
=
6795 acrtc_state
->stream
->out_transfer_func
;
6798 acrtc_state
->stream
->abm_level
= acrtc_state
->abm_level
;
6799 if (acrtc_state
->abm_level
!= dm_old_crtc_state
->abm_level
)
6800 bundle
->stream_update
.abm_level
= &acrtc_state
->abm_level
;
6803 * If FreeSync state on the stream has changed then we need to
6804 * re-adjust the min/max bounds now that DC doesn't handle this
6805 * as part of commit.
6807 if (amdgpu_dm_vrr_active(dm_old_crtc_state
) !=
6808 amdgpu_dm_vrr_active(acrtc_state
)) {
6809 spin_lock_irqsave(&pcrtc
->dev
->event_lock
, flags
);
6810 dc_stream_adjust_vmin_vmax(
6811 dm
->dc
, acrtc_state
->stream
,
6812 &acrtc_state
->vrr_params
.adjust
);
6813 spin_unlock_irqrestore(&pcrtc
->dev
->event_lock
, flags
);
6815 mutex_lock(&dm
->dc_lock
);
6816 if ((acrtc_state
->update_type
> UPDATE_TYPE_FAST
) &&
6817 acrtc_state
->stream
->link
->psr_allow_active
)
6818 amdgpu_dm_psr_disable(acrtc_state
->stream
);
6820 dc_commit_updates_for_stream(dm
->dc
,
6821 bundle
->surface_updates
,
6823 acrtc_state
->stream
,
6824 &bundle
->stream_update
,
6827 if ((acrtc_state
->update_type
> UPDATE_TYPE_FAST
) &&
6828 acrtc_state
->stream
->psr_version
&&
6829 !acrtc_state
->stream
->link
->psr_feature_enabled
)
6830 amdgpu_dm_link_setup_psr(acrtc_state
->stream
);
6831 else if ((acrtc_state
->update_type
== UPDATE_TYPE_FAST
) &&
6832 acrtc_state
->stream
->link
->psr_feature_enabled
&&
6833 !acrtc_state
->stream
->link
->psr_allow_active
) {
6834 amdgpu_dm_psr_enable(acrtc_state
->stream
);
6837 mutex_unlock(&dm
->dc_lock
);
6841 * Update cursor state *after* programming all the planes.
6842 * This avoids redundant programming in the case where we're going
6843 * to be disabling a single plane - those pipes are being disabled.
6845 if (acrtc_state
->active_planes
)
6846 amdgpu_dm_commit_cursors(state
);
6852 static void amdgpu_dm_commit_audio(struct drm_device
*dev
,
6853 struct drm_atomic_state
*state
)
6855 struct amdgpu_device
*adev
= dev
->dev_private
;
6856 struct amdgpu_dm_connector
*aconnector
;
6857 struct drm_connector
*connector
;
6858 struct drm_connector_state
*old_con_state
, *new_con_state
;
6859 struct drm_crtc_state
*new_crtc_state
;
6860 struct dm_crtc_state
*new_dm_crtc_state
;
6861 const struct dc_stream_status
*status
;
6864 /* Notify device removals. */
6865 for_each_oldnew_connector_in_state(state
, connector
, old_con_state
, new_con_state
, i
) {
6866 if (old_con_state
->crtc
!= new_con_state
->crtc
) {
6867 /* CRTC changes require notification. */
6871 if (!new_con_state
->crtc
)
6874 new_crtc_state
= drm_atomic_get_new_crtc_state(
6875 state
, new_con_state
->crtc
);
6877 if (!new_crtc_state
)
6880 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
))
6884 aconnector
= to_amdgpu_dm_connector(connector
);
6886 mutex_lock(&adev
->dm
.audio_lock
);
6887 inst
= aconnector
->audio_inst
;
6888 aconnector
->audio_inst
= -1;
6889 mutex_unlock(&adev
->dm
.audio_lock
);
6891 amdgpu_dm_audio_eld_notify(adev
, inst
);
6894 /* Notify audio device additions. */
6895 for_each_new_connector_in_state(state
, connector
, new_con_state
, i
) {
6896 if (!new_con_state
->crtc
)
6899 new_crtc_state
= drm_atomic_get_new_crtc_state(
6900 state
, new_con_state
->crtc
);
6902 if (!new_crtc_state
)
6905 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
))
6908 new_dm_crtc_state
= to_dm_crtc_state(new_crtc_state
);
6909 if (!new_dm_crtc_state
->stream
)
6912 status
= dc_stream_get_status(new_dm_crtc_state
->stream
);
6916 aconnector
= to_amdgpu_dm_connector(connector
);
6918 mutex_lock(&adev
->dm
.audio_lock
);
6919 inst
= status
->audio_inst
;
6920 aconnector
->audio_inst
= inst
;
6921 mutex_unlock(&adev
->dm
.audio_lock
);
6923 amdgpu_dm_audio_eld_notify(adev
, inst
);
6928 * Enable interrupts on CRTCs that are newly active, undergone
6929 * a modeset, or have active planes again.
6931 * Done in two passes, based on the for_modeset flag:
6932 * Pass 1: For CRTCs going through modeset
6933 * Pass 2: For CRTCs going from 0 to n active planes
6935 * Interrupts can only be enabled after the planes are programmed,
6936 * so this requires a two-pass approach since we don't want to
6937 * just defer the interrupts until after commit planes every time.
6939 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device
*dev
,
6940 struct drm_atomic_state
*state
,
6943 struct amdgpu_device
*adev
= dev
->dev_private
;
6944 struct drm_crtc
*crtc
;
6945 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
6947 #ifdef CONFIG_DEBUG_FS
6948 enum amdgpu_dm_pipe_crc_source source
;
6951 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
,
6952 new_crtc_state
, i
) {
6953 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
6954 struct dm_crtc_state
*dm_new_crtc_state
=
6955 to_dm_crtc_state(new_crtc_state
);
6956 struct dm_crtc_state
*dm_old_crtc_state
=
6957 to_dm_crtc_state(old_crtc_state
);
6958 bool modeset
= drm_atomic_crtc_needs_modeset(new_crtc_state
);
6961 run_pass
= (for_modeset
&& modeset
) ||
6962 (!for_modeset
&& !modeset
&&
6963 !dm_old_crtc_state
->interrupts_enabled
);
6968 if (!dm_new_crtc_state
->interrupts_enabled
)
6971 manage_dm_interrupts(adev
, acrtc
, true);
6973 #ifdef CONFIG_DEBUG_FS
6974 /* The stream has changed so CRC capture needs to re-enabled. */
6975 source
= dm_new_crtc_state
->crc_src
;
6976 if (amdgpu_dm_is_valid_crc_source(source
)) {
6977 amdgpu_dm_crtc_configure_crc_source(
6978 crtc
, dm_new_crtc_state
,
6979 dm_new_crtc_state
->crc_src
);
6986 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
6987 * @crtc_state: the DRM CRTC state
6988 * @stream_state: the DC stream state.
6990 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
6991 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
6993 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state
*crtc_state
,
6994 struct dc_stream_state
*stream_state
)
6996 stream_state
->mode_changed
= drm_atomic_crtc_needs_modeset(crtc_state
);
6999 static int amdgpu_dm_atomic_commit(struct drm_device
*dev
,
7000 struct drm_atomic_state
*state
,
7003 struct drm_crtc
*crtc
;
7004 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
7005 struct amdgpu_device
*adev
= dev
->dev_private
;
7009 * We evade vblank and pflip interrupts on CRTCs that are undergoing
7010 * a modeset, being disabled, or have no active planes.
7012 * It's done in atomic commit rather than commit tail for now since
7013 * some of these interrupt handlers access the current CRTC state and
7014 * potentially the stream pointer itself.
7016 * Since the atomic state is swapped within atomic commit and not within
7017 * commit tail this would leave to new state (that hasn't been committed yet)
7018 * being accesssed from within the handlers.
7020 * TODO: Fix this so we can do this in commit tail and not have to block
7023 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
7024 struct dm_crtc_state
*dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
7025 struct dm_crtc_state
*dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
7026 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
7028 if (dm_old_crtc_state
->interrupts_enabled
&&
7029 (!dm_new_crtc_state
->interrupts_enabled
||
7030 drm_atomic_crtc_needs_modeset(new_crtc_state
)))
7031 manage_dm_interrupts(adev
, acrtc
, false);
7034 * Add check here for SoC's that support hardware cursor plane, to
7035 * unset legacy_cursor_update
7038 return drm_atomic_helper_commit(dev
, state
, nonblock
);
7040 /*TODO Handle EINTR, reenable IRQ*/
7044 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7045 * @state: The atomic state to commit
7047 * This will tell DC to commit the constructed DC state from atomic_check,
7048 * programming the hardware. Any failures here implies a hardware failure, since
7049 * atomic check should have filtered anything non-kosher.
7051 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state
*state
)
7053 struct drm_device
*dev
= state
->dev
;
7054 struct amdgpu_device
*adev
= dev
->dev_private
;
7055 struct amdgpu_display_manager
*dm
= &adev
->dm
;
7056 struct dm_atomic_state
*dm_state
;
7057 struct dc_state
*dc_state
= NULL
, *dc_state_temp
= NULL
;
7059 struct drm_crtc
*crtc
;
7060 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
7061 unsigned long flags
;
7062 bool wait_for_vblank
= true;
7063 struct drm_connector
*connector
;
7064 struct drm_connector_state
*old_con_state
, *new_con_state
;
7065 struct dm_crtc_state
*dm_old_crtc_state
, *dm_new_crtc_state
;
7066 int crtc_disable_count
= 0;
7068 drm_atomic_helper_update_legacy_modeset_state(dev
, state
);
7070 dm_state
= dm_atomic_get_new_state(state
);
7071 if (dm_state
&& dm_state
->context
) {
7072 dc_state
= dm_state
->context
;
7074 /* No state changes, retain current state. */
7075 dc_state_temp
= dc_create_state(dm
->dc
);
7076 ASSERT(dc_state_temp
);
7077 dc_state
= dc_state_temp
;
7078 dc_resource_state_copy_construct_current(dm
->dc
, dc_state
);
7081 /* update changed items */
7082 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
7083 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
7085 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
7086 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
7089 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7090 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7091 "connectors_changed:%d\n",
7093 new_crtc_state
->enable
,
7094 new_crtc_state
->active
,
7095 new_crtc_state
->planes_changed
,
7096 new_crtc_state
->mode_changed
,
7097 new_crtc_state
->active_changed
,
7098 new_crtc_state
->connectors_changed
);
7100 /* Copy all transient state flags into dc state */
7101 if (dm_new_crtc_state
->stream
) {
7102 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state
->base
,
7103 dm_new_crtc_state
->stream
);
7106 /* handles headless hotplug case, updating new_state and
7107 * aconnector as needed
7110 if (modeset_required(new_crtc_state
, dm_new_crtc_state
->stream
, dm_old_crtc_state
->stream
)) {
7112 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc
->crtc_id
, acrtc
);
7114 if (!dm_new_crtc_state
->stream
) {
7116 * this could happen because of issues with
7117 * userspace notifications delivery.
7118 * In this case userspace tries to set mode on
7119 * display which is disconnected in fact.
7120 * dc_sink is NULL in this case on aconnector.
7121 * We expect reset mode will come soon.
7123 * This can also happen when unplug is done
7124 * during resume sequence ended
7126 * In this case, we want to pretend we still
7127 * have a sink to keep the pipe running so that
7128 * hw state is consistent with the sw state
7130 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7131 __func__
, acrtc
->base
.base
.id
);
7135 if (dm_old_crtc_state
->stream
)
7136 remove_stream(adev
, acrtc
, dm_old_crtc_state
->stream
);
7138 pm_runtime_get_noresume(dev
->dev
);
7140 acrtc
->enabled
= true;
7141 acrtc
->hw_mode
= new_crtc_state
->mode
;
7142 crtc
->hwmode
= new_crtc_state
->mode
;
7143 } else if (modereset_required(new_crtc_state
)) {
7144 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc
->crtc_id
, acrtc
);
7145 /* i.e. reset mode */
7146 if (dm_old_crtc_state
->stream
) {
7147 if (dm_old_crtc_state
->stream
->link
->psr_allow_active
)
7148 amdgpu_dm_psr_disable(dm_old_crtc_state
->stream
);
7150 remove_stream(adev
, acrtc
, dm_old_crtc_state
->stream
);
7153 } /* for_each_crtc_in_state() */
7156 dm_enable_per_frame_crtc_master_sync(dc_state
);
7157 mutex_lock(&dm
->dc_lock
);
7158 WARN_ON(!dc_commit_state(dm
->dc
, dc_state
));
7159 mutex_unlock(&dm
->dc_lock
);
7162 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, i
) {
7163 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
7165 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
7167 if (dm_new_crtc_state
->stream
!= NULL
) {
7168 const struct dc_stream_status
*status
=
7169 dc_stream_get_status(dm_new_crtc_state
->stream
);
7172 status
= dc_stream_get_status_from_state(dc_state
,
7173 dm_new_crtc_state
->stream
);
7176 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state
->stream
, acrtc
);
7178 acrtc
->otg_inst
= status
->primary_otg_inst
;
7181 #ifdef CONFIG_DRM_AMD_DC_HDCP
7182 for_each_oldnew_connector_in_state(state
, connector
, old_con_state
, new_con_state
, i
) {
7183 struct dm_connector_state
*dm_new_con_state
= to_dm_connector_state(new_con_state
);
7184 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(dm_new_con_state
->base
.crtc
);
7185 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
7187 new_crtc_state
= NULL
;
7190 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, &acrtc
->base
);
7192 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
7194 if (dm_new_crtc_state
&& dm_new_crtc_state
->stream
== NULL
&&
7195 connector
->state
->content_protection
== DRM_MODE_CONTENT_PROTECTION_ENABLED
) {
7196 hdcp_reset_display(adev
->dm
.hdcp_workqueue
, aconnector
->dc_link
->link_index
);
7197 new_con_state
->content_protection
= DRM_MODE_CONTENT_PROTECTION_DESIRED
;
7201 if (is_content_protection_different(new_con_state
, old_con_state
, connector
, adev
->dm
.hdcp_workqueue
))
7202 hdcp_update_display(
7203 adev
->dm
.hdcp_workqueue
, aconnector
->dc_link
->link_index
, aconnector
,
7204 new_con_state
->hdcp_content_type
,
7205 new_con_state
->content_protection
== DRM_MODE_CONTENT_PROTECTION_DESIRED
? true
7210 /* Handle connector state changes */
7211 for_each_oldnew_connector_in_state(state
, connector
, old_con_state
, new_con_state
, i
) {
7212 struct dm_connector_state
*dm_new_con_state
= to_dm_connector_state(new_con_state
);
7213 struct dm_connector_state
*dm_old_con_state
= to_dm_connector_state(old_con_state
);
7214 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(dm_new_con_state
->base
.crtc
);
7215 struct dc_surface_update dummy_updates
[MAX_SURFACES
];
7216 struct dc_stream_update stream_update
;
7217 struct dc_info_packet hdr_packet
;
7218 struct dc_stream_status
*status
= NULL
;
7219 bool abm_changed
, hdr_changed
, scaling_changed
;
7221 memset(&dummy_updates
, 0, sizeof(dummy_updates
));
7222 memset(&stream_update
, 0, sizeof(stream_update
));
7225 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, &acrtc
->base
);
7226 old_crtc_state
= drm_atomic_get_old_crtc_state(state
, &acrtc
->base
);
7229 /* Skip any modesets/resets */
7230 if (!acrtc
|| drm_atomic_crtc_needs_modeset(new_crtc_state
))
7233 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
7234 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
7236 scaling_changed
= is_scaling_state_different(dm_new_con_state
,
7239 abm_changed
= dm_new_crtc_state
->abm_level
!=
7240 dm_old_crtc_state
->abm_level
;
7243 is_hdr_metadata_different(old_con_state
, new_con_state
);
7245 if (!scaling_changed
&& !abm_changed
&& !hdr_changed
)
7248 stream_update
.stream
= dm_new_crtc_state
->stream
;
7249 if (scaling_changed
) {
7250 update_stream_scaling_settings(&dm_new_con_state
->base
.crtc
->mode
,
7251 dm_new_con_state
, dm_new_crtc_state
->stream
);
7253 stream_update
.src
= dm_new_crtc_state
->stream
->src
;
7254 stream_update
.dst
= dm_new_crtc_state
->stream
->dst
;
7258 dm_new_crtc_state
->stream
->abm_level
= dm_new_crtc_state
->abm_level
;
7260 stream_update
.abm_level
= &dm_new_crtc_state
->abm_level
;
7264 fill_hdr_info_packet(new_con_state
, &hdr_packet
);
7265 stream_update
.hdr_static_metadata
= &hdr_packet
;
7268 status
= dc_stream_get_status(dm_new_crtc_state
->stream
);
7270 WARN_ON(!status
->plane_count
);
7273 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7274 * Here we create an empty update on each plane.
7275 * To fix this, DC should permit updating only stream properties.
7277 for (j
= 0; j
< status
->plane_count
; j
++)
7278 dummy_updates
[j
].surface
= status
->plane_states
[0];
7281 mutex_lock(&dm
->dc_lock
);
7282 dc_commit_updates_for_stream(dm
->dc
,
7284 status
->plane_count
,
7285 dm_new_crtc_state
->stream
,
7288 mutex_unlock(&dm
->dc_lock
);
7291 /* Count number of newly disabled CRTCs for dropping PM refs later. */
7292 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
,
7293 new_crtc_state
, i
) {
7294 if (old_crtc_state
->active
&& !new_crtc_state
->active
)
7295 crtc_disable_count
++;
7297 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
7298 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
7300 /* Update freesync active state. */
7301 pre_update_freesync_state_on_stream(dm
, dm_new_crtc_state
);
7303 /* Handle vrr on->off / off->on transitions */
7304 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state
,
7308 /* Enable interrupts for CRTCs going through a modeset. */
7309 amdgpu_dm_enable_crtc_interrupts(dev
, state
, true);
7311 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, j
)
7312 if (new_crtc_state
->async_flip
)
7313 wait_for_vblank
= false;
7315 /* update planes when needed per crtc*/
7316 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, j
) {
7317 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
7319 if (dm_new_crtc_state
->stream
)
7320 amdgpu_dm_commit_planes(state
, dc_state
, dev
,
7321 dm
, crtc
, wait_for_vblank
);
7324 /* Enable interrupts for CRTCs going from 0 to n active planes. */
7325 amdgpu_dm_enable_crtc_interrupts(dev
, state
, false);
7327 /* Update audio instances for each connector. */
7328 amdgpu_dm_commit_audio(dev
, state
);
7331 * send vblank event on all events not handled in flip and
7332 * mark consumed event for drm_atomic_helper_commit_hw_done
7334 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
7335 for_each_new_crtc_in_state(state
, crtc
, new_crtc_state
, i
) {
7337 if (new_crtc_state
->event
)
7338 drm_send_event_locked(dev
, &new_crtc_state
->event
->base
);
7340 new_crtc_state
->event
= NULL
;
7342 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
7344 /* Signal HW programming completion */
7345 drm_atomic_helper_commit_hw_done(state
);
7347 if (wait_for_vblank
)
7348 drm_atomic_helper_wait_for_flip_done(dev
, state
);
7350 drm_atomic_helper_cleanup_planes(dev
, state
);
7353 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7354 * so we can put the GPU into runtime suspend if we're not driving any
7357 for (i
= 0; i
< crtc_disable_count
; i
++)
7358 pm_runtime_put_autosuspend(dev
->dev
);
7359 pm_runtime_mark_last_busy(dev
->dev
);
7362 dc_release_state(dc_state_temp
);
7366 static int dm_force_atomic_commit(struct drm_connector
*connector
)
7369 struct drm_device
*ddev
= connector
->dev
;
7370 struct drm_atomic_state
*state
= drm_atomic_state_alloc(ddev
);
7371 struct amdgpu_crtc
*disconnected_acrtc
= to_amdgpu_crtc(connector
->encoder
->crtc
);
7372 struct drm_plane
*plane
= disconnected_acrtc
->base
.primary
;
7373 struct drm_connector_state
*conn_state
;
7374 struct drm_crtc_state
*crtc_state
;
7375 struct drm_plane_state
*plane_state
;
7380 state
->acquire_ctx
= ddev
->mode_config
.acquire_ctx
;
7382 /* Construct an atomic state to restore previous display setting */
7385 * Attach connectors to drm_atomic_state
7387 conn_state
= drm_atomic_get_connector_state(state
, connector
);
7389 ret
= PTR_ERR_OR_ZERO(conn_state
);
7393 /* Attach crtc to drm_atomic_state*/
7394 crtc_state
= drm_atomic_get_crtc_state(state
, &disconnected_acrtc
->base
);
7396 ret
= PTR_ERR_OR_ZERO(crtc_state
);
7400 /* force a restore */
7401 crtc_state
->mode_changed
= true;
7403 /* Attach plane to drm_atomic_state */
7404 plane_state
= drm_atomic_get_plane_state(state
, plane
);
7406 ret
= PTR_ERR_OR_ZERO(plane_state
);
7411 /* Call commit internally with the state we just constructed */
7412 ret
= drm_atomic_commit(state
);
7417 DRM_ERROR("Restoring old state failed with %i\n", ret
);
7418 drm_atomic_state_put(state
);
7424 * This function handles all cases when set mode does not come upon hotplug.
7425 * This includes when a display is unplugged then plugged back into the
7426 * same port and when running without usermode desktop manager supprot
7428 void dm_restore_drm_connector_state(struct drm_device
*dev
,
7429 struct drm_connector
*connector
)
7431 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
7432 struct amdgpu_crtc
*disconnected_acrtc
;
7433 struct dm_crtc_state
*acrtc_state
;
7435 if (!aconnector
->dc_sink
|| !connector
->state
|| !connector
->encoder
)
7438 disconnected_acrtc
= to_amdgpu_crtc(connector
->encoder
->crtc
);
7439 if (!disconnected_acrtc
)
7442 acrtc_state
= to_dm_crtc_state(disconnected_acrtc
->base
.state
);
7443 if (!acrtc_state
->stream
)
7447 * If the previous sink is not released and different from the current,
7448 * we deduce we are in a state where we can not rely on usermode call
7449 * to turn on the display, so we do it here
7451 if (acrtc_state
->stream
->sink
!= aconnector
->dc_sink
)
7452 dm_force_atomic_commit(&aconnector
->base
);
7456 * Grabs all modesetting locks to serialize against any blocking commits,
7457 * Waits for completion of all non blocking commits.
7459 static int do_aquire_global_lock(struct drm_device
*dev
,
7460 struct drm_atomic_state
*state
)
7462 struct drm_crtc
*crtc
;
7463 struct drm_crtc_commit
*commit
;
7467 * Adding all modeset locks to aquire_ctx will
7468 * ensure that when the framework release it the
7469 * extra locks we are locking here will get released to
7471 ret
= drm_modeset_lock_all_ctx(dev
, state
->acquire_ctx
);
7475 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
7476 spin_lock(&crtc
->commit_lock
);
7477 commit
= list_first_entry_or_null(&crtc
->commit_list
,
7478 struct drm_crtc_commit
, commit_entry
);
7480 drm_crtc_commit_get(commit
);
7481 spin_unlock(&crtc
->commit_lock
);
7487 * Make sure all pending HW programming completed and
7490 ret
= wait_for_completion_interruptible_timeout(&commit
->hw_done
, 10*HZ
);
7493 ret
= wait_for_completion_interruptible_timeout(
7494 &commit
->flip_done
, 10*HZ
);
7497 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7498 "timed out\n", crtc
->base
.id
, crtc
->name
);
7500 drm_crtc_commit_put(commit
);
7503 return ret
< 0 ? ret
: 0;
7506 static void get_freesync_config_for_crtc(
7507 struct dm_crtc_state
*new_crtc_state
,
7508 struct dm_connector_state
*new_con_state
)
7510 struct mod_freesync_config config
= {0};
7511 struct amdgpu_dm_connector
*aconnector
=
7512 to_amdgpu_dm_connector(new_con_state
->base
.connector
);
7513 struct drm_display_mode
*mode
= &new_crtc_state
->base
.mode
;
7514 int vrefresh
= drm_mode_vrefresh(mode
);
7516 new_crtc_state
->vrr_supported
= new_con_state
->freesync_capable
&&
7517 vrefresh
>= aconnector
->min_vfreq
&&
7518 vrefresh
<= aconnector
->max_vfreq
;
7520 if (new_crtc_state
->vrr_supported
) {
7521 new_crtc_state
->stream
->ignore_msa_timing_param
= true;
7522 config
.state
= new_crtc_state
->base
.vrr_enabled
?
7523 VRR_STATE_ACTIVE_VARIABLE
:
7525 config
.min_refresh_in_uhz
=
7526 aconnector
->min_vfreq
* 1000000;
7527 config
.max_refresh_in_uhz
=
7528 aconnector
->max_vfreq
* 1000000;
7529 config
.vsif_supported
= true;
7533 new_crtc_state
->freesync_config
= config
;
7536 static void reset_freesync_config_for_crtc(
7537 struct dm_crtc_state
*new_crtc_state
)
7539 new_crtc_state
->vrr_supported
= false;
7541 memset(&new_crtc_state
->vrr_params
, 0,
7542 sizeof(new_crtc_state
->vrr_params
));
7543 memset(&new_crtc_state
->vrr_infopacket
, 0,
7544 sizeof(new_crtc_state
->vrr_infopacket
));
7547 static int dm_update_crtc_state(struct amdgpu_display_manager
*dm
,
7548 struct drm_atomic_state
*state
,
7549 struct drm_crtc
*crtc
,
7550 struct drm_crtc_state
*old_crtc_state
,
7551 struct drm_crtc_state
*new_crtc_state
,
7553 bool *lock_and_validation_needed
)
7555 struct dm_atomic_state
*dm_state
= NULL
;
7556 struct dm_crtc_state
*dm_old_crtc_state
, *dm_new_crtc_state
;
7557 struct dc_stream_state
*new_stream
;
7561 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7562 * update changed items
7564 struct amdgpu_crtc
*acrtc
= NULL
;
7565 struct amdgpu_dm_connector
*aconnector
= NULL
;
7566 struct drm_connector_state
*drm_new_conn_state
= NULL
, *drm_old_conn_state
= NULL
;
7567 struct dm_connector_state
*dm_new_conn_state
= NULL
, *dm_old_conn_state
= NULL
;
7571 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
7572 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
7573 acrtc
= to_amdgpu_crtc(crtc
);
7574 aconnector
= amdgpu_dm_find_first_crtc_matching_connector(state
, crtc
);
7576 /* TODO This hack should go away */
7577 if (aconnector
&& enable
) {
7578 /* Make sure fake sink is created in plug-in scenario */
7579 drm_new_conn_state
= drm_atomic_get_new_connector_state(state
,
7581 drm_old_conn_state
= drm_atomic_get_old_connector_state(state
,
7584 if (IS_ERR(drm_new_conn_state
)) {
7585 ret
= PTR_ERR_OR_ZERO(drm_new_conn_state
);
7589 dm_new_conn_state
= to_dm_connector_state(drm_new_conn_state
);
7590 dm_old_conn_state
= to_dm_connector_state(drm_old_conn_state
);
7592 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
))
7595 new_stream
= create_stream_for_sink(aconnector
,
7596 &new_crtc_state
->mode
,
7598 dm_old_crtc_state
->stream
);
7601 * we can have no stream on ACTION_SET if a display
7602 * was disconnected during S3, in this case it is not an
7603 * error, the OS will be updated after detection, and
7604 * will do the right thing on next atomic commit
7608 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7609 __func__
, acrtc
->base
.base
.id
);
7614 dm_new_crtc_state
->abm_level
= dm_new_conn_state
->abm_level
;
7616 ret
= fill_hdr_info_packet(drm_new_conn_state
,
7617 &new_stream
->hdr_static_metadata
);
7622 * If we already removed the old stream from the context
7623 * (and set the new stream to NULL) then we can't reuse
7624 * the old stream even if the stream and scaling are unchanged.
7625 * We'll hit the BUG_ON and black screen.
7627 * TODO: Refactor this function to allow this check to work
7628 * in all conditions.
7630 if (dm_new_crtc_state
->stream
&&
7631 dc_is_stream_unchanged(new_stream
, dm_old_crtc_state
->stream
) &&
7632 dc_is_stream_scaling_unchanged(new_stream
, dm_old_crtc_state
->stream
)) {
7633 new_crtc_state
->mode_changed
= false;
7634 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7635 new_crtc_state
->mode_changed
);
7639 /* mode_changed flag may get updated above, need to check again */
7640 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
))
7644 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7645 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7646 "connectors_changed:%d\n",
7648 new_crtc_state
->enable
,
7649 new_crtc_state
->active
,
7650 new_crtc_state
->planes_changed
,
7651 new_crtc_state
->mode_changed
,
7652 new_crtc_state
->active_changed
,
7653 new_crtc_state
->connectors_changed
);
7655 /* Remove stream for any changed/disabled CRTC */
7658 if (!dm_old_crtc_state
->stream
)
7661 ret
= dm_atomic_get_state(state
, &dm_state
);
7665 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7668 /* i.e. reset mode */
7669 if (dc_remove_stream_from_ctx(
7672 dm_old_crtc_state
->stream
) != DC_OK
) {
7677 dc_stream_release(dm_old_crtc_state
->stream
);
7678 dm_new_crtc_state
->stream
= NULL
;
7680 reset_freesync_config_for_crtc(dm_new_crtc_state
);
7682 *lock_and_validation_needed
= true;
7684 } else {/* Add stream for any updated/enabled CRTC */
7686 * Quick fix to prevent NULL pointer on new_stream when
7687 * added MST connectors not found in existing crtc_state in the chained mode
7688 * TODO: need to dig out the root cause of that
7690 if (!aconnector
|| (!aconnector
->dc_sink
&& aconnector
->mst_port
))
7693 if (modereset_required(new_crtc_state
))
7696 if (modeset_required(new_crtc_state
, new_stream
,
7697 dm_old_crtc_state
->stream
)) {
7699 WARN_ON(dm_new_crtc_state
->stream
);
7701 ret
= dm_atomic_get_state(state
, &dm_state
);
7705 dm_new_crtc_state
->stream
= new_stream
;
7707 dc_stream_retain(new_stream
);
7709 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
7712 if (dc_add_stream_to_ctx(
7715 dm_new_crtc_state
->stream
) != DC_OK
) {
7720 *lock_and_validation_needed
= true;
7725 /* Release extra reference */
7727 dc_stream_release(new_stream
);
7730 * We want to do dc stream updates that do not require a
7731 * full modeset below.
7733 if (!(enable
&& aconnector
&& new_crtc_state
->enable
&&
7734 new_crtc_state
->active
))
7737 * Given above conditions, the dc state cannot be NULL because:
7738 * 1. We're in the process of enabling CRTCs (just been added
7739 * to the dc context, or already is on the context)
7740 * 2. Has a valid connector attached, and
7741 * 3. Is currently active and enabled.
7742 * => The dc stream state currently exists.
7744 BUG_ON(dm_new_crtc_state
->stream
== NULL
);
7746 /* Scaling or underscan settings */
7747 if (is_scaling_state_different(dm_old_conn_state
, dm_new_conn_state
))
7748 update_stream_scaling_settings(
7749 &new_crtc_state
->mode
, dm_new_conn_state
, dm_new_crtc_state
->stream
);
7752 dm_new_crtc_state
->abm_level
= dm_new_conn_state
->abm_level
;
7755 * Color management settings. We also update color properties
7756 * when a modeset is needed, to ensure it gets reprogrammed.
7758 if (dm_new_crtc_state
->base
.color_mgmt_changed
||
7759 drm_atomic_crtc_needs_modeset(new_crtc_state
)) {
7760 ret
= amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state
);
7765 /* Update Freesync settings. */
7766 get_freesync_config_for_crtc(dm_new_crtc_state
,
7773 dc_stream_release(new_stream
);
7777 static bool should_reset_plane(struct drm_atomic_state
*state
,
7778 struct drm_plane
*plane
,
7779 struct drm_plane_state
*old_plane_state
,
7780 struct drm_plane_state
*new_plane_state
)
7782 struct drm_plane
*other
;
7783 struct drm_plane_state
*old_other_state
, *new_other_state
;
7784 struct drm_crtc_state
*new_crtc_state
;
7788 * TODO: Remove this hack once the checks below are sufficient
7789 * enough to determine when we need to reset all the planes on
7792 if (state
->allow_modeset
)
7795 /* Exit early if we know that we're adding or removing the plane. */
7796 if (old_plane_state
->crtc
!= new_plane_state
->crtc
)
7799 /* old crtc == new_crtc == NULL, plane not in context. */
7800 if (!new_plane_state
->crtc
)
7804 drm_atomic_get_new_crtc_state(state
, new_plane_state
->crtc
);
7806 if (!new_crtc_state
)
7809 /* CRTC Degamma changes currently require us to recreate planes. */
7810 if (new_crtc_state
->color_mgmt_changed
)
7813 if (drm_atomic_crtc_needs_modeset(new_crtc_state
))
7817 * If there are any new primary or overlay planes being added or
7818 * removed then the z-order can potentially change. To ensure
7819 * correct z-order and pipe acquisition the current DC architecture
7820 * requires us to remove and recreate all existing planes.
7822 * TODO: Come up with a more elegant solution for this.
7824 for_each_oldnew_plane_in_state(state
, other
, old_other_state
, new_other_state
, i
) {
7825 if (other
->type
== DRM_PLANE_TYPE_CURSOR
)
7828 if (old_other_state
->crtc
!= new_plane_state
->crtc
&&
7829 new_other_state
->crtc
!= new_plane_state
->crtc
)
7832 if (old_other_state
->crtc
!= new_other_state
->crtc
)
7835 /* TODO: Remove this once we can handle fast format changes. */
7836 if (old_other_state
->fb
&& new_other_state
->fb
&&
7837 old_other_state
->fb
->format
!= new_other_state
->fb
->format
)
7844 static int dm_update_plane_state(struct dc
*dc
,
7845 struct drm_atomic_state
*state
,
7846 struct drm_plane
*plane
,
7847 struct drm_plane_state
*old_plane_state
,
7848 struct drm_plane_state
*new_plane_state
,
7850 bool *lock_and_validation_needed
)
7853 struct dm_atomic_state
*dm_state
= NULL
;
7854 struct drm_crtc
*new_plane_crtc
, *old_plane_crtc
;
7855 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
7856 struct dm_crtc_state
*dm_new_crtc_state
, *dm_old_crtc_state
;
7857 struct dm_plane_state
*dm_new_plane_state
, *dm_old_plane_state
;
7858 struct amdgpu_crtc
*new_acrtc
;
7863 new_plane_crtc
= new_plane_state
->crtc
;
7864 old_plane_crtc
= old_plane_state
->crtc
;
7865 dm_new_plane_state
= to_dm_plane_state(new_plane_state
);
7866 dm_old_plane_state
= to_dm_plane_state(old_plane_state
);
7868 /*TODO Implement better atomic check for cursor plane */
7869 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
) {
7870 if (!enable
|| !new_plane_crtc
||
7871 drm_atomic_plane_disabling(plane
->state
, new_plane_state
))
7874 new_acrtc
= to_amdgpu_crtc(new_plane_crtc
);
7876 if ((new_plane_state
->crtc_w
> new_acrtc
->max_cursor_width
) ||
7877 (new_plane_state
->crtc_h
> new_acrtc
->max_cursor_height
)) {
7878 DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
7879 new_plane_state
->crtc_w
, new_plane_state
->crtc_h
);
7883 if (new_plane_state
->crtc_x
<= -new_acrtc
->max_cursor_width
||
7884 new_plane_state
->crtc_y
<= -new_acrtc
->max_cursor_height
) {
7885 DRM_DEBUG_ATOMIC("Bad cursor position %d, %d\n",
7886 new_plane_state
->crtc_x
, new_plane_state
->crtc_y
);
7893 needs_reset
= should_reset_plane(state
, plane
, old_plane_state
,
7896 /* Remove any changed/removed planes */
7901 if (!old_plane_crtc
)
7904 old_crtc_state
= drm_atomic_get_old_crtc_state(
7905 state
, old_plane_crtc
);
7906 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
7908 if (!dm_old_crtc_state
->stream
)
7911 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
7912 plane
->base
.id
, old_plane_crtc
->base
.id
);
7914 ret
= dm_atomic_get_state(state
, &dm_state
);
7918 if (!dc_remove_plane_from_context(
7920 dm_old_crtc_state
->stream
,
7921 dm_old_plane_state
->dc_state
,
7922 dm_state
->context
)) {
7929 dc_plane_state_release(dm_old_plane_state
->dc_state
);
7930 dm_new_plane_state
->dc_state
= NULL
;
7932 *lock_and_validation_needed
= true;
7934 } else { /* Add new planes */
7935 struct dc_plane_state
*dc_new_plane_state
;
7937 if (drm_atomic_plane_disabling(plane
->state
, new_plane_state
))
7940 if (!new_plane_crtc
)
7943 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, new_plane_crtc
);
7944 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
7946 if (!dm_new_crtc_state
->stream
)
7952 WARN_ON(dm_new_plane_state
->dc_state
);
7954 dc_new_plane_state
= dc_create_plane_state(dc
);
7955 if (!dc_new_plane_state
)
7958 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
7959 plane
->base
.id
, new_plane_crtc
->base
.id
);
7961 ret
= fill_dc_plane_attributes(
7962 new_plane_crtc
->dev
->dev_private
,
7967 dc_plane_state_release(dc_new_plane_state
);
7971 ret
= dm_atomic_get_state(state
, &dm_state
);
7973 dc_plane_state_release(dc_new_plane_state
);
7978 * Any atomic check errors that occur after this will
7979 * not need a release. The plane state will be attached
7980 * to the stream, and therefore part of the atomic
7981 * state. It'll be released when the atomic state is
7984 if (!dc_add_plane_to_context(
7986 dm_new_crtc_state
->stream
,
7988 dm_state
->context
)) {
7990 dc_plane_state_release(dc_new_plane_state
);
7994 dm_new_plane_state
->dc_state
= dc_new_plane_state
;
7996 /* Tell DC to do a full surface update every time there
7997 * is a plane change. Inefficient, but works for now.
7999 dm_new_plane_state
->dc_state
->update_flags
.bits
.full_update
= 1;
8001 *lock_and_validation_needed
= true;
8009 dm_determine_update_type_for_commit(struct amdgpu_display_manager
*dm
,
8010 struct drm_atomic_state
*state
,
8011 enum surface_update_type
*out_type
)
8013 struct dc
*dc
= dm
->dc
;
8014 struct dm_atomic_state
*dm_state
= NULL
, *old_dm_state
= NULL
;
8015 int i
, j
, num_plane
, ret
= 0;
8016 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
8017 struct dm_plane_state
*new_dm_plane_state
, *old_dm_plane_state
;
8018 struct drm_crtc
*new_plane_crtc
;
8019 struct drm_plane
*plane
;
8021 struct drm_crtc
*crtc
;
8022 struct drm_crtc_state
*new_crtc_state
, *old_crtc_state
;
8023 struct dm_crtc_state
*new_dm_crtc_state
, *old_dm_crtc_state
;
8024 struct dc_stream_status
*status
= NULL
;
8025 enum surface_update_type update_type
= UPDATE_TYPE_FAST
;
8026 struct surface_info_bundle
{
8027 struct dc_surface_update surface_updates
[MAX_SURFACES
];
8028 struct dc_plane_info plane_infos
[MAX_SURFACES
];
8029 struct dc_scaling_info scaling_infos
[MAX_SURFACES
];
8030 struct dc_flip_addrs flip_addrs
[MAX_SURFACES
];
8031 struct dc_stream_update stream_update
;
8034 bundle
= kzalloc(sizeof(*bundle
), GFP_KERNEL
);
8037 DRM_ERROR("Failed to allocate update bundle\n");
8038 /* Set type to FULL to avoid crashing in DC*/
8039 update_type
= UPDATE_TYPE_FULL
;
8043 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
8045 memset(bundle
, 0, sizeof(struct surface_info_bundle
));
8047 new_dm_crtc_state
= to_dm_crtc_state(new_crtc_state
);
8048 old_dm_crtc_state
= to_dm_crtc_state(old_crtc_state
);
8051 if (new_dm_crtc_state
->stream
!= old_dm_crtc_state
->stream
) {
8052 update_type
= UPDATE_TYPE_FULL
;
8056 if (!new_dm_crtc_state
->stream
)
8059 for_each_oldnew_plane_in_state(state
, plane
, old_plane_state
, new_plane_state
, j
) {
8060 const struct amdgpu_framebuffer
*amdgpu_fb
=
8061 to_amdgpu_framebuffer(new_plane_state
->fb
);
8062 struct dc_plane_info
*plane_info
= &bundle
->plane_infos
[num_plane
];
8063 struct dc_flip_addrs
*flip_addr
= &bundle
->flip_addrs
[num_plane
];
8064 struct dc_scaling_info
*scaling_info
= &bundle
->scaling_infos
[num_plane
];
8065 uint64_t tiling_flags
;
8067 new_plane_crtc
= new_plane_state
->crtc
;
8068 new_dm_plane_state
= to_dm_plane_state(new_plane_state
);
8069 old_dm_plane_state
= to_dm_plane_state(old_plane_state
);
8071 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
8074 if (new_dm_plane_state
->dc_state
!= old_dm_plane_state
->dc_state
) {
8075 update_type
= UPDATE_TYPE_FULL
;
8079 if (crtc
!= new_plane_crtc
)
8082 bundle
->surface_updates
[num_plane
].surface
=
8083 new_dm_plane_state
->dc_state
;
8085 if (new_crtc_state
->mode_changed
) {
8086 bundle
->stream_update
.dst
= new_dm_crtc_state
->stream
->dst
;
8087 bundle
->stream_update
.src
= new_dm_crtc_state
->stream
->src
;
8090 if (new_crtc_state
->color_mgmt_changed
) {
8091 bundle
->surface_updates
[num_plane
].gamma
=
8092 new_dm_plane_state
->dc_state
->gamma_correction
;
8093 bundle
->surface_updates
[num_plane
].in_transfer_func
=
8094 new_dm_plane_state
->dc_state
->in_transfer_func
;
8095 bundle
->stream_update
.gamut_remap
=
8096 &new_dm_crtc_state
->stream
->gamut_remap_matrix
;
8097 bundle
->stream_update
.output_csc_transform
=
8098 &new_dm_crtc_state
->stream
->csc_color_matrix
;
8099 bundle
->stream_update
.out_transfer_func
=
8100 new_dm_crtc_state
->stream
->out_transfer_func
;
8103 ret
= fill_dc_scaling_info(new_plane_state
,
8108 bundle
->surface_updates
[num_plane
].scaling_info
= scaling_info
;
8111 ret
= get_fb_info(amdgpu_fb
, &tiling_flags
);
8115 ret
= fill_dc_plane_info_and_addr(
8116 dm
->adev
, new_plane_state
, tiling_flags
,
8118 &flip_addr
->address
,
8123 bundle
->surface_updates
[num_plane
].plane_info
= plane_info
;
8124 bundle
->surface_updates
[num_plane
].flip_addr
= flip_addr
;
8133 ret
= dm_atomic_get_state(state
, &dm_state
);
8137 old_dm_state
= dm_atomic_get_old_state(state
);
8138 if (!old_dm_state
) {
8143 status
= dc_stream_get_status_from_state(old_dm_state
->context
,
8144 new_dm_crtc_state
->stream
);
8145 bundle
->stream_update
.stream
= new_dm_crtc_state
->stream
;
8147 * TODO: DC modifies the surface during this call so we need
8148 * to lock here - find a way to do this without locking.
8150 mutex_lock(&dm
->dc_lock
);
8151 update_type
= dc_check_update_surfaces_for_stream(
8152 dc
, bundle
->surface_updates
, num_plane
,
8153 &bundle
->stream_update
, status
);
8154 mutex_unlock(&dm
->dc_lock
);
8156 if (update_type
> UPDATE_TYPE_MED
) {
8157 update_type
= UPDATE_TYPE_FULL
;
8165 *out_type
= update_type
;
8169 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state
*state
, struct drm_crtc
*crtc
)
8171 struct drm_connector
*connector
;
8172 struct drm_connector_state
*conn_state
;
8173 struct amdgpu_dm_connector
*aconnector
= NULL
;
8175 for_each_new_connector_in_state(state
, connector
, conn_state
, i
) {
8176 if (conn_state
->crtc
!= crtc
)
8179 aconnector
= to_amdgpu_dm_connector(connector
);
8180 if (!aconnector
->port
|| !aconnector
->mst_port
)
8189 return drm_dp_mst_add_affected_dsc_crtcs(state
, &aconnector
->mst_port
->mst_mgr
);
8193 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8194 * @dev: The DRM device
8195 * @state: The atomic state to commit
8197 * Validate that the given atomic state is programmable by DC into hardware.
8198 * This involves constructing a &struct dc_state reflecting the new hardware
8199 * state we wish to commit, then querying DC to see if it is programmable. It's
8200 * important not to modify the existing DC state. Otherwise, atomic_check
8201 * may unexpectedly commit hardware changes.
8203 * When validating the DC state, it's important that the right locks are
8204 * acquired. For full updates case which removes/adds/updates streams on one
8205 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
8206 * that any such full update commit will wait for completion of any outstanding
8207 * flip using DRMs synchronization events. See
8208 * dm_determine_update_type_for_commit()
8210 * Note that DM adds the affected connectors for all CRTCs in state, when that
8211 * might not seem necessary. This is because DC stream creation requires the
8212 * DC sink, which is tied to the DRM connector state. Cleaning this up should
8213 * be possible but non-trivial - a possible TODO item.
8215 * Return: -Error code if validation failed.
8217 static int amdgpu_dm_atomic_check(struct drm_device
*dev
,
8218 struct drm_atomic_state
*state
)
8220 struct amdgpu_device
*adev
= dev
->dev_private
;
8221 struct dm_atomic_state
*dm_state
= NULL
;
8222 struct dc
*dc
= adev
->dm
.dc
;
8223 struct drm_connector
*connector
;
8224 struct drm_connector_state
*old_con_state
, *new_con_state
;
8225 struct drm_crtc
*crtc
;
8226 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
8227 struct drm_plane
*plane
;
8228 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
8229 enum surface_update_type update_type
= UPDATE_TYPE_FAST
;
8230 enum surface_update_type overall_update_type
= UPDATE_TYPE_FAST
;
8235 * This bool will be set for true for any modeset/reset
8236 * or plane update which implies non fast surface update.
8238 bool lock_and_validation_needed
= false;
8240 ret
= drm_atomic_helper_check_modeset(dev
, state
);
8244 if (adev
->asic_type
>= CHIP_NAVI10
) {
8245 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
8246 if (drm_atomic_crtc_needs_modeset(new_crtc_state
)) {
8247 ret
= add_affected_mst_dsc_crtcs(state
, crtc
);
8254 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
8255 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
) &&
8256 !new_crtc_state
->color_mgmt_changed
&&
8257 old_crtc_state
->vrr_enabled
== new_crtc_state
->vrr_enabled
)
8260 if (!new_crtc_state
->enable
)
8263 ret
= drm_atomic_add_affected_connectors(state
, crtc
);
8267 ret
= drm_atomic_add_affected_planes(state
, crtc
);
8273 * Add all primary and overlay planes on the CRTC to the state
8274 * whenever a plane is enabled to maintain correct z-ordering
8275 * and to enable fast surface updates.
8277 drm_for_each_crtc(crtc
, dev
) {
8278 bool modified
= false;
8280 for_each_oldnew_plane_in_state(state
, plane
, old_plane_state
, new_plane_state
, i
) {
8281 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
8284 if (new_plane_state
->crtc
== crtc
||
8285 old_plane_state
->crtc
== crtc
) {
8294 drm_for_each_plane_mask(plane
, state
->dev
, crtc
->state
->plane_mask
) {
8295 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
8299 drm_atomic_get_plane_state(state
, plane
);
8301 if (IS_ERR(new_plane_state
)) {
8302 ret
= PTR_ERR(new_plane_state
);
8308 /* Remove exiting planes if they are modified */
8309 for_each_oldnew_plane_in_state_reverse(state
, plane
, old_plane_state
, new_plane_state
, i
) {
8310 ret
= dm_update_plane_state(dc
, state
, plane
,
8314 &lock_and_validation_needed
);
8319 /* Disable all crtcs which require disable */
8320 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
8321 ret
= dm_update_crtc_state(&adev
->dm
, state
, crtc
,
8325 &lock_and_validation_needed
);
8330 /* Enable all crtcs which require enable */
8331 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
8332 ret
= dm_update_crtc_state(&adev
->dm
, state
, crtc
,
8336 &lock_and_validation_needed
);
8341 /* Add new/modified planes */
8342 for_each_oldnew_plane_in_state_reverse(state
, plane
, old_plane_state
, new_plane_state
, i
) {
8343 ret
= dm_update_plane_state(dc
, state
, plane
,
8347 &lock_and_validation_needed
);
8352 /* Run this here since we want to validate the streams we created */
8353 ret
= drm_atomic_helper_check_planes(dev
, state
);
8357 if (state
->legacy_cursor_update
) {
8359 * This is a fast cursor update coming from the plane update
8360 * helper, check if it can be done asynchronously for better
8363 state
->async_update
=
8364 !drm_atomic_helper_async_check(dev
, state
);
8367 * Skip the remaining global validation if this is an async
8368 * update. Cursor updates can be done without affecting
8369 * state or bandwidth calcs and this avoids the performance
8370 * penalty of locking the private state object and
8371 * allocating a new dc_state.
8373 if (state
->async_update
)
8377 /* Check scaling and underscan changes*/
8378 /* TODO Removed scaling changes validation due to inability to commit
8379 * new stream into context w\o causing full reset. Need to
8380 * decide how to handle.
8382 for_each_oldnew_connector_in_state(state
, connector
, old_con_state
, new_con_state
, i
) {
8383 struct dm_connector_state
*dm_old_con_state
= to_dm_connector_state(old_con_state
);
8384 struct dm_connector_state
*dm_new_con_state
= to_dm_connector_state(new_con_state
);
8385 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(dm_new_con_state
->base
.crtc
);
8387 /* Skip any modesets/resets */
8388 if (!acrtc
|| drm_atomic_crtc_needs_modeset(
8389 drm_atomic_get_new_crtc_state(state
, &acrtc
->base
)))
8392 /* Skip any thing not scale or underscan changes */
8393 if (!is_scaling_state_different(dm_new_con_state
, dm_old_con_state
))
8396 overall_update_type
= UPDATE_TYPE_FULL
;
8397 lock_and_validation_needed
= true;
8400 ret
= dm_determine_update_type_for_commit(&adev
->dm
, state
, &update_type
);
8404 if (overall_update_type
< update_type
)
8405 overall_update_type
= update_type
;
8408 * lock_and_validation_needed was an old way to determine if we need to set
8409 * the global lock. Leaving it in to check if we broke any corner cases
8410 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8411 * lock_and_validation_needed false = UPDATE_TYPE_FAST
8413 if (lock_and_validation_needed
&& overall_update_type
<= UPDATE_TYPE_FAST
)
8414 WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8416 if (overall_update_type
> UPDATE_TYPE_FAST
) {
8417 ret
= dm_atomic_get_state(state
, &dm_state
);
8421 ret
= do_aquire_global_lock(dev
, state
);
8425 #if defined(CONFIG_DRM_AMD_DC_DCN)
8426 if (!compute_mst_dsc_configs_for_state(state
, dm_state
->context
))
8429 ret
= dm_update_mst_vcpi_slots_for_dsc(state
, dm_state
->context
);
8435 * Perform validation of MST topology in the state:
8436 * We need to perform MST atomic check before calling
8437 * dc_validate_global_state(), or there is a chance
8438 * to get stuck in an infinite loop and hang eventually.
8440 ret
= drm_dp_mst_atomic_check(state
);
8444 if (dc_validate_global_state(dc
, dm_state
->context
, false) != DC_OK
) {
8450 * The commit is a fast update. Fast updates shouldn't change
8451 * the DC context, affect global validation, and can have their
8452 * commit work done in parallel with other commits not touching
8453 * the same resource. If we have a new DC context as part of
8454 * the DM atomic state from validation we need to free it and
8455 * retain the existing one instead.
8457 struct dm_atomic_state
*new_dm_state
, *old_dm_state
;
8459 new_dm_state
= dm_atomic_get_new_state(state
);
8460 old_dm_state
= dm_atomic_get_old_state(state
);
8462 if (new_dm_state
&& old_dm_state
) {
8463 if (new_dm_state
->context
)
8464 dc_release_state(new_dm_state
->context
);
8466 new_dm_state
->context
= old_dm_state
->context
;
8468 if (old_dm_state
->context
)
8469 dc_retain_state(old_dm_state
->context
);
8473 /* Store the overall update type for use later in atomic check. */
8474 for_each_new_crtc_in_state (state
, crtc
, new_crtc_state
, i
) {
8475 struct dm_crtc_state
*dm_new_crtc_state
=
8476 to_dm_crtc_state(new_crtc_state
);
8478 dm_new_crtc_state
->update_type
= (int)overall_update_type
;
8481 /* Must be success */
8486 if (ret
== -EDEADLK
)
8487 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8488 else if (ret
== -EINTR
|| ret
== -EAGAIN
|| ret
== -ERESTARTSYS
)
8489 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8491 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret
);
8496 static bool is_dp_capable_without_timing_msa(struct dc
*dc
,
8497 struct amdgpu_dm_connector
*amdgpu_dm_connector
)
8500 bool capable
= false;
8502 if (amdgpu_dm_connector
->dc_link
&&
8503 dm_helpers_dp_read_dpcd(
8505 amdgpu_dm_connector
->dc_link
,
8506 DP_DOWN_STREAM_PORT_COUNT
,
8508 sizeof(dpcd_data
))) {
8509 capable
= (dpcd_data
& DP_MSA_TIMING_PAR_IGNORED
) ? true:false;
8514 void amdgpu_dm_update_freesync_caps(struct drm_connector
*connector
,
8518 bool edid_check_required
;
8519 struct detailed_timing
*timing
;
8520 struct detailed_non_pixel
*data
;
8521 struct detailed_data_monitor_range
*range
;
8522 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
8523 to_amdgpu_dm_connector(connector
);
8524 struct dm_connector_state
*dm_con_state
= NULL
;
8526 struct drm_device
*dev
= connector
->dev
;
8527 struct amdgpu_device
*adev
= dev
->dev_private
;
8528 bool freesync_capable
= false;
8530 if (!connector
->state
) {
8531 DRM_ERROR("%s - Connector has no state", __func__
);
8536 dm_con_state
= to_dm_connector_state(connector
->state
);
8538 amdgpu_dm_connector
->min_vfreq
= 0;
8539 amdgpu_dm_connector
->max_vfreq
= 0;
8540 amdgpu_dm_connector
->pixel_clock_mhz
= 0;
8545 dm_con_state
= to_dm_connector_state(connector
->state
);
8547 edid_check_required
= false;
8548 if (!amdgpu_dm_connector
->dc_sink
) {
8549 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
8552 if (!adev
->dm
.freesync_module
)
8555 * if edid non zero restrict freesync only for dp and edp
8558 if (amdgpu_dm_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT
8559 || amdgpu_dm_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_EDP
) {
8560 edid_check_required
= is_dp_capable_without_timing_msa(
8562 amdgpu_dm_connector
);
8565 if (edid_check_required
== true && (edid
->version
> 1 ||
8566 (edid
->version
== 1 && edid
->revision
> 1))) {
8567 for (i
= 0; i
< 4; i
++) {
8569 timing
= &edid
->detailed_timings
[i
];
8570 data
= &timing
->data
.other_data
;
8571 range
= &data
->data
.range
;
8573 * Check if monitor has continuous frequency mode
8575 if (data
->type
!= EDID_DETAIL_MONITOR_RANGE
)
8578 * Check for flag range limits only. If flag == 1 then
8579 * no additional timing information provided.
8580 * Default GTF, GTF Secondary curve and CVT are not
8583 if (range
->flags
!= 1)
8586 amdgpu_dm_connector
->min_vfreq
= range
->min_vfreq
;
8587 amdgpu_dm_connector
->max_vfreq
= range
->max_vfreq
;
8588 amdgpu_dm_connector
->pixel_clock_mhz
=
8589 range
->pixel_clock_mhz
* 10;
8593 if (amdgpu_dm_connector
->max_vfreq
-
8594 amdgpu_dm_connector
->min_vfreq
> 10) {
8596 freesync_capable
= true;
8602 dm_con_state
->freesync_capable
= freesync_capable
;
8604 if (connector
->vrr_capable_property
)
8605 drm_connector_set_vrr_capable_property(connector
,
8609 static void amdgpu_dm_set_psr_caps(struct dc_link
*link
)
8611 uint8_t dpcd_data
[EDP_PSR_RECEIVER_CAP_SIZE
];
8613 if (!(link
->connector_signal
& SIGNAL_TYPE_EDP
))
8615 if (link
->type
== dc_connection_none
)
8617 if (dm_helpers_dp_read_dpcd(NULL
, link
, DP_PSR_SUPPORT
,
8618 dpcd_data
, sizeof(dpcd_data
))) {
8619 link
->psr_feature_enabled
= dpcd_data
[0] ? true:false;
8620 DRM_INFO("PSR support:%d\n", link
->psr_feature_enabled
);
8625 * amdgpu_dm_link_setup_psr() - configure psr link
8626 * @stream: stream state
8628 * Return: true if success
8630 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state
*stream
)
8632 struct dc_link
*link
= NULL
;
8633 struct psr_config psr_config
= {0};
8634 struct psr_context psr_context
= {0};
8635 struct dc
*dc
= NULL
;
8641 link
= stream
->link
;
8644 psr_config
.psr_version
= dc
->res_pool
->dmcu
->dmcu_version
.psr_version
;
8646 if (psr_config
.psr_version
> 0) {
8647 psr_config
.psr_exit_link_training_required
= 0x1;
8648 psr_config
.psr_frame_capture_indication_req
= 0;
8649 psr_config
.psr_rfb_setup_time
= 0x37;
8650 psr_config
.psr_sdp_transmit_line_num_deadline
= 0x20;
8651 psr_config
.allow_smu_optimizations
= 0x0;
8653 ret
= dc_link_setup_psr(link
, stream
, &psr_config
, &psr_context
);
8656 DRM_DEBUG_DRIVER("PSR link: %d\n", link
->psr_feature_enabled
);
8662 * amdgpu_dm_psr_enable() - enable psr f/w
8663 * @stream: stream state
8665 * Return: true if success
8667 bool amdgpu_dm_psr_enable(struct dc_stream_state
*stream
)
8669 struct dc_link
*link
= stream
->link
;
8670 unsigned int vsync_rate_hz
= 0;
8671 struct dc_static_screen_params params
= {0};
8672 /* Calculate number of static frames before generating interrupt to
8675 // Init fail safe of 2 frames static
8676 unsigned int num_frames_static
= 2;
8678 DRM_DEBUG_DRIVER("Enabling psr...\n");
8680 vsync_rate_hz
= div64_u64(div64_u64((
8681 stream
->timing
.pix_clk_100hz
* 100),
8682 stream
->timing
.v_total
),
8683 stream
->timing
.h_total
);
8686 * Calculate number of frames such that at least 30 ms of time has
8689 if (vsync_rate_hz
!= 0) {
8690 unsigned int frame_time_microsec
= 1000000 / vsync_rate_hz
;
8691 num_frames_static
= (30000 / frame_time_microsec
) + 1;
8694 params
.triggers
.cursor_update
= true;
8695 params
.triggers
.overlay_update
= true;
8696 params
.triggers
.surface_update
= true;
8697 params
.num_frames
= num_frames_static
;
8699 dc_stream_set_static_screen_params(link
->ctx
->dc
,
8703 return dc_link_set_psr_allow_active(link
, true, false);
8707 * amdgpu_dm_psr_disable() - disable psr f/w
8708 * @stream: stream state
8710 * Return: true if success
8712 static bool amdgpu_dm_psr_disable(struct dc_stream_state
*stream
)
8715 DRM_DEBUG_DRIVER("Disabling psr...\n");
8717 return dc_link_set_psr_allow_active(stream
->link
, false, true);