/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/inc/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);


/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

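/*
 * Check whether freesync is active on the given CRTC state, i.e. whether VRR
 * is in the truly variable state or in the fixed-rate (low framerate
 * compensation) state.
 */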
static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

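/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the VUPDATE interrupt fired at the end of the front-porch. In VRR
 * mode this is where core vblank handling is performed, since vblank
 * timestamps are only valid outside of the front-porch; it also drives BTR
 * processing on pre-DCE12 ASICs.
 */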
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      amdgpu_dm_vrr_active(acrtc_state),
		      acrtc_state->active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!amdgpu_dm_vrr_active(acrtc_state))
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc_state->stream,
					     &acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
					   &acrtc_state->vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc_state->active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

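/*
 * Register the audio component so the HDA driver can query ELD and
 * connection state for each audio pin exposed by DC's resource pool.
 */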
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

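/**
 * dm_dmub_hw_init() - Initialize DMUB hardware
 * @adev: amdgpu_device pointer
 *
 * Copies the DMUB firmware windows and the VBIOS image into the reserved
 * framebuffer regions, programs the hardware parameters and waits for the
 * DMUB auto-load to complete. Returns 0 on success, or immediately if DMUB
 * isn't supported on the ASIC; negative error code otherwise.
 */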
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
	       fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	/* Guard against reaching here with dc == NULL from the
	 * amdgpu_dm_init() error path.
	 */
	if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

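/*
 * Request the DMCU firmware for ASICs that need it loaded through PSP and
 * register its ERAM and interrupt-vector regions with the ucode loader.
 * ASICs without a separate DMCU image return 0 without loading anything.
 */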
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

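/**
 * dm_dmub_sw_init() - Set up the DMUB service
 * @adev: amdgpu_device pointer
 *
 * Loads and validates the DMUB firmware, creates the DMUB service, computes
 * the region layout for the firmware windows, and allocates the backing
 * framebuffer object together with its per-region info.
 */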
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	/* Read the firmware version before it is first reported below. */
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes);

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	if (adev->dm.dmub_fw) {
		release_firmware(adev->dm.dmub_fw);
		adev->dm.dmub_fw = NULL;
	}

	if (adev->dm.fw_dmcu) {
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
	}

	return 0;
}

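/* Start MST topology management on every connector backed by an MST branch. */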
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	bool ret = false;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction. Don't allow below 1%:
	 * 0xFFFF * 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* todo will enable for navi10 */
	if (adev->asic_type <= CHIP_RAVEN) {
		ret = dmcu_load_iram(dmcu, params);

		if (!ret)
			return -EINVAL;
	}

	return detect_mst_link_for_all_connectors(adev->ddev);
}

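/*
 * Suspend or resume the MST topology managers on all MST-capable connectors.
 * On resume, a failed topology probe tears MST down and requests a hotplug
 * event so userspace can re-detect the display.
 */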
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct,
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermarks are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	mutex_lock(&smu->mutex);

	/* pass data to smu controller */
	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
	    !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
		ret = smu_write_watermarks_table(smu);

		if (ret) {
			mutex_unlock(&smu->mutex);
			DRM_ERROR("Failed to update WMTABLE!\n");
			return ret;
		}
		smu->watermarks_bitmap |= WATERMARKS_LOADED;
	}

	mutex_unlock(&smu->mutex);

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

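/* Save atomic state, quiesce MST and DM interrupts, then put DC into D3. */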
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return ret;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

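/*
 * Emulate link detection for connectors that are forced on: build a virtual
 * sink matching the connector's signal type and try to read the local EDID,
 * instead of performing real link detection.
 */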
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");

}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	int i, r;

	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
	if (r)
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/* Do detection */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}

b8592b48
LL
1748/**
1749 * DOC: DM Lifecycle
1750 *
 1751 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
1752 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1753 * the base driver's device list to be initialized and torn down accordingly.
1754 *
1755 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1756 */
1757
4562236b
HW
1758static const struct amd_ip_funcs amdgpu_dm_funcs = {
1759 .name = "dm",
1760 .early_init = dm_early_init,
7abcf6b5 1761 .late_init = dm_late_init,
4562236b
HW
1762 .sw_init = dm_sw_init,
1763 .sw_fini = dm_sw_fini,
1764 .hw_init = dm_hw_init,
1765 .hw_fini = dm_hw_fini,
1766 .suspend = dm_suspend,
1767 .resume = dm_resume,
1768 .is_idle = dm_is_idle,
1769 .wait_for_idle = dm_wait_for_idle,
1770 .check_soft_reset = dm_check_soft_reset,
1771 .soft_reset = dm_soft_reset,
1772 .set_clockgating_state = dm_set_clockgating_state,
1773 .set_powergating_state = dm_set_powergating_state,
1774};
1775
1776const struct amdgpu_ip_block_version dm_ip_block =
1777{
1778 .type = AMD_IP_BLOCK_TYPE_DCE,
1779 .major = 1,
1780 .minor = 0,
1781 .rev = 0,
1782 .funcs = &amdgpu_dm_funcs,
1783};
1784
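/*
 * For reference, the base driver picks this IP block up during SoC
 * setup. A minimal sketch of the call site (the actual wiring lives in
 * the per-SoC code, e.g. soc15.c, under CONFIG_DRM_AMD_DC):
 *
 *	if (amdgpu_device_has_dc_support(adev))
 *		amdgpu_device_ip_block_add(adev, &dm_ip_block);
 */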
ca3268c4 1785
b8592b48
LL
1786/**
1787 * DOC: atomic
1788 *
1789 * *WIP*
1790 */
0a323b84 1791
b3663f70 1792static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
4d4772f6 1793 .fb_create = amdgpu_display_user_framebuffer_create,
366c1baa 1794 .output_poll_changed = drm_fb_helper_output_poll_changed,
4562236b 1795 .atomic_check = amdgpu_dm_atomic_check,
da5c47f6 1796 .atomic_commit = amdgpu_dm_atomic_commit,
54f5499a
AG
1797};
1798
1799static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
1800 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
4562236b
HW
1801};
1802
94562810
RS
1803static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
1804{
1805 u32 max_cll, min_cll, max, min, q, r;
1806 struct amdgpu_dm_backlight_caps *caps;
1807 struct amdgpu_display_manager *dm;
1808 struct drm_connector *conn_base;
1809 struct amdgpu_device *adev;
1810 static const u8 pre_computed_values[] = {
1811 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
1812 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
1813
1814 if (!aconnector || !aconnector->dc_link)
1815 return;
1816
1817 conn_base = &aconnector->base;
1818 adev = conn_base->dev->dev_private;
1819 dm = &adev->dm;
1820 caps = &dm->backlight_caps;
1821 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
1822 caps->aux_support = false;
1823 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
1824 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
1825
1826 if (caps->ext_caps->bits.oled == 1 ||
1827 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
1828 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
1829 caps->aux_support = true;
1830
1831 /* From the specification (CTA-861-G), for calculating the maximum
1832 * luminance we need to use:
1833 * Luminance = 50*2**(CV/32)
1834 * Where CV is a one-byte value.
 1835 * Calculating this expression directly would need floating-point
 1836 * precision; to avoid that complexity, we take advantage of the fact
 1837 * that CV is divided by a constant. From Euclid's division algorithm,
 1838 * we know that CV can be written as: CV = 32*q + r. Next, we replace
 1839 * CV in the Luminance expression and get 50*(2**q)*(2**(r/32)), so we
 1840 * just need to pre-compute the values of 50*2**(r/32). They were
 1841 * generated with the following Ruby line:
 1842 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
 1843 * The results of the above expression can be verified against
 1844 * pre_computed_values.
1845 */
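/*
 * Worked example with a hypothetical sink value: max_cll = 70 gives
 * q = 70 >> 5 = 2 and r = 70 % 32 = 6, so
 * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228,
 * which matches 50 * 2**(70/32) ~= 227.8 nits.
 */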
1846 q = max_cll >> 5;
1847 r = max_cll % 32;
1848 max = (1 << q) * pre_computed_values[r];
1849
1850 // min luminance: maxLum * (CV/255)^2 / 100
1851 q = DIV_ROUND_CLOSEST(min_cll, 255);
1852 min = max * DIV_ROUND_CLOSEST((q * q), 100);
1853
1854 caps->aux_max_input_signal = max;
1855 caps->aux_min_input_signal = min;
1856}
1857
97e51c16
HW
1858void amdgpu_dm_update_connector_after_detect(
1859 struct amdgpu_dm_connector *aconnector)
4562236b
HW
1860{
1861 struct drm_connector *connector = &aconnector->base;
1862 struct drm_device *dev = connector->dev;
b73a22d3 1863 struct dc_sink *sink;
4562236b
HW
1864
1865 /* MST handled by drm_mst framework */
1866 if (aconnector->mst_mgr.mst_state == true)
1867 return;
1868
1869
1870 sink = aconnector->dc_link->local_sink;
dcd5fb82
MF
1871 if (sink)
1872 dc_sink_retain(sink);
4562236b 1873
1f6010a9
DF
1874 /*
1875 * Edid mgmt connector gets first update only in mode_valid hook and then
4562236b 1876 * the connector sink is set to either a fake or a physical sink, depending on link status.
1f6010a9 1877 * Skip if already done during boot.
4562236b
HW
1878 */
1879 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
1880 && aconnector->dc_em_sink) {
1881
1f6010a9
DF
1882 /*
 1883 * For S3 resume with a headless config, use the eml_sink to fake a
 1884 * stream, because connector->sink is set to NULL on resume.
4562236b
HW
1885 */
1886 mutex_lock(&dev->mode_config.mutex);
1887
1888 if (sink) {
922aa1e1 1889 if (aconnector->dc_sink) {
98e6436d 1890 amdgpu_dm_update_freesync_caps(connector, NULL);
1f6010a9
DF
1891 /*
 1892 * The retain and release below are used to
 1893 * bump up the refcount for the sink because the link no longer
 1894 * points to it after disconnect; otherwise the next crtc-to-connector
922aa1e1
AG
 1895 * reshuffle by the UMD would trigger an unwanted dc_sink release.
 1896 */
dcd5fb82 1897 dc_sink_release(aconnector->dc_sink);
922aa1e1 1898 }
4562236b 1899 aconnector->dc_sink = sink;
dcd5fb82 1900 dc_sink_retain(aconnector->dc_sink);
98e6436d
AK
1901 amdgpu_dm_update_freesync_caps(connector,
1902 aconnector->edid);
4562236b 1903 } else {
98e6436d 1904 amdgpu_dm_update_freesync_caps(connector, NULL);
dcd5fb82 1905 if (!aconnector->dc_sink) {
4562236b 1906 aconnector->dc_sink = aconnector->dc_em_sink;
922aa1e1 1907 dc_sink_retain(aconnector->dc_sink);
dcd5fb82 1908 }
4562236b
HW
1909 }
1910
1911 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82
MF
1912
1913 if (sink)
1914 dc_sink_release(sink);
4562236b
HW
1915 return;
1916 }
1917
1918 /*
 1919 * TODO: temporary guard until a proper fix is found.
 1920 * If this sink is an MST sink, we should not do anything.
1921 */
dcd5fb82
MF
1922 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
1923 dc_sink_release(sink);
4562236b 1924 return;
dcd5fb82 1925 }
4562236b
HW
1926
1927 if (aconnector->dc_sink == sink) {
1f6010a9
DF
1928 /*
1929 * We got a DP short pulse (Link Loss, DP CTS, etc...).
1930 * Do nothing!!
1931 */
f1ad2f5e 1932 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
4562236b 1933 aconnector->connector_id);
dcd5fb82
MF
1934 if (sink)
1935 dc_sink_release(sink);
4562236b
HW
1936 return;
1937 }
1938
f1ad2f5e 1939 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
4562236b
HW
1940 aconnector->connector_id, aconnector->dc_sink, sink);
1941
1942 mutex_lock(&dev->mode_config.mutex);
1943
1f6010a9
DF
1944 /*
1945 * 1. Update status of the drm connector
1946 * 2. Send an event and let userspace tell us what to do
1947 */
4562236b 1948 if (sink) {
1f6010a9
DF
1949 /*
1950 * TODO: check if we still need the S3 mode update workaround.
1951 * If yes, put it here.
1952 */
4562236b 1953 if (aconnector->dc_sink)
98e6436d 1954 amdgpu_dm_update_freesync_caps(connector, NULL);
4562236b
HW
1955
1956 aconnector->dc_sink = sink;
dcd5fb82 1957 dc_sink_retain(aconnector->dc_sink);
900b3cb1 1958 if (sink->dc_edid.length == 0) {
4562236b 1959 aconnector->edid = NULL;
e6142dd5
AP
1960 if (aconnector->dc_link->aux_mode) {
1961 drm_dp_cec_unset_edid(
1962 &aconnector->dm_dp_aux.aux);
1963 }
900b3cb1 1964 } else {
4562236b 1965 aconnector->edid =
e6142dd5 1966 (struct edid *)sink->dc_edid.raw_edid;
4562236b 1967
c555f023 1968 drm_connector_update_edid_property(connector,
e6142dd5
AP
1969 aconnector->edid);
1970
1971 if (aconnector->dc_link->aux_mode)
1972 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
1973 aconnector->edid);
4562236b 1974 }
e6142dd5 1975
98e6436d 1976 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
94562810 1977 update_connector_ext_caps(aconnector);
4562236b 1978 } else {
e86e8947 1979 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
98e6436d 1980 amdgpu_dm_update_freesync_caps(connector, NULL);
c555f023 1981 drm_connector_update_edid_property(connector, NULL);
4562236b 1982 aconnector->num_modes = 0;
dcd5fb82 1983 dc_sink_release(aconnector->dc_sink);
4562236b 1984 aconnector->dc_sink = NULL;
5326c452 1985 aconnector->edid = NULL;
0c8620d6
BL
1986#ifdef CONFIG_DRM_AMD_DC_HDCP
1987 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
1988 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
1989 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
1990#endif
4562236b
HW
1991 }
1992
1993 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82
MF
1994
1995 if (sink)
1996 dc_sink_release(sink);
4562236b
HW
1997}
1998
1999static void handle_hpd_irq(void *param)
2000{
c84dec2f 2001 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
2002 struct drm_connector *connector = &aconnector->base;
2003 struct drm_device *dev = connector->dev;
fbbdadf2 2004 enum dc_connection_type new_connection_type = dc_connection_none;
0c8620d6
BL
2005#ifdef CONFIG_DRM_AMD_DC_HDCP
2006 struct amdgpu_device *adev = dev->dev_private;
2007#endif
4562236b 2008
1f6010a9
DF
2009 /*
 2010 * In case of failure or MST there is no need to update the connector status
 2011 * or notify the OS, since (in the MST case) MST does this in its own context.
4562236b
HW
2012 */
2013 mutex_lock(&aconnector->hpd_lock);
2e0ac3d6 2014
0c8620d6 2015#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 2016 if (adev->dm.hdcp_workqueue)
96a3b32e 2017 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
0c8620d6 2018#endif
2e0ac3d6
HW
2019 if (aconnector->fake_enable)
2020 aconnector->fake_enable = false;
2021
fbbdadf2
BL
2022 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2023 DRM_ERROR("KMS: Failed to detect connector\n");
2024
2025 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2026 emulated_link_detect(aconnector->dc_link);
2027
2028
2029 drm_modeset_lock_all(dev);
2030 dm_restore_drm_connector_state(dev, connector);
2031 drm_modeset_unlock_all(dev);
2032
2033 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2034 drm_kms_helper_hotplug_event(dev);
2035
2036 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
4562236b
HW
2037 amdgpu_dm_update_connector_after_detect(aconnector);
2038
2039
2040 drm_modeset_lock_all(dev);
2041 dm_restore_drm_connector_state(dev, connector);
2042 drm_modeset_unlock_all(dev);
2043
2044 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2045 drm_kms_helper_hotplug_event(dev);
2046 }
2047 mutex_unlock(&aconnector->hpd_lock);
2048
2049}
2050
c84dec2f 2051static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
4562236b
HW
2052{
2053 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2054 uint8_t dret;
2055 bool new_irq_handled = false;
2056 int dpcd_addr;
2057 int dpcd_bytes_to_read;
2058
2059 const int max_process_count = 30;
2060 int process_count = 0;
2061
2062 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2063
2064 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2065 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2066 /* DPCD 0x200 - 0x201 for downstream IRQ */
2067 dpcd_addr = DP_SINK_COUNT;
2068 } else {
2069 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2070 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2071 dpcd_addr = DP_SINK_COUNT_ESI;
2072 }
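/*
 * With the DPCD 1.2+ layout selected above, one AUX read covers
 * 0x2002-0x2005 (4 bytes); the ACK further below then writes
 * esi[1..3] back to 0x2003-0x2005 to clear the serviced IRQ bits.
 */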
2073
2074 dret = drm_dp_dpcd_read(
2075 &aconnector->dm_dp_aux.aux,
2076 dpcd_addr,
2077 esi,
2078 dpcd_bytes_to_read);
2079
2080 while (dret == dpcd_bytes_to_read &&
2081 process_count < max_process_count) {
2082 uint8_t retry;
2083 dret = 0;
2084
2085 process_count++;
2086
f1ad2f5e 2087 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
4562236b
HW
2088 /* handle HPD short pulse irq */
2089 if (aconnector->mst_mgr.mst_state)
2090 drm_dp_mst_hpd_irq(
2091 &aconnector->mst_mgr,
2092 esi,
2093 &new_irq_handled);
4562236b
HW
2094
2095 if (new_irq_handled) {
 2096 /* ACK at DPCD to notify downstream */
2097 const int ack_dpcd_bytes_to_write =
2098 dpcd_bytes_to_read - 1;
2099
2100 for (retry = 0; retry < 3; retry++) {
2101 uint8_t wret;
2102
2103 wret = drm_dp_dpcd_write(
2104 &aconnector->dm_dp_aux.aux,
2105 dpcd_addr + 1,
2106 &esi[1],
2107 ack_dpcd_bytes_to_write);
2108 if (wret == ack_dpcd_bytes_to_write)
2109 break;
2110 }
2111
1f6010a9 2112 /* check if there is a new irq to be handled */
4562236b
HW
2113 dret = drm_dp_dpcd_read(
2114 &aconnector->dm_dp_aux.aux,
2115 dpcd_addr,
2116 esi,
2117 dpcd_bytes_to_read);
2118
2119 new_irq_handled = false;
d4a6e8a9 2120 } else {
4562236b 2121 break;
d4a6e8a9 2122 }
4562236b
HW
2123 }
2124
2125 if (process_count == max_process_count)
f1ad2f5e 2126 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
4562236b
HW
2127}
2128
2129static void handle_hpd_rx_irq(void *param)
2130{
c84dec2f 2131 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
2132 struct drm_connector *connector = &aconnector->base;
2133 struct drm_device *dev = connector->dev;
53cbf65c 2134 struct dc_link *dc_link = aconnector->dc_link;
4562236b 2135 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
fbbdadf2 2136 enum dc_connection_type new_connection_type = dc_connection_none;
2a0f9270
BL
2137#ifdef CONFIG_DRM_AMD_DC_HDCP
2138 union hpd_irq_data hpd_irq_data;
2139 struct amdgpu_device *adev = dev->dev_private;
2140
2141 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2142#endif
4562236b 2143
1f6010a9
DF
2144 /*
 2145 * TODO: Temporarily add a mutex to protect the hpd interrupt from a
4562236b
HW
 2146 * gpio conflict; once the i2c helper is implemented, this mutex
 2147 * should be retired.
2148 */
53cbf65c 2149 if (dc_link->type != dc_connection_mst_branch)
4562236b
HW
2150 mutex_lock(&aconnector->hpd_lock);
2151
2a0f9270
BL
2152
2153#ifdef CONFIG_DRM_AMD_DC_HDCP
2154 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2155#else
4e18814e 2156 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2a0f9270 2157#endif
4562236b
HW
2158 !is_mst_root_connector) {
2159 /* Downstream Port status changed. */
fbbdadf2
BL
2160 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2161 DRM_ERROR("KMS: Failed to detect connector\n");
2162
2163 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2164 emulated_link_detect(dc_link);
2165
2166 if (aconnector->fake_enable)
2167 aconnector->fake_enable = false;
2168
2169 amdgpu_dm_update_connector_after_detect(aconnector);
2170
2171
2172 drm_modeset_lock_all(dev);
2173 dm_restore_drm_connector_state(dev, connector);
2174 drm_modeset_unlock_all(dev);
2175
2176 drm_kms_helper_hotplug_event(dev);
2177 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
88ac3dda
RL
2178
2179 if (aconnector->fake_enable)
2180 aconnector->fake_enable = false;
2181
4562236b
HW
2182 amdgpu_dm_update_connector_after_detect(aconnector);
2183
2184
2185 drm_modeset_lock_all(dev);
2186 dm_restore_drm_connector_state(dev, connector);
2187 drm_modeset_unlock_all(dev);
2188
2189 drm_kms_helper_hotplug_event(dev);
2190 }
2191 }
2a0f9270 2192#ifdef CONFIG_DRM_AMD_DC_HDCP
95f247e7
DC
2193 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2194 if (adev->dm.hdcp_workqueue)
2195 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2196 }
2a0f9270 2197#endif
4562236b 2198 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
53cbf65c 2199 (dc_link->type == dc_connection_mst_branch))
4562236b
HW
2200 dm_handle_hpd_rx_irq(aconnector);
2201
e86e8947
HV
2202 if (dc_link->type != dc_connection_mst_branch) {
2203 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
4562236b 2204 mutex_unlock(&aconnector->hpd_lock);
e86e8947 2205 }
4562236b
HW
2206}
2207
2208static void register_hpd_handlers(struct amdgpu_device *adev)
2209{
2210 struct drm_device *dev = adev->ddev;
2211 struct drm_connector *connector;
c84dec2f 2212 struct amdgpu_dm_connector *aconnector;
4562236b
HW
2213 const struct dc_link *dc_link;
2214 struct dc_interrupt_params int_params = {0};
2215
2216 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2217 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2218
2219 list_for_each_entry(connector,
2220 &dev->mode_config.connector_list, head) {
2221
c84dec2f 2222 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
2223 dc_link = aconnector->dc_link;
2224
2225 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2226 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2227 int_params.irq_source = dc_link->irq_source_hpd;
2228
2229 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2230 handle_hpd_irq,
2231 (void *) aconnector);
2232 }
2233
2234 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2235
2236 /* Also register for DP short pulse (hpd_rx). */
2237 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2238 int_params.irq_source = dc_link->irq_source_hpd_rx;
2239
2240 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2241 handle_hpd_rx_irq,
2242 (void *) aconnector);
2243 }
2244 }
2245}
2246
2247/* Register IRQ sources and initialize IRQ callbacks */
2248static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2249{
2250 struct dc *dc = adev->dm.dc;
2251 struct common_irq_params *c_irq_params;
2252 struct dc_interrupt_params int_params = {0};
2253 int r;
2254 int i;
1ffdeca6 2255 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2c8ad2d5 2256
84374725 2257 if (adev->asic_type >= CHIP_VEGA10)
3760f76c 2258 client_id = SOC15_IH_CLIENTID_DCE;
4562236b
HW
2259
2260 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2261 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2262
1f6010a9
DF
2263 /*
2264 * Actions of amdgpu_irq_add_id():
4562236b
HW
2265 * 1. Register a set() function with base driver.
2266 * Base driver will call set() function to enable/disable an
2267 * interrupt in DC hardware.
2268 * 2. Register amdgpu_dm_irq_handler().
2269 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2270 * coming from DC hardware.
2271 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2272 * for acknowledging and handling. */
2273
b57de80a 2274 /* Use VBLANK interrupt */
e9029155 2275 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2c8ad2d5 2276 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
4562236b
HW
2277 if (r) {
2278 DRM_ERROR("Failed to add crtc irq id!\n");
2279 return r;
2280 }
2281
2282 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2283 int_params.irq_source =
3d761e79 2284 dc_interrupt_to_irq_source(dc, i, 0);
4562236b 2285
b57de80a 2286 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4562236b
HW
2287
2288 c_irq_params->adev = adev;
2289 c_irq_params->irq_src = int_params.irq_source;
2290
2291 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2292 dm_crtc_high_irq, c_irq_params);
2293 }
2294
d2574c33
MK
2295 /* Use VUPDATE interrupt */
2296 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2297 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2298 if (r) {
2299 DRM_ERROR("Failed to add vupdate irq id!\n");
2300 return r;
2301 }
2302
2303 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2304 int_params.irq_source =
2305 dc_interrupt_to_irq_source(dc, i, 0);
2306
2307 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2308
2309 c_irq_params->adev = adev;
2310 c_irq_params->irq_src = int_params.irq_source;
2311
2312 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2313 dm_vupdate_high_irq, c_irq_params);
2314 }
2315
3d761e79 2316 /* Use GRPH_PFLIP interrupt */
4562236b
HW
2317 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2318 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2c8ad2d5 2319 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
4562236b
HW
2320 if (r) {
2321 DRM_ERROR("Failed to add page flip irq id!\n");
2322 return r;
2323 }
2324
2325 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2326 int_params.irq_source =
2327 dc_interrupt_to_irq_source(dc, i, 0);
2328
2329 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2330
2331 c_irq_params->adev = adev;
2332 c_irq_params->irq_src = int_params.irq_source;
2333
2334 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2335 dm_pflip_high_irq, c_irq_params);
2336
2337 }
2338
2339 /* HPD */
2c8ad2d5
AD
2340 r = amdgpu_irq_add_id(adev, client_id,
2341 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
4562236b
HW
2342 if (r) {
2343 DRM_ERROR("Failed to add hpd irq id!\n");
2344 return r;
2345 }
2346
2347 register_hpd_handlers(adev);
2348
2349 return 0;
2350}
2351
b86a1aa3 2352#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992
AD
2353/* Register IRQ sources and initialize IRQ callbacks */
2354static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2355{
2356 struct dc *dc = adev->dm.dc;
2357 struct common_irq_params *c_irq_params;
2358 struct dc_interrupt_params int_params = {0};
2359 int r;
2360 int i;
2361
2362 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2363 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2364
1f6010a9
DF
2365 /*
2366 * Actions of amdgpu_irq_add_id():
ff5ef992
AD
2367 * 1. Register a set() function with base driver.
2368 * Base driver will call set() function to enable/disable an
2369 * interrupt in DC hardware.
2370 * 2. Register amdgpu_dm_irq_handler().
2371 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2372 * coming from DC hardware.
2373 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2374 * for acknowledging and handling.
1f6010a9 2375 */
ff5ef992
AD
2376
2377 /* Use VSTARTUP interrupt */
2378 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2379 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2380 i++) {
3760f76c 2381 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
ff5ef992
AD
2382
2383 if (r) {
2384 DRM_ERROR("Failed to add crtc irq id!\n");
2385 return r;
2386 }
2387
2388 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2389 int_params.irq_source =
2390 dc_interrupt_to_irq_source(dc, i, 0);
2391
2392 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2393
2394 c_irq_params->adev = adev;
2395 c_irq_params->irq_src = int_params.irq_source;
2396
2346ef47
NK
2397 amdgpu_dm_irq_register_interrupt(
2398 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2399 }
2400
2401 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2402 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2403 * to trigger at end of each vblank, regardless of state of the lock,
2404 * matching DCE behaviour.
2405 */
2406 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2407 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2408 i++) {
2409 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2410
2411 if (r) {
2412 DRM_ERROR("Failed to add vupdate irq id!\n");
2413 return r;
2414 }
2415
2416 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2417 int_params.irq_source =
2418 dc_interrupt_to_irq_source(dc, i, 0);
2419
2420 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2421
2422 c_irq_params->adev = adev;
2423 c_irq_params->irq_src = int_params.irq_source;
2424
ff5ef992 2425 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2346ef47 2426 dm_vupdate_high_irq, c_irq_params);
d2574c33
MK
2427 }
2428
ff5ef992
AD
2429 /* Use GRPH_PFLIP interrupt */
2430 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2431 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2432 i++) {
3760f76c 2433 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
ff5ef992
AD
2434 if (r) {
2435 DRM_ERROR("Failed to add page flip irq id!\n");
2436 return r;
2437 }
2438
2439 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2440 int_params.irq_source =
2441 dc_interrupt_to_irq_source(dc, i, 0);
2442
2443 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2444
2445 c_irq_params->adev = adev;
2446 c_irq_params->irq_src = int_params.irq_source;
2447
2448 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2449 dm_pflip_high_irq, c_irq_params);
2450
2451 }
2452
2453 /* HPD */
3760f76c 2454 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
ff5ef992
AD
2455 &adev->hpd_irq);
2456 if (r) {
2457 DRM_ERROR("Failed to add hpd irq id!\n");
2458 return r;
2459 }
2460
2461 register_hpd_handlers(adev);
2462
2463 return 0;
2464}
2465#endif
2466
eb3dc897
NK
2467/*
2468 * Acquires the lock for the atomic state object and returns
2469 * the new atomic state.
2470 *
2471 * This should only be called during atomic check.
2472 */
2473static int dm_atomic_get_state(struct drm_atomic_state *state,
2474 struct dm_atomic_state **dm_state)
2475{
2476 struct drm_device *dev = state->dev;
2477 struct amdgpu_device *adev = dev->dev_private;
2478 struct amdgpu_display_manager *dm = &adev->dm;
2479 struct drm_private_state *priv_state;
eb3dc897
NK
2480
2481 if (*dm_state)
2482 return 0;
2483
eb3dc897
NK
2484 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2485 if (IS_ERR(priv_state))
2486 return PTR_ERR(priv_state);
2487
2488 *dm_state = to_dm_atomic_state(priv_state);
2489
2490 return 0;
2491}
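/*
 * Typical usage during atomic check (sketch): callers pass in a
 * NULL-initialized pointer and may call this repeatedly; the private
 * object state is only looked up once:
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	ret = dm_atomic_get_state(state, &dm_state);
 */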
2492
2493struct dm_atomic_state *
2494dm_atomic_get_new_state(struct drm_atomic_state *state)
2495{
2496 struct drm_device *dev = state->dev;
2497 struct amdgpu_device *adev = dev->dev_private;
2498 struct amdgpu_display_manager *dm = &adev->dm;
2499 struct drm_private_obj *obj;
2500 struct drm_private_state *new_obj_state;
2501 int i;
2502
2503 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2504 if (obj->funcs == dm->atomic_obj.funcs)
2505 return to_dm_atomic_state(new_obj_state);
2506 }
2507
2508 return NULL;
2509}
2510
2511struct dm_atomic_state *
2512dm_atomic_get_old_state(struct drm_atomic_state *state)
2513{
2514 struct drm_device *dev = state->dev;
2515 struct amdgpu_device *adev = dev->dev_private;
2516 struct amdgpu_display_manager *dm = &adev->dm;
2517 struct drm_private_obj *obj;
2518 struct drm_private_state *old_obj_state;
2519 int i;
2520
2521 for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2522 if (obj->funcs == dm->atomic_obj.funcs)
2523 return to_dm_atomic_state(old_obj_state);
2524 }
2525
2526 return NULL;
2527}
2528
2529static struct drm_private_state *
2530dm_atomic_duplicate_state(struct drm_private_obj *obj)
2531{
2532 struct dm_atomic_state *old_state, *new_state;
2533
2534 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2535 if (!new_state)
2536 return NULL;
2537
2538 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2539
813d20dc
AW
2540 old_state = to_dm_atomic_state(obj->state);
2541
2542 if (old_state && old_state->context)
2543 new_state->context = dc_copy_state(old_state->context);
2544
eb3dc897
NK
2545 if (!new_state->context) {
2546 kfree(new_state);
2547 return NULL;
2548 }
2549
eb3dc897
NK
2550 return &new_state->base;
2551}
2552
2553static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2554 struct drm_private_state *state)
2555{
2556 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2557
2558 if (dm_state && dm_state->context)
2559 dc_release_state(dm_state->context);
2560
2561 kfree(dm_state);
2562}
2563
2564static struct drm_private_state_funcs dm_atomic_state_funcs = {
2565 .atomic_duplicate_state = dm_atomic_duplicate_state,
2566 .atomic_destroy_state = dm_atomic_destroy_state,
2567};
2568
4562236b
HW
2569static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2570{
eb3dc897 2571 struct dm_atomic_state *state;
4562236b
HW
2572 int r;
2573
2574 adev->mode_info.mode_config_initialized = true;
2575
4562236b 2576 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
54f5499a 2577 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
4562236b
HW
2578
2579 adev->ddev->mode_config.max_width = 16384;
2580 adev->ddev->mode_config.max_height = 16384;
2581
2582 adev->ddev->mode_config.preferred_depth = 24;
2583 adev->ddev->mode_config.prefer_shadow = 1;
1f6010a9 2584 /* indicates support for immediate flip */
4562236b
HW
2585 adev->ddev->mode_config.async_page_flip = true;
2586
770d13b1 2587 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
4562236b 2588
eb3dc897
NK
2589 state = kzalloc(sizeof(*state), GFP_KERNEL);
2590 if (!state)
2591 return -ENOMEM;
2592
813d20dc 2593 state->context = dc_create_state(adev->dm.dc);
eb3dc897
NK
2594 if (!state->context) {
2595 kfree(state);
2596 return -ENOMEM;
2597 }
2598
2599 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2600
8c1a765b
DA
2601 drm_atomic_private_obj_init(adev->ddev,
2602 &adev->dm.atomic_obj,
eb3dc897
NK
2603 &state->base,
2604 &dm_atomic_state_funcs);
2605
3dc9b1ce 2606 r = amdgpu_display_modeset_create_props(adev);
4562236b
HW
2607 if (r)
2608 return r;
2609
6ce8f316
NK
2610 r = amdgpu_dm_audio_init(adev);
2611 if (r)
2612 return r;
2613
4562236b
HW
2614 return 0;
2615}
2616
206bbafe
DF
2617#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2618#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
94562810 2619#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
206bbafe 2620
4562236b
HW
2621#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2622 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2623
206bbafe
DF
2624static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2625{
2626#if defined(CONFIG_ACPI)
2627 struct amdgpu_dm_backlight_caps caps;
2628
2629 if (dm->backlight_caps.caps_valid)
2630 return;
2631
2632 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2633 if (caps.caps_valid) {
94562810
RS
2634 dm->backlight_caps.caps_valid = true;
2635 if (caps.aux_support)
2636 return;
206bbafe
DF
2637 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2638 dm->backlight_caps.max_input_signal = caps.max_input_signal;
206bbafe
DF
2639 } else {
2640 dm->backlight_caps.min_input_signal =
2641 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2642 dm->backlight_caps.max_input_signal =
2643 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2644 }
2645#else
94562810
RS
2646 if (dm->backlight_caps.aux_support)
2647 return;
2648
8bcbc9ef
DF
2649 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2650 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
206bbafe
DF
2651#endif
2652}
2653
94562810
RS
2654static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2655{
2656 bool rc;
2657
2658 if (!link)
2659 return 1;
2660
2661 rc = dc_link_set_backlight_level_nits(link, true, brightness,
2662 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2663
2664 return rc ? 0 : 1;
2665}
2666
2667static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2668 const uint32_t user_brightness)
2669{
2670 u32 min, max, conversion_pace;
2671 u32 brightness = user_brightness;
2672
2673 if (!caps)
2674 goto out;
2675
2676 if (!caps->aux_support) {
2677 max = caps->max_input_signal;
2678 min = caps->min_input_signal;
2679 /*
 2680 * The brightness input is in the range 0-255.
 2681 * It needs to be rescaled to be between the
 2682 * requested min and max input signal.
 2683 * It also needs to be scaled up by 0x101 to
 2684 * match the DC interface, which has a range of
 2685 * 0 to 0xffff.
2686 */
2687 conversion_pace = 0x101;
2688 brightness =
2689 user_brightness
2690 * conversion_pace
2691 * (max - min)
2692 / AMDGPU_MAX_BL_LEVEL
2693 + min * conversion_pace;
2694 } else {
2695 /* TODO
2696 * We are doing a linear interpolation here, which is OK but
2697 * does not provide the optimal result. We probably want
2698 * something close to the Perceptual Quantizer (PQ) curve.
2699 */
2700 max = caps->aux_max_input_signal;
2701 min = caps->aux_min_input_signal;
2702
2703 brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2704 + user_brightness * max;
 2705 // Multiply the value by 1000 since we use millinits
2706 brightness *= 1000;
2707 brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
2708 }
2709
2710out:
2711 return brightness;
2712}
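/*
 * Sanity check for the non-AUX path in convert_brightness(), using
 * hypothetical caps of min_input_signal = 12 and max_input_signal = 255:
 * user_brightness = AMDGPU_MAX_BL_LEVEL (255) yields
 * 255 * 0x101 * (255 - 12) / 255 + 12 * 0x101 = 0x101 * 255 = 0xffff,
 * i.e. full scale on the DC side.
 */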
2713
4562236b
HW
2714static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2715{
2716 struct amdgpu_display_manager *dm = bl_get_data(bd);
206bbafe 2717 struct amdgpu_dm_backlight_caps caps;
94562810
RS
2718 struct dc_link *link = NULL;
2719 u32 brightness;
2720 bool rc;
4562236b 2721
206bbafe
DF
2722 amdgpu_dm_update_backlight_caps(dm);
2723 caps = dm->backlight_caps;
94562810
RS
2724
2725 link = (struct dc_link *)dm->backlight_link;
2726
2727 brightness = convert_brightness(&caps, bd->props.brightness);
2728 // Change brightness based on AUX property
2729 if (caps.aux_support)
2730 return set_backlight_via_aux(link, brightness);
2731
2732 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2733
2734 return rc ? 0 : 1;
4562236b
HW
2735}
2736
2737static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2738{
620a0d27
DF
2739 struct amdgpu_display_manager *dm = bl_get_data(bd);
2740 int ret = dc_link_get_backlight_level(dm->backlight_link);
2741
2742 if (ret == DC_ERROR_UNEXPECTED)
2743 return bd->props.brightness;
2744 return ret;
4562236b
HW
2745}
2746
2747static const struct backlight_ops amdgpu_dm_backlight_ops = {
bb264220 2748 .options = BL_CORE_SUSPENDRESUME,
4562236b
HW
2749 .get_brightness = amdgpu_dm_backlight_get_brightness,
2750 .update_status = amdgpu_dm_backlight_update_status,
2751};
2752
7578ecda
AD
2753static void
2754amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4562236b
HW
2755{
2756 char bl_name[16];
2757 struct backlight_properties props = { 0 };
2758
206bbafe
DF
2759 amdgpu_dm_update_backlight_caps(dm);
2760
4562236b 2761 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
53a53f86 2762 props.brightness = AMDGPU_MAX_BL_LEVEL;
4562236b
HW
2763 props.type = BACKLIGHT_RAW;
2764
2765 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2766 dm->adev->ddev->primary->index);
2767
2768 dm->backlight_dev = backlight_device_register(bl_name,
2769 dm->adev->ddev->dev,
2770 dm,
2771 &amdgpu_dm_backlight_ops,
2772 &props);
2773
74baea42 2774 if (IS_ERR(dm->backlight_dev))
4562236b
HW
2775 DRM_ERROR("DM: Backlight registration failed!\n");
2776 else
f1ad2f5e 2777 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4562236b
HW
2778}
2779
2780#endif
2781
df534fff 2782static int initialize_plane(struct amdgpu_display_manager *dm,
b2fddb13 2783 struct amdgpu_mode_info *mode_info, int plane_id,
cc1fec57
NK
2784 enum drm_plane_type plane_type,
2785 const struct dc_plane_cap *plane_cap)
df534fff 2786{
f180b4bc 2787 struct drm_plane *plane;
df534fff
S
2788 unsigned long possible_crtcs;
2789 int ret = 0;
2790
f180b4bc 2791 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
df534fff
S
2792 if (!plane) {
2793 DRM_ERROR("KMS: Failed to allocate plane\n");
2794 return -ENOMEM;
2795 }
b2fddb13 2796 plane->type = plane_type;
df534fff
S
2797
2798 /*
b2fddb13
NK
2799 * HACK: IGT tests expect that the primary plane for a CRTC
 2800 * can only have one possible CRTC. Only expose support for
 2801 * any CRTC on planes that are not going to be used as a primary
 2802 * plane for a CRTC - like overlay or underlay planes.
df534fff
S
2803 */
2804 possible_crtcs = 1 << plane_id;
2805 if (plane_id >= dm->dc->caps.max_streams)
2806 possible_crtcs = 0xff;
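 /*
  * Example: with max_streams = 4, primary plane 0 gets
  * possible_crtcs = 0x1 (CRTC 0 only), while an overlay plane with
  * plane_id = 4 gets 0xff (any CRTC).
  */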
2807
cc1fec57 2808 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
df534fff
S
2809
2810 if (ret) {
2811 DRM_ERROR("KMS: Failed to initialize plane\n");
54087768 2812 kfree(plane);
df534fff
S
2813 return ret;
2814 }
2815
54087768
NK
2816 if (mode_info)
2817 mode_info->planes[plane_id] = plane;
2818
df534fff
S
2819 return ret;
2820}
2821
89fc8d4e
HW
2822
2823static void register_backlight_device(struct amdgpu_display_manager *dm,
2824 struct dc_link *link)
2825{
2826#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2827 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2828
2829 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2830 link->type != dc_connection_none) {
1f6010a9
DF
2831 /*
 2832 * Even if registration fails, we should continue with
89fc8d4e
HW
 2833 * DM initialization, because not having backlight control
 2834 * is better than a black screen.
2835 */
2836 amdgpu_dm_register_backlight_device(dm);
2837
2838 if (dm->backlight_dev)
2839 dm->backlight_link = link;
2840 }
2841#endif
2842}
2843
2844
1f6010a9
DF
2845/*
2846 * In this architecture, the association
4562236b
HW
2847 * connector -> encoder -> crtc
2848 * id not really requried. The crtc and connector will hold the
2849 * display_index as an abstraction to use with DAL component
2850 *
2851 * Returns 0 on success
2852 */
7578ecda 2853static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4562236b
HW
2854{
2855 struct amdgpu_display_manager *dm = &adev->dm;
df534fff 2856 int32_t i;
c84dec2f 2857 struct amdgpu_dm_connector *aconnector = NULL;
f2a0f5e6 2858 struct amdgpu_encoder *aencoder = NULL;
d4e13b0d 2859 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4562236b 2860 uint32_t link_cnt;
cc1fec57 2861 int32_t primary_planes;
fbbdadf2 2862 enum dc_connection_type new_connection_type = dc_connection_none;
cc1fec57 2863 const struct dc_plane_cap *plane;
4562236b
HW
2864
2865 link_cnt = dm->dc->caps.max_links;
4562236b
HW
2866 if (amdgpu_dm_mode_config_init(dm->adev)) {
2867 DRM_ERROR("DM: Failed to initialize mode config\n");
59d0f396 2868 return -EINVAL;
4562236b
HW
2869 }
2870
b2fddb13
NK
2871 /* There is one primary plane per CRTC */
2872 primary_planes = dm->dc->caps.max_streams;
54087768 2873 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
efa6a8b7 2874
b2fddb13
NK
2875 /*
2876 * Initialize primary planes, implicit planes for legacy IOCTLS.
2877 * Order is reversed to match iteration order in atomic check.
2878 */
2879 for (i = (primary_planes - 1); i >= 0; i--) {
cc1fec57
NK
2880 plane = &dm->dc->caps.planes[i];
2881
b2fddb13 2882 if (initialize_plane(dm, mode_info, i,
cc1fec57 2883 DRM_PLANE_TYPE_PRIMARY, plane)) {
df534fff 2884 DRM_ERROR("KMS: Failed to initialize primary plane\n");
cd8a2ae8 2885 goto fail;
d4e13b0d 2886 }
df534fff 2887 }
92f3ac40 2888
0d579c7e
NK
2889 /*
2890 * Initialize overlay planes, index starting after primary planes.
2891 * These planes have a higher DRM index than the primary planes since
2892 * they should be considered as having a higher z-order.
2893 * Order is reversed to match iteration order in atomic check.
cc1fec57
NK
2894 *
2895 * Only support DCN for now, and only expose one so we don't encourage
2896 * userspace to use up all the pipes.
0d579c7e 2897 */
cc1fec57
NK
2898 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2899 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2900
2901 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2902 continue;
2903
2904 if (!plane->blends_with_above || !plane->blends_with_below)
2905 continue;
2906
ea36ad34 2907 if (!plane->pixel_format_support.argb8888)
cc1fec57
NK
2908 continue;
2909
54087768 2910 if (initialize_plane(dm, NULL, primary_planes + i,
cc1fec57 2911 DRM_PLANE_TYPE_OVERLAY, plane)) {
0d579c7e 2912 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
cd8a2ae8 2913 goto fail;
d4e13b0d 2914 }
cc1fec57
NK
2915
2916 /* Only create one overlay plane. */
2917 break;
d4e13b0d 2918 }
4562236b 2919
d4e13b0d 2920 for (i = 0; i < dm->dc->caps.max_streams; i++)
f180b4bc 2921 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4562236b 2922 DRM_ERROR("KMS: Failed to initialize crtc\n");
cd8a2ae8 2923 goto fail;
4562236b 2924 }
4562236b 2925
ab2541b6 2926 dm->display_indexes_num = dm->dc->caps.max_streams;
4562236b
HW
2927
2928 /* loops over all connectors on the board */
2929 for (i = 0; i < link_cnt; i++) {
89fc8d4e 2930 struct dc_link *link = NULL;
4562236b
HW
2931
2932 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2933 DRM_ERROR(
2934 "KMS: Cannot support more than %d display indexes\n",
2935 AMDGPU_DM_MAX_DISPLAY_INDEX);
2936 continue;
2937 }
2938
2939 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2940 if (!aconnector)
cd8a2ae8 2941 goto fail;
4562236b
HW
2942
2943 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
8440c304 2944 if (!aencoder)
cd8a2ae8 2945 goto fail;
4562236b
HW
2946
2947 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2948 DRM_ERROR("KMS: Failed to initialize encoder\n");
cd8a2ae8 2949 goto fail;
4562236b
HW
2950 }
2951
2952 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2953 DRM_ERROR("KMS: Failed to initialize connector\n");
cd8a2ae8 2954 goto fail;
4562236b
HW
2955 }
2956
89fc8d4e
HW
2957 link = dc_get_link_at_index(dm->dc, i);
2958
fbbdadf2
BL
2959 if (!dc_link_detect_sink(link, &new_connection_type))
2960 DRM_ERROR("KMS: Failed to detect connector\n");
2961
2962 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2963 emulated_link_detect(link);
2964 amdgpu_dm_update_connector_after_detect(aconnector);
2965
2966 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4562236b 2967 amdgpu_dm_update_connector_after_detect(aconnector);
89fc8d4e 2968 register_backlight_device(dm, link);
397a9bc5
RL
2969 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
2970 amdgpu_dm_set_psr_caps(link);
89fc8d4e
HW
2971 }
2972
2973
4562236b
HW
2974 }
2975
2976 /* Software is initialized. Now we can register interrupt handlers. */
2977 switch (adev->asic_type) {
2978 case CHIP_BONAIRE:
2979 case CHIP_HAWAII:
cd4b356f
AD
2980 case CHIP_KAVERI:
2981 case CHIP_KABINI:
2982 case CHIP_MULLINS:
4562236b
HW
2983 case CHIP_TONGA:
2984 case CHIP_FIJI:
2985 case CHIP_CARRIZO:
2986 case CHIP_STONEY:
2987 case CHIP_POLARIS11:
2988 case CHIP_POLARIS10:
b264d345 2989 case CHIP_POLARIS12:
7737de91 2990 case CHIP_VEGAM:
2c8ad2d5 2991 case CHIP_VEGA10:
2325ff30 2992 case CHIP_VEGA12:
1fe6bf2f 2993 case CHIP_VEGA20:
4562236b
HW
2994 if (dce110_register_irq_handlers(dm->adev)) {
2995 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 2996 goto fail;
4562236b
HW
2997 }
2998 break;
b86a1aa3 2999#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992 3000 case CHIP_RAVEN:
fbd2afe5 3001 case CHIP_NAVI12:
476e955d 3002 case CHIP_NAVI10:
fce651e3 3003 case CHIP_NAVI14:
30221ad8 3004 case CHIP_RENOIR:
ff5ef992
AD
3005 if (dcn10_register_irq_handlers(dm->adev)) {
3006 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 3007 goto fail;
ff5ef992
AD
3008 }
3009 break;
3010#endif
4562236b 3011 default:
e63f8673 3012 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
cd8a2ae8 3013 goto fail;
4562236b
HW
3014 }
3015
1bc460a4
HW
3016 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
3017 dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
3018
2d673560
NK
3019 /* No userspace support. */
3020 dm->dc->debug.disable_tri_buf = true;
3021
4562236b 3022 return 0;
cd8a2ae8 3023fail:
4562236b 3024 kfree(aencoder);
4562236b 3025 kfree(aconnector);
54087768 3026
59d0f396 3027 return -EINVAL;
4562236b
HW
3028}
3029
7578ecda 3030static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4562236b
HW
3031{
3032 drm_mode_config_cleanup(dm->ddev);
eb3dc897 3033 drm_atomic_private_obj_fini(&dm->atomic_obj);
4562236b
HW
3034 return;
3035}
3036
3037/******************************************************************************
3038 * amdgpu_display_funcs functions
3039 *****************************************************************************/
3040
1f6010a9 3041/*
4562236b
HW
3042 * dm_bandwidth_update - program display watermarks
3043 *
3044 * @adev: amdgpu_device pointer
3045 *
3046 * Calculate and program the display watermarks and line buffer allocation.
3047 */
3048static void dm_bandwidth_update(struct amdgpu_device *adev)
3049{
49c07a99 3050 /* TODO: implement later */
4562236b
HW
3051}
3052
39cc5be2 3053static const struct amdgpu_display_funcs dm_display_funcs = {
4562236b
HW
3054 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3055 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
7b42573b
HW
3056 .backlight_set_level = NULL, /* never called for DC */
3057 .backlight_get_level = NULL, /* never called for DC */
4562236b
HW
3058 .hpd_sense = NULL,/* called unconditionally */
3059 .hpd_set_polarity = NULL, /* called unconditionally */
3060 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
3061 .page_flip_get_scanoutpos =
3062 dm_crtc_get_scanoutpos,/* called unconditionally */
3063 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3064 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
3065};
3066
3067#if defined(CONFIG_DEBUG_KERNEL_DC)
3068
3ee6b26b
AD
3069static ssize_t s3_debug_store(struct device *device,
3070 struct device_attribute *attr,
3071 const char *buf,
3072 size_t count)
4562236b
HW
3073{
3074 int ret;
3075 int s3_state;
ef1de361 3076 struct drm_device *drm_dev = dev_get_drvdata(device);
4562236b
HW
3077 struct amdgpu_device *adev = drm_dev->dev_private;
3078
3079 ret = kstrtoint(buf, 0, &s3_state);
3080
3081 if (ret == 0) {
3082 if (s3_state) {
3083 dm_resume(adev);
4562236b
HW
3084 drm_kms_helper_hotplug_event(adev->ddev);
3085 } else
3086 dm_suspend(adev);
3087 }
3088
3089 return ret == 0 ? count : 0;
3090}
3091
3092DEVICE_ATTR_WO(s3_debug);
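/*
 * Usage sketch (the sysfs path below is illustrative; the attribute is
 * created on the DRM device's parent in dm_early_init()):
 *   echo 0 > /sys/class/drm/card0/device/s3_debug   # forces dm_suspend()
 *   echo 1 > /sys/class/drm/card0/device/s3_debug   # forces dm_resume()
 */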
3093
3094#endif
3095
3096static int dm_early_init(void *handle)
3097{
3098 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3099
4562236b
HW
3100 switch (adev->asic_type) {
3101 case CHIP_BONAIRE:
3102 case CHIP_HAWAII:
3103 adev->mode_info.num_crtc = 6;
3104 adev->mode_info.num_hpd = 6;
3105 adev->mode_info.num_dig = 6;
4562236b 3106 break;
cd4b356f
AD
3107 case CHIP_KAVERI:
3108 adev->mode_info.num_crtc = 4;
3109 adev->mode_info.num_hpd = 6;
3110 adev->mode_info.num_dig = 7;
cd4b356f
AD
3111 break;
3112 case CHIP_KABINI:
3113 case CHIP_MULLINS:
3114 adev->mode_info.num_crtc = 2;
3115 adev->mode_info.num_hpd = 6;
3116 adev->mode_info.num_dig = 6;
cd4b356f 3117 break;
4562236b
HW
3118 case CHIP_FIJI:
3119 case CHIP_TONGA:
3120 adev->mode_info.num_crtc = 6;
3121 adev->mode_info.num_hpd = 6;
3122 adev->mode_info.num_dig = 7;
4562236b
HW
3123 break;
3124 case CHIP_CARRIZO:
3125 adev->mode_info.num_crtc = 3;
3126 adev->mode_info.num_hpd = 6;
3127 adev->mode_info.num_dig = 9;
4562236b
HW
3128 break;
3129 case CHIP_STONEY:
3130 adev->mode_info.num_crtc = 2;
3131 adev->mode_info.num_hpd = 6;
3132 adev->mode_info.num_dig = 9;
4562236b
HW
3133 break;
3134 case CHIP_POLARIS11:
b264d345 3135 case CHIP_POLARIS12:
4562236b
HW
3136 adev->mode_info.num_crtc = 5;
3137 adev->mode_info.num_hpd = 5;
3138 adev->mode_info.num_dig = 5;
4562236b
HW
3139 break;
3140 case CHIP_POLARIS10:
7737de91 3141 case CHIP_VEGAM:
4562236b
HW
3142 adev->mode_info.num_crtc = 6;
3143 adev->mode_info.num_hpd = 6;
3144 adev->mode_info.num_dig = 6;
4562236b 3145 break;
2c8ad2d5 3146 case CHIP_VEGA10:
2325ff30 3147 case CHIP_VEGA12:
1fe6bf2f 3148 case CHIP_VEGA20:
2c8ad2d5
AD
3149 adev->mode_info.num_crtc = 6;
3150 adev->mode_info.num_hpd = 6;
3151 adev->mode_info.num_dig = 6;
3152 break;
b86a1aa3 3153#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992
AD
3154 case CHIP_RAVEN:
3155 adev->mode_info.num_crtc = 4;
3156 adev->mode_info.num_hpd = 4;
3157 adev->mode_info.num_dig = 4;
ff5ef992 3158 break;
476e955d 3159#endif
476e955d 3160 case CHIP_NAVI10:
fbd2afe5 3161 case CHIP_NAVI12:
476e955d
HW
3162 adev->mode_info.num_crtc = 6;
3163 adev->mode_info.num_hpd = 6;
3164 adev->mode_info.num_dig = 6;
3165 break;
fce651e3
BL
3166 case CHIP_NAVI14:
3167 adev->mode_info.num_crtc = 5;
3168 adev->mode_info.num_hpd = 5;
3169 adev->mode_info.num_dig = 5;
3170 break;
30221ad8
BL
3171 case CHIP_RENOIR:
3172 adev->mode_info.num_crtc = 4;
3173 adev->mode_info.num_hpd = 4;
3174 adev->mode_info.num_dig = 4;
3175 break;
4562236b 3176 default:
e63f8673 3177 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4562236b
HW
3178 return -EINVAL;
3179 }
3180
c8dd5715
MD
3181 amdgpu_dm_set_irq_funcs(adev);
3182
39cc5be2
AD
3183 if (adev->mode_info.funcs == NULL)
3184 adev->mode_info.funcs = &dm_display_funcs;
3185
1f6010a9
DF
3186 /*
3187 * Note: Do NOT change adev->audio_endpt_rreg and
4562236b 3188 * adev->audio_endpt_wreg because they are initialised in
1f6010a9
DF
3189 * amdgpu_device_init()
3190 */
4562236b
HW
3191#if defined(CONFIG_DEBUG_KERNEL_DC)
3192 device_create_file(
3193 adev->ddev->dev,
3194 &dev_attr_s3_debug);
3195#endif
3196
3197 return 0;
3198}
3199
9b690ef3 3200static bool modeset_required(struct drm_crtc_state *crtc_state,
0971c40e
HW
3201 struct dc_stream_state *new_stream,
3202 struct dc_stream_state *old_stream)
9b690ef3 3203{
e7b07cee
HW
3204 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3205 return false;
3206
3207 if (!crtc_state->enable)
3208 return false;
3209
3210 return crtc_state->active;
3211}
3212
3213static bool modereset_required(struct drm_crtc_state *crtc_state)
3214{
3215 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3216 return false;
3217
3218 return !crtc_state->enable || !crtc_state->active;
3219}
3220
7578ecda 3221static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
e7b07cee
HW
3222{
3223 drm_encoder_cleanup(encoder);
3224 kfree(encoder);
3225}
3226
3227static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3228 .destroy = amdgpu_dm_encoder_destroy,
3229};
3230
e7b07cee 3231
695af5f9
NK
3232static int fill_dc_scaling_info(const struct drm_plane_state *state,
3233 struct dc_scaling_info *scaling_info)
e7b07cee 3234{
6491f0c0 3235 int scale_w, scale_h;
e7b07cee 3236
695af5f9 3237 memset(scaling_info, 0, sizeof(*scaling_info));
e7b07cee 3238
695af5f9
NK
3239 /* Source is fixed 16.16 but we ignore mantissa for now... */
3240 scaling_info->src_rect.x = state->src_x >> 16;
3241 scaling_info->src_rect.y = state->src_y >> 16;
e7b07cee 3242
695af5f9
NK
3243 scaling_info->src_rect.width = state->src_w >> 16;
3244 if (scaling_info->src_rect.width == 0)
3245 return -EINVAL;
3246
3247 scaling_info->src_rect.height = state->src_h >> 16;
3248 if (scaling_info->src_rect.height == 0)
3249 return -EINVAL;
3250
3251 scaling_info->dst_rect.x = state->crtc_x;
3252 scaling_info->dst_rect.y = state->crtc_y;
e7b07cee
HW
3253
3254 if (state->crtc_w == 0)
695af5f9 3255 return -EINVAL;
e7b07cee 3256
695af5f9 3257 scaling_info->dst_rect.width = state->crtc_w;
e7b07cee
HW
3258
3259 if (state->crtc_h == 0)
695af5f9 3260 return -EINVAL;
e7b07cee 3261
695af5f9 3262 scaling_info->dst_rect.height = state->crtc_h;
e7b07cee 3263
695af5f9
NK
3264 /* DRM doesn't specify clipping on destination output. */
3265 scaling_info->clip_rect = scaling_info->dst_rect;
e7b07cee 3266
6491f0c0
NK
3267 /* TODO: Validate scaling per-format with DC plane caps */
3268 scale_w = scaling_info->dst_rect.width * 1000 /
3269 scaling_info->src_rect.width;
e7b07cee 3270
6491f0c0
NK
3271 if (scale_w < 250 || scale_w > 16000)
3272 return -EINVAL;
3273
3274 scale_h = scaling_info->dst_rect.height * 1000 /
3275 scaling_info->src_rect.height;
3276
3277 if (scale_h < 250 || scale_h > 16000)
3278 return -EINVAL;
3279
695af5f9
NK
3280 /*
3281 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3282 * assume reasonable defaults based on the format.
3283 */
e7b07cee 3284
695af5f9 3285 return 0;
4562236b 3286}
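/*
 * Worked example for fill_dc_scaling_info() with a hypothetical plane
 * state: a 1920x1080 source (src_w = 1920 << 16) scaled to a 960x540
 * destination gives scale_w = 960 * 1000 / 1920 = 500 and
 * scale_h = 540 * 1000 / 1080 = 500, i.e. a 0.5x downscale, well within
 * the accepted [250, 16000] (0.25x to 16x) range.
 */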
695af5f9 3287
3ee6b26b 3288static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
9817d5f5 3289 uint64_t *tiling_flags)
e7b07cee 3290{
e68d14dd 3291 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
e7b07cee 3292 int r = amdgpu_bo_reserve(rbo, false);
b830ebc9 3293
e7b07cee 3294 if (unlikely(r)) {
1f6010a9 3295 /* Don't show error message when returning -ERESTARTSYS */
9bbc3031
JZ
3296 if (r != -ERESTARTSYS)
3297 DRM_ERROR("Unable to reserve buffer: %d\n", r);
e7b07cee
HW
3298 return r;
3299 }
3300
e7b07cee
HW
3301 if (tiling_flags)
3302 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3303
3304 amdgpu_bo_unreserve(rbo);
3305
3306 return r;
3307}
3308
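/*
 * DCC_OFFSET_256B stores the DCC metadata offset in units of 256 bytes;
 * an offset of 0 means the buffer has no DCC metadata attached.
 */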
7df7e505
NK
3309static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3310{
3311 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3312
3313 return offset ? (address + offset * 256) : 0;
3314}
3315
695af5f9
NK
3316static int
3317fill_plane_dcc_attributes(struct amdgpu_device *adev,
3318 const struct amdgpu_framebuffer *afb,
3319 const enum surface_pixel_format format,
3320 const enum dc_rotation_angle rotation,
12e2b2d4 3321 const struct plane_size *plane_size,
695af5f9
NK
3322 const union dc_tiling_info *tiling_info,
3323 const uint64_t info,
3324 struct dc_plane_dcc_param *dcc,
87b7ebc2
RS
3325 struct dc_plane_address *address,
3326 bool force_disable_dcc)
7df7e505
NK
3327{
3328 struct dc *dc = adev->dm.dc;
8daa1218
NC
3329 struct dc_dcc_surface_param input;
3330 struct dc_surface_dcc_cap output;
7df7e505
NK
3331 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3332 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3333 uint64_t dcc_address;
3334
8daa1218
NC
3335 memset(&input, 0, sizeof(input));
3336 memset(&output, 0, sizeof(output));
3337
87b7ebc2
RS
3338 if (force_disable_dcc)
3339 return 0;
3340
7df7e505 3341 if (!offset)
09e5665a
NK
3342 return 0;
3343
695af5f9 3344 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
09e5665a 3345 return 0;
7df7e505
NK
3346
3347 if (!dc->cap_funcs.get_dcc_compression_cap)
09e5665a 3348 return -EINVAL;
7df7e505 3349
695af5f9 3350 input.format = format;
12e2b2d4
DL
3351 input.surface_size.width = plane_size->surface_size.width;
3352 input.surface_size.height = plane_size->surface_size.height;
695af5f9 3353 input.swizzle_mode = tiling_info->gfx9.swizzle;
7df7e505 3354
695af5f9 3355 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
7df7e505 3356 input.scan = SCAN_DIRECTION_HORIZONTAL;
695af5f9 3357 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
7df7e505
NK
3358 input.scan = SCAN_DIRECTION_VERTICAL;
3359
3360 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
09e5665a 3361 return -EINVAL;
7df7e505
NK
3362
3363 if (!output.capable)
09e5665a 3364 return -EINVAL;
7df7e505
NK
3365
3366 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
09e5665a 3367 return -EINVAL;
7df7e505 3368
09e5665a 3369 dcc->enable = 1;
12e2b2d4 3370 dcc->meta_pitch =
7df7e505 3371 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
12e2b2d4 3372 dcc->independent_64b_blks = i64b;
7df7e505
NK
3373
3374 dcc_address = get_dcc_address(afb->address, info);
09e5665a
NK
3375 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3376 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
7df7e505 3377
09e5665a
NK
3378 return 0;
3379}
3380
3381static int
320932bf 3382fill_plane_buffer_attributes(struct amdgpu_device *adev,
09e5665a 3383 const struct amdgpu_framebuffer *afb,
695af5f9
NK
3384 const enum surface_pixel_format format,
3385 const enum dc_rotation_angle rotation,
3386 const uint64_t tiling_flags,
09e5665a 3387 union dc_tiling_info *tiling_info,
12e2b2d4 3388 struct plane_size *plane_size,
09e5665a 3389 struct dc_plane_dcc_param *dcc,
87b7ebc2
RS
3390 struct dc_plane_address *address,
3391 bool force_disable_dcc)
09e5665a 3392{
320932bf 3393 const struct drm_framebuffer *fb = &afb->base;
09e5665a
NK
3394 int ret;
3395
3396 memset(tiling_info, 0, sizeof(*tiling_info));
320932bf 3397 memset(plane_size, 0, sizeof(*plane_size));
09e5665a 3398 memset(dcc, 0, sizeof(*dcc));
e0634e8d
NK
3399 memset(address, 0, sizeof(*address));
3400
695af5f9 3401 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
12e2b2d4
DL
3402 plane_size->surface_size.x = 0;
3403 plane_size->surface_size.y = 0;
3404 plane_size->surface_size.width = fb->width;
3405 plane_size->surface_size.height = fb->height;
3406 plane_size->surface_pitch =
320932bf
NK
3407 fb->pitches[0] / fb->format->cpp[0];
3408
e0634e8d
NK
3409 address->type = PLN_ADDR_TYPE_GRAPHICS;
3410 address->grph.addr.low_part = lower_32_bits(afb->address);
3411 address->grph.addr.high_part = upper_32_bits(afb->address);
1894478a 3412 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
1791e54f 3413 uint64_t chroma_addr = afb->address + fb->offsets[1];
e0634e8d 3414
12e2b2d4
DL
3415 plane_size->surface_size.x = 0;
3416 plane_size->surface_size.y = 0;
3417 plane_size->surface_size.width = fb->width;
3418 plane_size->surface_size.height = fb->height;
3419 plane_size->surface_pitch =
320932bf
NK
3420 fb->pitches[0] / fb->format->cpp[0];
3421
12e2b2d4
DL
3422 plane_size->chroma_size.x = 0;
3423 plane_size->chroma_size.y = 0;
320932bf 3424 /* TODO: set these based on surface format */
12e2b2d4
DL
3425 plane_size->chroma_size.width = fb->width / 2;
3426 plane_size->chroma_size.height = fb->height / 2;
320932bf 3427
12e2b2d4 3428 plane_size->chroma_pitch =
320932bf
NK
3429 fb->pitches[1] / fb->format->cpp[1];
3430
e0634e8d
NK
3431 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3432 address->video_progressive.luma_addr.low_part =
3433 lower_32_bits(afb->address);
3434 address->video_progressive.luma_addr.high_part =
3435 upper_32_bits(afb->address);
3436 address->video_progressive.chroma_addr.low_part =
3437 lower_32_bits(chroma_addr);
3438 address->video_progressive.chroma_addr.high_part =
3439 upper_32_bits(chroma_addr);
3440 }
09e5665a
NK
3441
3442 /* Fill GFX8 params */
3443 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3444 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3445
3446 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3447 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3448 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3449 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3450 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3451
3452 /* XXX fix me for VI */
3453 tiling_info->gfx8.num_banks = num_banks;
3454 tiling_info->gfx8.array_mode =
3455 DC_ARRAY_2D_TILED_THIN1;
3456 tiling_info->gfx8.tile_split = tile_split;
3457 tiling_info->gfx8.bank_width = bankw;
3458 tiling_info->gfx8.bank_height = bankh;
3459 tiling_info->gfx8.tile_aspect = mtaspect;
3460 tiling_info->gfx8.tile_mode =
3461 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3462 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3463 == DC_ARRAY_1D_TILED_THIN1) {
3464 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3465 }
3466
3467 tiling_info->gfx8.pipe_config =
3468 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3469
3470 if (adev->asic_type == CHIP_VEGA10 ||
3471 adev->asic_type == CHIP_VEGA12 ||
3472 adev->asic_type == CHIP_VEGA20 ||
476e955d 3473 adev->asic_type == CHIP_NAVI10 ||
fce651e3 3474 adev->asic_type == CHIP_NAVI14 ||
fbd2afe5 3475 adev->asic_type == CHIP_NAVI12 ||
30221ad8 3476 adev->asic_type == CHIP_RENOIR ||
09e5665a
NK
3477 adev->asic_type == CHIP_RAVEN) {
3478 /* Fill GFX9 params */
3479 tiling_info->gfx9.num_pipes =
3480 adev->gfx.config.gb_addr_config_fields.num_pipes;
3481 tiling_info->gfx9.num_banks =
3482 adev->gfx.config.gb_addr_config_fields.num_banks;
3483 tiling_info->gfx9.pipe_interleave =
3484 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3485 tiling_info->gfx9.num_shader_engines =
3486 adev->gfx.config.gb_addr_config_fields.num_se;
3487 tiling_info->gfx9.max_compressed_frags =
3488 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3489 tiling_info->gfx9.num_rb_per_se =
3490 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3491 tiling_info->gfx9.swizzle =
3492 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3493 tiling_info->gfx9.shaderEnable = 1;
3494
695af5f9
NK
3495 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3496 plane_size, tiling_info,
87b7ebc2
RS
3497 tiling_flags, dcc, address,
3498 force_disable_dcc);
09e5665a
NK
3499 if (ret)
3500 return ret;
3501 }
3502
3503 return 0;
7df7e505
NK
3504}
3505
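/*
 * Editor's worked example (assumed values): a 1920x1080 XRGB8888
 * framebuffer has pitches[0] = 7680 bytes and cpp[0] = 4, giving
 * surface_pitch = 7680 / 4 = 1920 pixels. For NV12 the chroma plane
 * starts at afb->address + fb->offsets[1], is subsampled to 960x540,
 * and takes its pitch from pitches[1] / cpp[1].
 */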
d74004b6 3506static void
695af5f9 3507fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
d74004b6
NK
3508 bool *per_pixel_alpha, bool *global_alpha,
3509 int *global_alpha_value)
3510{
3511 *per_pixel_alpha = false;
3512 *global_alpha = false;
3513 *global_alpha_value = 0xff;
3514
3515 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3516 return;
3517
3518 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3519 static const uint32_t alpha_formats[] = {
3520 DRM_FORMAT_ARGB8888,
3521 DRM_FORMAT_RGBA8888,
3522 DRM_FORMAT_ABGR8888,
3523 };
3524 uint32_t format = plane_state->fb->format->format;
3525 unsigned int i;
3526
3527 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3528 if (format == alpha_formats[i]) {
3529 *per_pixel_alpha = true;
3530 break;
3531 }
3532 }
3533 }
3534
3535 if (plane_state->alpha < 0xffff) {
3536 *global_alpha = true;
3537 *global_alpha_value = plane_state->alpha >> 8;
3538 }
3539}
3540
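/*
 * Editor's worked example (assumed values): DRM stores plane alpha as a
 * 16-bit value, so an overlay at alpha 0x8000 (~50%) sets
 * *global_alpha = true and *global_alpha_value = 0x8000 >> 8 = 0x80,
 * while a fully opaque 0xffff plane leaves global alpha disabled at the
 * default 0xff.
 */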
004fefa3
NK
3541static int
3542fill_plane_color_attributes(const struct drm_plane_state *plane_state,
695af5f9 3543 const enum surface_pixel_format format,
004fefa3
NK
3544 enum dc_color_space *color_space)
3545{
3546 bool full_range;
3547
3548 *color_space = COLOR_SPACE_SRGB;
3549
3550 /* DRM color properties only affect non-RGB formats. */
695af5f9 3551 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
004fefa3
NK
3552 return 0;
3553
3554 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3555
3556 switch (plane_state->color_encoding) {
3557 case DRM_COLOR_YCBCR_BT601:
3558 if (full_range)
3559 *color_space = COLOR_SPACE_YCBCR601;
3560 else
3561 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3562 break;
3563
3564 case DRM_COLOR_YCBCR_BT709:
3565 if (full_range)
3566 *color_space = COLOR_SPACE_YCBCR709;
3567 else
3568 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3569 break;
3570
3571 case DRM_COLOR_YCBCR_BT2020:
3572 if (full_range)
3573 *color_space = COLOR_SPACE_2020_YCBCR;
3574 else
3575 return -EINVAL;
3576 break;
3577
3578 default:
3579 return -EINVAL;
3580 }
3581
3582 return 0;
3583}
3584
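/*
 * Editor's summary of the mapping above: BT.601 and BT.709 each pick
 * their full- or limited-range DC color space from
 * plane_state->color_range; BT.2020 is accepted only as full range, and
 * anything else returns -EINVAL so atomic check fails cleanly.
 */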
695af5f9
NK
3585static int
3586fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3587 const struct drm_plane_state *plane_state,
3588 const uint64_t tiling_flags,
3589 struct dc_plane_info *plane_info,
87b7ebc2
RS
3590 struct dc_plane_address *address,
3591 bool force_disable_dcc)
695af5f9
NK
3592{
3593 const struct drm_framebuffer *fb = plane_state->fb;
3594 const struct amdgpu_framebuffer *afb =
3595 to_amdgpu_framebuffer(plane_state->fb);
3596 struct drm_format_name_buf format_name;
3597 int ret;
3598
3599 memset(plane_info, 0, sizeof(*plane_info));
3600
3601 switch (fb->format->format) {
3602 case DRM_FORMAT_C8:
3603 plane_info->format =
3604 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3605 break;
3606 case DRM_FORMAT_RGB565:
3607 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3608 break;
3609 case DRM_FORMAT_XRGB8888:
3610 case DRM_FORMAT_ARGB8888:
3611 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3612 break;
3613 case DRM_FORMAT_XRGB2101010:
3614 case DRM_FORMAT_ARGB2101010:
3615 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3616 break;
3617 case DRM_FORMAT_XBGR2101010:
3618 case DRM_FORMAT_ABGR2101010:
3619 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3620 break;
3621 case DRM_FORMAT_XBGR8888:
3622 case DRM_FORMAT_ABGR8888:
3623 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3624 break;
3625 case DRM_FORMAT_NV21:
3626 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3627 break;
3628 case DRM_FORMAT_NV12:
3629 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3630 break;
cbec6477
SW
3631 case DRM_FORMAT_P010:
3632 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3633 break;
695af5f9
NK
3634 default:
3635 DRM_ERROR(
3636 "Unsupported screen format %s\n",
3637 drm_get_format_name(fb->format->format, &format_name));
3638 return -EINVAL;
3639 }
3640
3641 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3642 case DRM_MODE_ROTATE_0:
3643 plane_info->rotation = ROTATION_ANGLE_0;
3644 break;
3645 case DRM_MODE_ROTATE_90:
3646 plane_info->rotation = ROTATION_ANGLE_90;
3647 break;
3648 case DRM_MODE_ROTATE_180:
3649 plane_info->rotation = ROTATION_ANGLE_180;
3650 break;
3651 case DRM_MODE_ROTATE_270:
3652 plane_info->rotation = ROTATION_ANGLE_270;
3653 break;
3654 default:
3655 plane_info->rotation = ROTATION_ANGLE_0;
3656 break;
3657 }
3658
3659 plane_info->visible = true;
3660 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3661
6d83a32d
MS
3662 plane_info->layer_index = 0;
3663
695af5f9
NK
3664 ret = fill_plane_color_attributes(plane_state, plane_info->format,
3665 &plane_info->color_space);
3666 if (ret)
3667 return ret;
3668
3669 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3670 plane_info->rotation, tiling_flags,
3671 &plane_info->tiling_info,
3672 &plane_info->plane_size,
87b7ebc2
RS
3673 &plane_info->dcc, address,
3674 force_disable_dcc);
695af5f9
NK
3675 if (ret)
3676 return ret;
3677
3678 fill_blending_from_plane_state(
3679 plane_state, &plane_info->per_pixel_alpha,
3680 &plane_info->global_alpha, &plane_info->global_alpha_value);
3681
3682 return 0;
3683}
3684
3685static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3686 struct dc_plane_state *dc_plane_state,
3687 struct drm_plane_state *plane_state,
3688 struct drm_crtc_state *crtc_state)
e7b07cee 3689{
cf020d49 3690 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
e7b07cee
HW
3691 const struct amdgpu_framebuffer *amdgpu_fb =
3692 to_amdgpu_framebuffer(plane_state->fb);
695af5f9
NK
3693 struct dc_scaling_info scaling_info;
3694 struct dc_plane_info plane_info;
3695 uint64_t tiling_flags;
3696 int ret;
87b7ebc2 3697 bool force_disable_dcc = false;
e7b07cee 3698
695af5f9
NK
3699 ret = fill_dc_scaling_info(plane_state, &scaling_info);
3700 if (ret)
3701 return ret;
e7b07cee 3702
695af5f9
NK
3703 dc_plane_state->src_rect = scaling_info.src_rect;
3704 dc_plane_state->dst_rect = scaling_info.dst_rect;
3705 dc_plane_state->clip_rect = scaling_info.clip_rect;
3706 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
e7b07cee 3707
695af5f9 3708 ret = get_fb_info(amdgpu_fb, &tiling_flags);
e7b07cee
HW
3709 if (ret)
3710 return ret;
3711
87b7ebc2 3712 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
695af5f9
NK
3713 ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3714 &plane_info,
87b7ebc2
RS
3715 &dc_plane_state->address,
3716 force_disable_dcc);
004fefa3
NK
3717 if (ret)
3718 return ret;
3719
695af5f9
NK
3720 dc_plane_state->format = plane_info.format;
3721 dc_plane_state->color_space = plane_info.color_space;
3723 dc_plane_state->plane_size = plane_info.plane_size;
3724 dc_plane_state->rotation = plane_info.rotation;
3725 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3726 dc_plane_state->stereo_format = plane_info.stereo_format;
3727 dc_plane_state->tiling_info = plane_info.tiling_info;
3728 dc_plane_state->visible = plane_info.visible;
3729 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3730 dc_plane_state->global_alpha = plane_info.global_alpha;
3731 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3732 dc_plane_state->dcc = plane_info.dcc;
6d83a32d 3733 dc_plane_state->layer_index = plane_info.layer_index; /* currently always 0 */
695af5f9 3734
e277adc5
LSL
3735 /*
3736 * Always set input transfer function, since plane state is refreshed
3737 * every time.
3738 */
cf020d49
NK
3739 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3740 if (ret)
3741 return ret;
e7b07cee 3742
cf020d49 3743 return 0;
e7b07cee
HW
3744}
3745
3ee6b26b
AD
3746static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3747 const struct dm_connector_state *dm_state,
3748 struct dc_stream_state *stream)
e7b07cee
HW
3749{
3750 enum amdgpu_rmx_type rmx_type;
3751
3752 struct rect src = { 0 }; /* viewport in composition space */
3753 struct rect dst = { 0 }; /* stream addressable area */
3754
3755 /* no mode. nothing to be done */
3756 if (!mode)
3757 return;
3758
3759 /* Full screen scaling by default */
3760 src.width = mode->hdisplay;
3761 src.height = mode->vdisplay;
3762 dst.width = stream->timing.h_addressable;
3763 dst.height = stream->timing.v_addressable;
3764
f4791779
HW
3765 if (dm_state) {
3766 rmx_type = dm_state->scaling;
3767 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3768 if (src.width * dst.height <
3769 src.height * dst.width) {
3770 /* height needs less upscaling/more downscaling */
3771 dst.width = src.width *
3772 dst.height / src.height;
3773 } else {
3774 /* width needs less upscaling/more downscaling */
3775 dst.height = src.height *
3776 dst.width / src.width;
3777 }
3778 } else if (rmx_type == RMX_CENTER) {
3779 dst = src;
e7b07cee 3780 }
e7b07cee 3781
f4791779
HW
3782 dst.x = (stream->timing.h_addressable - dst.width) / 2;
3783 dst.y = (stream->timing.v_addressable - dst.height) / 2;
e7b07cee 3784
f4791779
HW
3785 if (dm_state->underscan_enable) {
3786 dst.x += dm_state->underscan_hborder / 2;
3787 dst.y += dm_state->underscan_vborder / 2;
3788 dst.width -= dm_state->underscan_hborder;
3789 dst.height -= dm_state->underscan_vborder;
3790 }
e7b07cee
HW
3791 }
3792
3793 stream->src = src;
3794 stream->dst = dst;
3795
f1ad2f5e 3796 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
e7b07cee
HW
3797 dst.x, dst.y, dst.width, dst.height);
3798
3799}
3800
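/*
 * Editor's worked example (assumed values): RMX_ASPECT with a 1280x720
 * source on a 1920x1200 stream. Since 1280 * 1200 >= 720 * 1920, the
 * width needs less upscaling, so dst.height = 720 * 1920 / 1280 = 1080.
 * Centering then gives dst.x = 0 and dst.y = (1200 - 1080) / 2 = 60,
 * i.e. a letterboxed 1920x1080 rectangle.
 */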
3ee6b26b 3801static enum dc_color_depth
42ba01fc 3802convert_color_depth_from_display_info(const struct drm_connector *connector,
1bc22f20
SW
3803 const struct drm_connector_state *state,
3804 bool is_y420)
e7b07cee 3805{
1bc22f20 3806 uint8_t bpc;
01c22997 3807
1bc22f20
SW
3808 if (is_y420) {
3809 bpc = 8;
3810
3811 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
3812 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
3813 bpc = 16;
3814 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
3815 bpc = 12;
3816 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
3817 bpc = 10;
3818 } else {
3819 bpc = (uint8_t)connector->display_info.bpc;
3820 /* Assume 8 bpc by default if no bpc is specified. */
3821 bpc = bpc ? bpc : 8;
3822 }
e7b07cee 3823
01933ba4
NK
3824 if (!state)
3825 state = connector->state;
3826
42ba01fc 3827 if (state) {
01c22997
NK
3828 /*
3829 * Cap display bpc based on the user requested value.
3830 *
3831 * The value for state->max_bpc may not be correctly updated
3832 * depending on when the connector gets added to the state
3833 * or if this was called outside of atomic check, so it
3834 * can't be used directly.
3835 */
3836 bpc = min(bpc, state->max_requested_bpc);
3837
1825fd34
NK
3838 /* Round down to the nearest even number. */
3839 bpc = bpc - (bpc & 1);
3840 }
07e3a1cf 3841
e7b07cee
HW
3842 switch (bpc) {
3843 case 0:
1f6010a9
DF
3844 /*
3845 * Temporary Work around, DRM doesn't parse color depth for
e7b07cee
HW
3846 * EDID revision before 1.4
3847 * TODO: Fix edid parsing
3848 */
3849 return COLOR_DEPTH_888;
3850 case 6:
3851 return COLOR_DEPTH_666;
3852 case 8:
3853 return COLOR_DEPTH_888;
3854 case 10:
3855 return COLOR_DEPTH_101010;
3856 case 12:
3857 return COLOR_DEPTH_121212;
3858 case 14:
3859 return COLOR_DEPTH_141414;
3860 case 16:
3861 return COLOR_DEPTH_161616;
3862 default:
3863 return COLOR_DEPTH_UNDEFINED;
3864 }
3865}
3866
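/*
 * Editor's worked example (assumed values): an EDID reporting 12 bpc
 * with the connector's max_requested_bpc property set to 11 gives
 * min(12, 11) = 11, rounded down to the even value 10, i.e.
 * COLOR_DEPTH_101010. A bpc of 0 (pre-1.4 EDID) falls back to 8 bpc.
 */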
3ee6b26b
AD
3867static enum dc_aspect_ratio
3868get_aspect_ratio(const struct drm_display_mode *mode_in)
e7b07cee 3869{
e11d4147
LSL
3870 /* 1-1 mapping, since both enums follow the HDMI spec. */
3871 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
e7b07cee
HW
3872}
3873
3ee6b26b
AD
3874static enum dc_color_space
3875get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
e7b07cee
HW
3876{
3877 enum dc_color_space color_space = COLOR_SPACE_SRGB;
3878
3879 switch (dc_crtc_timing->pixel_encoding) {
3880 case PIXEL_ENCODING_YCBCR422:
3881 case PIXEL_ENCODING_YCBCR444:
3882 case PIXEL_ENCODING_YCBCR420:
3883 {
3884 /*
3885 * 27030 kHz is the separation point between HDTV and SDTV.
3886 * Per the HDMI spec, we use YCbCr709 above it and YCbCr601
3887 * below it.
3888 */
380604e2 3889 if (dc_crtc_timing->pix_clk_100hz > 270300) {
e7b07cee
HW
3890 if (dc_crtc_timing->flags.Y_ONLY)
3891 color_space =
3892 COLOR_SPACE_YCBCR709_LIMITED;
3893 else
3894 color_space = COLOR_SPACE_YCBCR709;
3895 } else {
3896 if (dc_crtc_timing->flags.Y_ONLY)
3897 color_space =
3898 COLOR_SPACE_YCBCR601_LIMITED;
3899 else
3900 color_space = COLOR_SPACE_YCBCR601;
3901 }
3902
3903 }
3904 break;
3905 case PIXEL_ENCODING_RGB:
3906 color_space = COLOR_SPACE_SRGB;
3907 break;
3908
3909 default:
3910 WARN_ON(1);
3911 break;
3912 }
3913
3914 return color_space;
3915}
3916
ea117312
TA
3917static bool adjust_colour_depth_from_display_info(
3918 struct dc_crtc_timing *timing_out,
3919 const struct drm_display_info *info)
400443e8 3920{
ea117312 3921 enum dc_color_depth depth = timing_out->display_color_depth;
400443e8 3922 int normalized_clk;
400443e8 3923 do {
380604e2 3924 normalized_clk = timing_out->pix_clk_100hz / 10;
400443e8
ML
3925 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3926 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3927 normalized_clk /= 2;
3928 /* Adjust the pixel clock per the HDMI spec, based on colour depth */
ea117312
TA
3929 switch (depth) {
3930 case COLOR_DEPTH_888:
3931 break;
400443e8
ML
3932 case COLOR_DEPTH_101010:
3933 normalized_clk = (normalized_clk * 30) / 24;
3934 break;
3935 case COLOR_DEPTH_121212:
3936 normalized_clk = (normalized_clk * 36) / 24;
3937 break;
3938 case COLOR_DEPTH_161616:
3939 normalized_clk = (normalized_clk * 48) / 24;
3940 break;
3941 default:
ea117312
TA
3942 /* The above depths are the only ones valid for HDMI. */
3943 return false;
400443e8 3944 }
ea117312
TA
3945 if (normalized_clk <= info->max_tmds_clock) {
3946 timing_out->display_color_depth = depth;
3947 return true;
3948 }
3949 } while (--depth > COLOR_DEPTH_666);
3950 return false;
400443e8 3951}
e7b07cee 3952
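/*
 * Editor's worked example (assumed values): a 594 MHz 4K60 HDMI mode has
 * pix_clk_100hz = 5940000, so normalized_clk = 594000 kHz. At 12 bpc
 * this scales by 36/24 to 891000 kHz and at 10 bpc by 30/24 to
 * 742500 kHz, both above a 600000 kHz max_tmds_clock, so the loop
 * settles on COLOR_DEPTH_888 at 594000 kHz. With YCbCr 4:2:0 the clock
 * halves to 297000 kHz, which is why the caller retries 4:2:0 when the
 * RGB adjustment fails.
 */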
42ba01fc
NK
3953static void fill_stream_properties_from_drm_display_mode(
3954 struct dc_stream_state *stream,
3955 const struct drm_display_mode *mode_in,
3956 const struct drm_connector *connector,
3957 const struct drm_connector_state *connector_state,
3958 const struct dc_stream_state *old_stream)
e7b07cee
HW
3959{
3960 struct dc_crtc_timing *timing_out = &stream->timing;
fe61a2f1 3961 const struct drm_display_info *info = &connector->display_info;
d4252eee 3962 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1cb1d477
WL
3963 struct hdmi_vendor_infoframe hv_frame;
3964 struct hdmi_avi_infoframe avi_frame;
e7b07cee 3965
acf83f86
WL
3966 memset(&hv_frame, 0, sizeof(hv_frame));
3967 memset(&avi_frame, 0, sizeof(avi_frame));
3968
e7b07cee
HW
3969 timing_out->h_border_left = 0;
3970 timing_out->h_border_right = 0;
3971 timing_out->v_border_top = 0;
3972 timing_out->v_border_bottom = 0;
3973 /* TODO: un-hardcode */
fe61a2f1 3974 if (drm_mode_is_420_only(info, mode_in)
ceb3dbb4 3975 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
fe61a2f1 3976 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
d4252eee
SW
3977 else if (drm_mode_is_420_also(info, mode_in)
3978 && aconnector->force_yuv420_output)
3979 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
fe61a2f1 3980 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
ceb3dbb4 3981 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
e7b07cee
HW
3982 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
3983 else
3984 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
3985
3986 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
3987 timing_out->display_color_depth = convert_color_depth_from_display_info(
1bc22f20
SW
3988 connector, connector_state,
3989 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
e7b07cee
HW
3990 timing_out->scan_type = SCANNING_TYPE_NODATA;
3991 timing_out->hdmi_vic = 0;
b333730d
BL
3992
3993 if (old_stream) {
3994 timing_out->vic = old_stream->timing.vic;
3995 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
3996 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
3997 } else {
3998 timing_out->vic = drm_match_cea_mode(mode_in);
3999 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4000 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4001 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4002 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4003 }
e7b07cee 4004
1cb1d477
WL
4005 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4006 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4007 timing_out->vic = avi_frame.video_code;
4008 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4009 timing_out->hdmi_vic = hv_frame.vic;
4010 }
4011
e7b07cee
HW
4012 timing_out->h_addressable = mode_in->crtc_hdisplay;
4013 timing_out->h_total = mode_in->crtc_htotal;
4014 timing_out->h_sync_width =
4015 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4016 timing_out->h_front_porch =
4017 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4018 timing_out->v_total = mode_in->crtc_vtotal;
4019 timing_out->v_addressable = mode_in->crtc_vdisplay;
4020 timing_out->v_front_porch =
4021 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4022 timing_out->v_sync_width =
4023 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
380604e2 4024 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
e7b07cee 4025 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
e7b07cee
HW
4026
4027 stream->output_color_space = get_output_color_space(timing_out);
4028
e43a432c
AK
4029 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4030 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
ea117312
TA
4031 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4032 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4033 drm_mode_is_420_also(info, mode_in) &&
4034 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4035 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4036 adjust_colour_depth_from_display_info(timing_out, info);
4037 }
4038 }
e7b07cee
HW
4039}
4040
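/*
 * Editor's worked example (assumed values): the standard CEA 1080p60
 * mode (crtc_clock = 148500 kHz, 2200x1125 total) yields
 * pix_clk_100hz = 1485000, h_front_porch = 2008 - 1920 = 88 and
 * h_sync_width = 2052 - 2008 = 44, matching the field derivations
 * above.
 */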
3ee6b26b
AD
4041static void fill_audio_info(struct audio_info *audio_info,
4042 const struct drm_connector *drm_connector,
4043 const struct dc_sink *dc_sink)
e7b07cee
HW
4044{
4045 int i = 0;
4046 int cea_revision = 0;
4047 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4048
4049 audio_info->manufacture_id = edid_caps->manufacturer_id;
4050 audio_info->product_id = edid_caps->product_id;
4051
4052 cea_revision = drm_connector->display_info.cea_rev;
4053
090afc1e 4054 strscpy(audio_info->display_name,
d2b2562c 4055 edid_caps->display_name,
090afc1e 4056 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
e7b07cee 4057
b830ebc9 4058 if (cea_revision >= 3) {
e7b07cee
HW
4059 audio_info->mode_count = edid_caps->audio_mode_count;
4060
4061 for (i = 0; i < audio_info->mode_count; ++i) {
4062 audio_info->modes[i].format_code =
4063 (enum audio_format_code)
4064 (edid_caps->audio_modes[i].format_code);
4065 audio_info->modes[i].channel_count =
4066 edid_caps->audio_modes[i].channel_count;
4067 audio_info->modes[i].sample_rates.all =
4068 edid_caps->audio_modes[i].sample_rate;
4069 audio_info->modes[i].sample_size =
4070 edid_caps->audio_modes[i].sample_size;
4071 }
4072 }
4073
4074 audio_info->flags.all = edid_caps->speaker_flags;
4075
4076 /* TODO: We only check progressive mode; check interlaced mode too */
b830ebc9 4077 if (drm_connector->latency_present[0]) {
e7b07cee
HW
4078 audio_info->video_latency = drm_connector->video_latency[0];
4079 audio_info->audio_latency = drm_connector->audio_latency[0];
4080 }
4081
4082 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4083
4084}
4085
3ee6b26b
AD
4086static void
4087copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4088 struct drm_display_mode *dst_mode)
e7b07cee
HW
4089{
4090 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4091 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4092 dst_mode->crtc_clock = src_mode->crtc_clock;
4093 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4094 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
b830ebc9 4095 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
e7b07cee
HW
4096 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4097 dst_mode->crtc_htotal = src_mode->crtc_htotal;
4098 dst_mode->crtc_hskew = src_mode->crtc_hskew;
4099 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4100 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4101 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4102 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4103 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4104}
4105
3ee6b26b
AD
4106static void
4107decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4108 const struct drm_display_mode *native_mode,
4109 bool scale_enabled)
e7b07cee
HW
4110{
4111 if (scale_enabled) {
4112 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4113 } else if (native_mode->clock == drm_mode->clock &&
4114 native_mode->htotal == drm_mode->htotal &&
4115 native_mode->vtotal == drm_mode->vtotal) {
4116 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4117 } else {
4118 /* no scaling and no amdgpu-inserted mode; nothing to patch */
4119 }
4120}
4121
aed15309
ML
4122static struct dc_sink *
4123create_fake_sink(struct amdgpu_dm_connector *aconnector)
2e0ac3d6 4124{
2e0ac3d6 4125 struct dc_sink_init_data sink_init_data = { 0 };
aed15309 4126 struct dc_sink *sink = NULL;
2e0ac3d6
HW
4127 sink_init_data.link = aconnector->dc_link;
4128 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4129
4130 sink = dc_sink_create(&sink_init_data);
423788c7 4131 if (!sink) {
2e0ac3d6 4132 DRM_ERROR("Failed to create sink!\n");
aed15309 4133 return NULL;
423788c7 4134 }
2e0ac3d6 4135 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
423788c7 4136
aed15309 4137 return sink;
2e0ac3d6
HW
4138}
4139
fa2123db
ML
4140static void set_multisync_trigger_params(
4141 struct dc_stream_state *stream)
4142{
4143 if (stream->triggered_crtc_reset.enabled) {
4144 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4145 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4146 }
4147}
4148
4149static void set_master_stream(struct dc_stream_state *stream_set[],
4150 int stream_count)
4151{
4152 int j, highest_rfr = 0, master_stream = 0;
4153
4154 for (j = 0; j < stream_count; j++) {
4155 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4156 int refresh_rate = 0;
4157
380604e2 4158 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
fa2123db
ML
4159 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4160 if (refresh_rate > highest_rfr) {
4161 highest_rfr = refresh_rate;
4162 master_stream = j;
4163 }
4164 }
4165 }
4166 for (j = 0; j < stream_count; j++) {
03736f4c 4167 if (stream_set[j])
fa2123db
ML
4168 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4169 }
4170}
4171
4172static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4173{
4174 int i = 0;
4175
4176 if (context->stream_count < 2)
4177 return;
4178 for (i = 0; i < context->stream_count ; i++) {
4179 if (!context->streams[i])
4180 continue;
1f6010a9
DF
4181 /*
4182 * TODO: add a function to read AMD VSDB bits and set
fa2123db 4183 * crtc_sync_master.multi_sync_enabled flag
1f6010a9 4184 * For now it's set to false
fa2123db
ML
4185 */
4186 set_multisync_trigger_params(context->streams[i]);
4187 }
4188 set_master_stream(context->streams, context->stream_count);
4189}
4190
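/*
 * Editor's worked example (assumed values) for set_master_stream(): a
 * stream with pix_clk_100hz = 1485000 and h_total * v_total =
 * 2200 * 1125 = 2475000 refreshes at 1485000 * 100 / 2475000 = 60 Hz.
 * The synchronized stream with the highest such rate becomes the
 * CRTC-reset event source for all the others.
 */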
3ee6b26b
AD
4191static struct dc_stream_state *
4192create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4193 const struct drm_display_mode *drm_mode,
b333730d
BL
4194 const struct dm_connector_state *dm_state,
4195 const struct dc_stream_state *old_stream)
e7b07cee
HW
4196{
4197 struct drm_display_mode *preferred_mode = NULL;
391ef035 4198 struct drm_connector *drm_connector;
42ba01fc
NK
4199 const struct drm_connector_state *con_state =
4200 dm_state ? &dm_state->base : NULL;
0971c40e 4201 struct dc_stream_state *stream = NULL;
e7b07cee
HW
4202 struct drm_display_mode mode = *drm_mode;
4203 bool native_mode_found = false;
b333730d
BL
4204 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4205 int mode_refresh;
58124bf8 4206 int preferred_refresh = 0;
defeb878 4207#if defined(CONFIG_DRM_AMD_DC_DCN)
df2f1015 4208 struct dsc_dec_dpcd_caps dsc_caps;
df2f1015 4209#endif
df2f1015 4210 uint32_t link_bandwidth_kbps;
b333730d 4211
aed15309 4212 struct dc_sink *sink = NULL;
b830ebc9 4213 if (aconnector == NULL) {
e7b07cee 4214 DRM_ERROR("aconnector is NULL!\n");
64245fa7 4215 return stream;
e7b07cee
HW
4216 }
4217
e7b07cee 4218 drm_connector = &aconnector->base;
2e0ac3d6 4219
f4ac176e 4220 if (!aconnector->dc_sink) {
e3fa5c4c
JFZ
4221 sink = create_fake_sink(aconnector);
4222 if (!sink)
4223 return stream;
aed15309
ML
4224 } else {
4225 sink = aconnector->dc_sink;
dcd5fb82 4226 dc_sink_retain(sink);
f4ac176e 4227 }
2e0ac3d6 4228
aed15309 4229 stream = dc_create_stream_for_sink(sink);
4562236b 4230
b830ebc9 4231 if (stream == NULL) {
e7b07cee 4232 DRM_ERROR("Failed to create stream for sink!\n");
aed15309 4233 goto finish;
e7b07cee
HW
4234 }
4235
ceb3dbb4
JL
4236 stream->dm_stream_context = aconnector;
4237
4a36fcba
WL
4238 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4239 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4240
e7b07cee
HW
4241 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4242 /* Search for preferred mode */
4243 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4244 native_mode_found = true;
4245 break;
4246 }
4247 }
4248 if (!native_mode_found)
4249 preferred_mode = list_first_entry_or_null(
4250 &aconnector->base.modes,
4251 struct drm_display_mode,
4252 head);
4253
b333730d
BL
4254 mode_refresh = drm_mode_vrefresh(&mode);
4255
b830ebc9 4256 if (preferred_mode == NULL) {
1f6010a9
DF
4257 /*
4258 * This may not be an error; the use case is when we have no
e7b07cee
HW
4259 * usermode calls to reset and set the mode upon hotplug. In this
4260 * case, we call set mode ourselves to restore the previous mode,
4261 * and the mode list may not have been filled in yet.
4262 */
f1ad2f5e 4263 DRM_DEBUG_DRIVER("No preferred mode found\n");
e7b07cee
HW
4264 } else {
4265 decide_crtc_timing_for_drm_display_mode(
4266 &mode, preferred_mode,
f4791779 4267 dm_state ? (dm_state->scaling != RMX_OFF) : false);
58124bf8 4268 preferred_refresh = drm_mode_vrefresh(preferred_mode);
e7b07cee
HW
4269 }
4270
f783577c
JFZ
4271 if (!dm_state)
4272 drm_mode_set_crtcinfo(&mode, 0);
4273
b333730d
BL
4274 /*
4275 * If scaling is enabled and the refresh rate didn't change,
4276 * we copy the VIC and polarities from the old timings.
4277 */
4278 if (!scale || mode_refresh != preferred_refresh)
4279 fill_stream_properties_from_drm_display_mode(stream,
42ba01fc 4280 &mode, &aconnector->base, con_state, NULL);
b333730d
BL
4281 else
4282 fill_stream_properties_from_drm_display_mode(stream,
42ba01fc 4283 &mode, &aconnector->base, con_state, old_stream);
b333730d 4284
df2f1015
DF
4285 stream->timing.flags.DSC = 0;
4286
4287 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
defeb878 4288#if defined(CONFIG_DRM_AMD_DC_DCN)
2af0f378
NC
4289 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4290 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
df2f1015
DF
4291 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4292 &dsc_caps);
defeb878 4293#endif
df2f1015
DF
4294 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4295 dc_link_get_link_cap(aconnector->dc_link));
4296
defeb878 4297#if defined(CONFIG_DRM_AMD_DC_DCN)
df2f1015 4298 if (dsc_caps.is_dsc_supported)
0417df16 4299 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
df2f1015 4300 &dsc_caps,
0417df16 4301 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
df2f1015
DF
4302 link_bandwidth_kbps,
4303 &stream->timing,
4304 &stream->timing.dsc_cfg))
4305 stream->timing.flags.DSC = 1;
39a4eb85 4306#endif
df2f1015 4307 }
39a4eb85 4308
e7b07cee
HW
4309 update_stream_scaling_settings(&mode, dm_state, stream);
4310
4311 fill_audio_info(
4312 &stream->audio_info,
4313 drm_connector,
aed15309 4314 sink);
e7b07cee 4315
ceb3dbb4 4316 update_stream_signal(stream, sink);
9182b4cb 4317
d832fc3b
WL
4318 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4319 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
8c322309
RL
4320 if (stream->link->psr_feature_enabled) {
4321 struct dc *core_dc = stream->link->ctx->dc;
d832fc3b 4322
8c322309
RL
4323 if (dc_is_dmcu_initialized(core_dc)) {
4324 struct dmcu *dmcu = core_dc->res_pool->dmcu;
4325
4326 stream->psr_version = dmcu->dmcu_version.psr_version;
c38cc677
MT
4327
4328 /*
4329 * Decide whether the stream supports VSC SDP colorimetry
4330 * before building the VSC infopacket.
4331 */
4332 stream->use_vsc_sdp_for_colorimetry = false;
4333 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4334 stream->use_vsc_sdp_for_colorimetry =
4335 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4336 } else {
4337 if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
4338 stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
4339 stream->use_vsc_sdp_for_colorimetry = true;
4340 }
4341 }
4342 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
8c322309
RL
4343 }
4344 }
aed15309 4345finish:
dcd5fb82 4346 dc_sink_release(sink);
9e3efe3e 4347
e7b07cee
HW
4348 return stream;
4349}
4350
7578ecda 4351static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
e7b07cee
HW
4352{
4353 drm_crtc_cleanup(crtc);
4354 kfree(crtc);
4355}
4356
4357static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3ee6b26b 4358 struct drm_crtc_state *state)
e7b07cee
HW
4359{
4360 struct dm_crtc_state *cur = to_dm_crtc_state(state);
4361
4362 /* TODO: Destroy dc_stream objects once the stream object is flattened */
4363 if (cur->stream)
4364 dc_stream_release(cur->stream);
4365
4366
4367 __drm_atomic_helper_crtc_destroy_state(state);
4368
4369
4370 kfree(state);
4371}
4372
4373static void dm_crtc_reset_state(struct drm_crtc *crtc)
4374{
4375 struct dm_crtc_state *state;
4376
4377 if (crtc->state)
4378 dm_crtc_destroy_state(crtc, crtc->state);
4379
4380 state = kzalloc(sizeof(*state), GFP_KERNEL);
4381 if (WARN_ON(!state))
4382 return;
4383
4384 crtc->state = &state->base;
4385 crtc->state->crtc = crtc;
4386
4387}
4388
4389static struct drm_crtc_state *
4390dm_crtc_duplicate_state(struct drm_crtc *crtc)
4391{
4392 struct dm_crtc_state *state, *cur;
4393
4394 cur = to_dm_crtc_state(crtc->state);
4395
4396 if (WARN_ON(!crtc->state))
4397 return NULL;
4398
2004f45e 4399 state = kzalloc(sizeof(*state), GFP_KERNEL);
2a55f096
ES
4400 if (!state)
4401 return NULL;
e7b07cee
HW
4402
4403 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4404
4405 if (cur->stream) {
4406 state->stream = cur->stream;
4407 dc_stream_retain(state->stream);
4408 }
4409
d6ef9b41
NK
4410 state->active_planes = cur->active_planes;
4411 state->interrupts_enabled = cur->interrupts_enabled;
180db303 4412 state->vrr_params = cur->vrr_params;
98e6436d 4413 state->vrr_infopacket = cur->vrr_infopacket;
c1ee92f9 4414 state->abm_level = cur->abm_level;
bb47de73
NK
4415 state->vrr_supported = cur->vrr_supported;
4416 state->freesync_config = cur->freesync_config;
14b25846 4417 state->crc_src = cur->crc_src;
cf020d49
NK
4418 state->cm_has_degamma = cur->cm_has_degamma;
4419 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
98e6436d 4420
e7b07cee
HW
4421 /* TODO: Duplicate dc_stream once the stream object is flattened */
4422
4423 return &state->base;
4424}
4425
d2574c33
MK
4426static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4427{
4428 enum dc_irq_source irq_source;
4429 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4430 struct amdgpu_device *adev = crtc->dev->dev_private;
4431 int rc;
4432
4433 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4434
4435 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4436
4437 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4438 acrtc->crtc_id, enable ? "en" : "dis", rc);
4439 return rc;
4440}
589d2739
HW
4441
4442static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4443{
4444 enum dc_irq_source irq_source;
4445 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4446 struct amdgpu_device *adev = crtc->dev->dev_private;
d2574c33
MK
4447 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4448 int rc = 0;
4449
4450 if (enable) {
4451 /* vblank irq on -> Only need vupdate irq in vrr mode */
4452 if (amdgpu_dm_vrr_active(acrtc_state))
4453 rc = dm_set_vupdate_irq(crtc, true);
4454 } else {
4455 /* vblank irq off -> vupdate irq off */
4456 rc = dm_set_vupdate_irq(crtc, false);
4457 }
4458
4459 if (rc)
4460 return rc;
589d2739
HW
4461
4462 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
a0e30392 4463 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
589d2739
HW
4464}
4465
4466static int dm_enable_vblank(struct drm_crtc *crtc)
4467{
4468 return dm_set_vblank(crtc, true);
4469}
4470
4471static void dm_disable_vblank(struct drm_crtc *crtc)
4472{
4473 dm_set_vblank(crtc, false);
4474}
4475
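/*
 * Editor's note on the ordering above: enabling vblank turns the VUPDATE
 * interrupt on only while VRR is active, which is when the vertical
 * update position diverges from vblank; disabling vblank always turns
 * VUPDATE off first so no stale vupdate interrupt fires afterwards.
 */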
e7b07cee
HW
4476 /* Implemented only the options currently available for the driver */
4477static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4478 .reset = dm_crtc_reset_state,
4479 .destroy = amdgpu_dm_crtc_destroy,
4480 .gamma_set = drm_atomic_helper_legacy_gamma_set,
4481 .set_config = drm_atomic_helper_set_config,
4482 .page_flip = drm_atomic_helper_page_flip,
4483 .atomic_duplicate_state = dm_crtc_duplicate_state,
4484 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 4485 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 4486 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
8fb843d1 4487 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
e3eff4b5 4488 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
589d2739
HW
4489 .enable_vblank = dm_enable_vblank,
4490 .disable_vblank = dm_disable_vblank,
e3eff4b5 4491 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
e7b07cee
HW
4492};
4493
4494static enum drm_connector_status
4495amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4496{
4497 bool connected;
c84dec2f 4498 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 4499
1f6010a9
DF
4500 /*
4501 * Notes:
e7b07cee
HW
4502 * 1. This interface is NOT called in context of HPD irq.
4503 * 2. This interface *is called* in context of user-mode ioctl. Which
1f6010a9
DF
4504 * makes it a bad place for *any* MST-related activity.
4505 */
e7b07cee 4506
8580d60b
HW
4507 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4508 !aconnector->fake_enable)
e7b07cee
HW
4509 connected = (aconnector->dc_sink != NULL);
4510 else
4511 connected = (aconnector->base.force == DRM_FORCE_ON);
4512
4513 return (connected ? connector_status_connected :
4514 connector_status_disconnected);
4515}
4516
3ee6b26b
AD
4517int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4518 struct drm_connector_state *connector_state,
4519 struct drm_property *property,
4520 uint64_t val)
e7b07cee
HW
4521{
4522 struct drm_device *dev = connector->dev;
4523 struct amdgpu_device *adev = dev->dev_private;
4524 struct dm_connector_state *dm_old_state =
4525 to_dm_connector_state(connector->state);
4526 struct dm_connector_state *dm_new_state =
4527 to_dm_connector_state(connector_state);
4528
4529 int ret = -EINVAL;
4530
4531 if (property == dev->mode_config.scaling_mode_property) {
4532 enum amdgpu_rmx_type rmx_type;
4533
4534 switch (val) {
4535 case DRM_MODE_SCALE_CENTER:
4536 rmx_type = RMX_CENTER;
4537 break;
4538 case DRM_MODE_SCALE_ASPECT:
4539 rmx_type = RMX_ASPECT;
4540 break;
4541 case DRM_MODE_SCALE_FULLSCREEN:
4542 rmx_type = RMX_FULL;
4543 break;
4544 case DRM_MODE_SCALE_NONE:
4545 default:
4546 rmx_type = RMX_OFF;
4547 break;
4548 }
4549
4550 if (dm_old_state->scaling == rmx_type)
4551 return 0;
4552
4553 dm_new_state->scaling = rmx_type;
4554 ret = 0;
4555 } else if (property == adev->mode_info.underscan_hborder_property) {
4556 dm_new_state->underscan_hborder = val;
4557 ret = 0;
4558 } else if (property == adev->mode_info.underscan_vborder_property) {
4559 dm_new_state->underscan_vborder = val;
4560 ret = 0;
4561 } else if (property == adev->mode_info.underscan_property) {
4562 dm_new_state->underscan_enable = val;
4563 ret = 0;
c1ee92f9
DF
4564 } else if (property == adev->mode_info.abm_level_property) {
4565 dm_new_state->abm_level = val;
4566 ret = 0;
e7b07cee
HW
4567 }
4568
4569 return ret;
4570}
4571
3ee6b26b
AD
4572int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4573 const struct drm_connector_state *state,
4574 struct drm_property *property,
4575 uint64_t *val)
e7b07cee
HW
4576{
4577 struct drm_device *dev = connector->dev;
4578 struct amdgpu_device *adev = dev->dev_private;
4579 struct dm_connector_state *dm_state =
4580 to_dm_connector_state(state);
4581 int ret = -EINVAL;
4582
4583 if (property == dev->mode_config.scaling_mode_property) {
4584 switch (dm_state->scaling) {
4585 case RMX_CENTER:
4586 *val = DRM_MODE_SCALE_CENTER;
4587 break;
4588 case RMX_ASPECT:
4589 *val = DRM_MODE_SCALE_ASPECT;
4590 break;
4591 case RMX_FULL:
4592 *val = DRM_MODE_SCALE_FULLSCREEN;
4593 break;
4594 case RMX_OFF:
4595 default:
4596 *val = DRM_MODE_SCALE_NONE;
4597 break;
4598 }
4599 ret = 0;
4600 } else if (property == adev->mode_info.underscan_hborder_property) {
4601 *val = dm_state->underscan_hborder;
4602 ret = 0;
4603 } else if (property == adev->mode_info.underscan_vborder_property) {
4604 *val = dm_state->underscan_vborder;
4605 ret = 0;
4606 } else if (property == adev->mode_info.underscan_property) {
4607 *val = dm_state->underscan_enable;
4608 ret = 0;
c1ee92f9
DF
4609 } else if (property == adev->mode_info.abm_level_property) {
4610 *val = dm_state->abm_level;
4611 ret = 0;
e7b07cee 4612 }
c1ee92f9 4613
e7b07cee
HW
4614 return ret;
4615}
4616
526c654a
ED
4617static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4618{
4619 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4620
4621 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4622}
4623
7578ecda 4624static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 4625{
c84dec2f 4626 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
4627 const struct dc_link *link = aconnector->dc_link;
4628 struct amdgpu_device *adev = connector->dev->dev_private;
4629 struct amdgpu_display_manager *dm = &adev->dm;
ada8ce15 4630
e7b07cee
HW
4631#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4632 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4633
89fc8d4e 4634 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5cd29ed0
HW
4635 link->type != dc_connection_none &&
4636 dm->backlight_dev) {
4637 backlight_device_unregister(dm->backlight_dev);
4638 dm->backlight_dev = NULL;
e7b07cee
HW
4639 }
4640#endif
dcd5fb82
MF
4641
4642 if (aconnector->dc_em_sink)
4643 dc_sink_release(aconnector->dc_em_sink);
4644 aconnector->dc_em_sink = NULL;
4645 if (aconnector->dc_sink)
4646 dc_sink_release(aconnector->dc_sink);
4647 aconnector->dc_sink = NULL;
4648
e86e8947 4649 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
4650 drm_connector_unregister(connector);
4651 drm_connector_cleanup(connector);
526c654a
ED
4652 if (aconnector->i2c) {
4653 i2c_del_adapter(&aconnector->i2c->base);
4654 kfree(aconnector->i2c);
4655 }
7daec99f 4656 kfree(aconnector->dm_dp_aux.aux.name);
526c654a 4657
e7b07cee
HW
4658 kfree(connector);
4659}
4660
4661void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4662{
4663 struct dm_connector_state *state =
4664 to_dm_connector_state(connector->state);
4665
df099b9b
LSL
4666 if (connector->state)
4667 __drm_atomic_helper_connector_destroy_state(connector->state);
4668
e7b07cee
HW
4669 kfree(state);
4670
4671 state = kzalloc(sizeof(*state), GFP_KERNEL);
4672
4673 if (state) {
4674 state->scaling = RMX_OFF;
4675 state->underscan_enable = false;
4676 state->underscan_hborder = 0;
4677 state->underscan_vborder = 0;
01933ba4 4678 state->base.max_requested_bpc = 8;
3261e013
ML
4679 state->vcpi_slots = 0;
4680 state->pbn = 0;
c3e50f89
NK
4681 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4682 state->abm_level = amdgpu_dm_abm_level;
4683
df099b9b 4684 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
4685 }
4686}
4687
3ee6b26b
AD
4688struct drm_connector_state *
4689amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
4690{
4691 struct dm_connector_state *state =
4692 to_dm_connector_state(connector->state);
4693
4694 struct dm_connector_state *new_state =
4695 kmemdup(state, sizeof(*state), GFP_KERNEL);
4696
98e6436d
AK
4697 if (!new_state)
4698 return NULL;
e7b07cee 4699
98e6436d
AK
4700 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4701
4702 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 4703 new_state->abm_level = state->abm_level;
922454c2
NK
4704 new_state->scaling = state->scaling;
4705 new_state->underscan_enable = state->underscan_enable;
4706 new_state->underscan_hborder = state->underscan_hborder;
4707 new_state->underscan_vborder = state->underscan_vborder;
3261e013
ML
4708 new_state->vcpi_slots = state->vcpi_slots;
4709 new_state->pbn = state->pbn;
98e6436d 4710 return &new_state->base;
e7b07cee
HW
4711}
4712
14f04fa4
AD
4713static int
4714amdgpu_dm_connector_late_register(struct drm_connector *connector)
4715{
4716 struct amdgpu_dm_connector *amdgpu_dm_connector =
4717 to_amdgpu_dm_connector(connector);
00a8037e 4718 int r;
14f04fa4 4719
00a8037e
AD
4720 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4721 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4722 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
4723 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
4724 if (r)
4725 return r;
4726 }
4727
4728#if defined(CONFIG_DEBUG_FS)
14f04fa4
AD
4729 connector_debugfs_init(amdgpu_dm_connector);
4730#endif
4731
4732 return 0;
4733}
4734
e7b07cee
HW
4735static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4736 .reset = amdgpu_dm_connector_funcs_reset,
4737 .detect = amdgpu_dm_connector_detect,
4738 .fill_modes = drm_helper_probe_single_connector_modes,
4739 .destroy = amdgpu_dm_connector_destroy,
4740 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4741 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4742 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a 4743 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
14f04fa4 4744 .late_register = amdgpu_dm_connector_late_register,
526c654a 4745 .early_unregister = amdgpu_dm_connector_unregister
e7b07cee
HW
4746};
4747
e7b07cee
HW
4748static int get_modes(struct drm_connector *connector)
4749{
4750 return amdgpu_dm_connector_get_modes(connector);
4751}
4752
c84dec2f 4753static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
4754{
4755 struct dc_sink_init_data init_params = {
4756 .link = aconnector->dc_link,
4757 .sink_signal = SIGNAL_TYPE_VIRTUAL
4758 };
70e8ffc5 4759 struct edid *edid;
e7b07cee 4760
a89ff457 4761 if (!aconnector->base.edid_blob_ptr) {
e7b07cee
HW
4762 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
4763 aconnector->base.name);
4764
4765 aconnector->base.force = DRM_FORCE_OFF;
4766 aconnector->base.override_edid = false;
4767 return;
4768 }
4769
70e8ffc5
HW
4770 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4771
e7b07cee
HW
4772 aconnector->edid = edid;
4773
4774 aconnector->dc_em_sink = dc_link_add_remote_sink(
4775 aconnector->dc_link,
4776 (uint8_t *)edid,
4777 (edid->extensions + 1) * EDID_LENGTH,
4778 &init_params);
4779
dcd5fb82 4780 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
4781 aconnector->dc_sink = aconnector->dc_link->local_sink ?
4782 aconnector->dc_link->local_sink :
4783 aconnector->dc_em_sink;
dcd5fb82
MF
4784 dc_sink_retain(aconnector->dc_sink);
4785 }
e7b07cee
HW
4786}
4787
c84dec2f 4788static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
4789{
4790 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4791
1f6010a9
DF
4792 /*
4793 * In case of a headless boot with force-on for a DP-managed connector,
e7b07cee
HW
4794 * these settings have to be != 0 to get an initial modeset
4795 */
4796 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4797 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4798 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4799 }
4800
4801
4802 aconnector->base.override_edid = true;
4803 create_eml_sink(aconnector);
4804}
4805
ba9ca088 4806enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 4807 struct drm_display_mode *mode)
e7b07cee
HW
4808{
4809 int result = MODE_ERROR;
4810 struct dc_sink *dc_sink;
4811 struct amdgpu_device *adev = connector->dev->dev_private;
4812 /* TODO: Unhardcode stream count */
0971c40e 4813 struct dc_stream_state *stream;
c84dec2f 4814 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
a39438f0 4815 enum dc_status dc_result = DC_OK;
e7b07cee
HW
4816
4817 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
4818 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
4819 return result;
4820
1f6010a9
DF
4821 /*
4822 * Only run this the first time mode_valid is called, to initialize
e7b07cee
HW
4823 * EDID mgmt
4824 */
4825 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
4826 !aconnector->dc_em_sink)
4827 handle_edid_mgmt(aconnector);
4828
c84dec2f 4829 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 4830
b830ebc9 4831 if (dc_sink == NULL) {
e7b07cee
HW
4832 DRM_ERROR("dc_sink is NULL!\n");
4833 goto fail;
4834 }
4835
b333730d 4836 stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
b830ebc9 4837 if (stream == NULL) {
e7b07cee
HW
4838 DRM_ERROR("Failed to create stream for sink!\n");
4839 goto fail;
4840 }
4841
a39438f0
HW
4842 dc_result = dc_validate_stream(adev->dm.dc, stream);
4843
4844 if (dc_result == DC_OK)
e7b07cee 4845 result = MODE_OK;
a39438f0 4846 else
9f921b14 4847 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
a39438f0 4848 mode->hdisplay,
26e99ba6 4849 mode->vdisplay,
9f921b14
HW
4850 mode->clock,
4851 dc_result);
e7b07cee
HW
4852
4853 dc_stream_release(stream);
4854
4855fail:
4856 /* TODO: error handling */
4857 return result;
4858}
4859
88694af9
NK
4860static int fill_hdr_info_packet(const struct drm_connector_state *state,
4861 struct dc_info_packet *out)
4862{
4863 struct hdmi_drm_infoframe frame;
4864 unsigned char buf[30]; /* 26 + 4 */
4865 ssize_t len;
4866 int ret, i;
4867
4868 memset(out, 0, sizeof(*out));
4869
4870 if (!state->hdr_output_metadata)
4871 return 0;
4872
4873 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
4874 if (ret)
4875 return ret;
4876
4877 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
4878 if (len < 0)
4879 return (int)len;
4880
4881 /* Static metadata is a fixed 26 bytes + 4 byte header. */
4882 if (len != 30)
4883 return -EINVAL;
4884
4885 /* Prepare the infopacket for DC. */
4886 switch (state->connector->connector_type) {
4887 case DRM_MODE_CONNECTOR_HDMIA:
4888 out->hb0 = 0x87; /* type */
4889 out->hb1 = 0x01; /* version */
4890 out->hb2 = 0x1A; /* length */
4891 out->sb[0] = buf[3]; /* checksum */
4892 i = 1;
4893 break;
4894
4895 case DRM_MODE_CONNECTOR_DisplayPort:
4896 case DRM_MODE_CONNECTOR_eDP:
4897 out->hb0 = 0x00; /* sdp id, zero */
4898 out->hb1 = 0x87; /* type */
4899 out->hb2 = 0x1D; /* payload len - 1 */
4900 out->hb3 = (0x13 << 2); /* sdp version */
4901 out->sb[0] = 0x01; /* version */
4902 out->sb[1] = 0x1A; /* length */
4903 i = 2;
4904 break;
4905
4906 default:
4907 return -EINVAL;
4908 }
4909
4910 memcpy(&out->sb[i], &buf[4], 26);
4911 out->valid = true;
4912
4913 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
4914 sizeof(out->sb), false);
4915
4916 return 0;
4917}
4918
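/*
 * Editor's worked layout (derived from the code above): the packed DRM
 * infoframe buf[] is 4 header bytes plus 26 payload bytes. For HDMI,
 * buf[3] (the checksum) lands in sb[0] and buf[4..29] in sb[1..26]; for
 * DP/eDP, sb[0] and sb[1] carry the SDP version/length bytes 0x01/0x1A
 * and the same 26 payload bytes follow from sb[2].
 */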
4919static bool
4920is_hdr_metadata_different(const struct drm_connector_state *old_state,
4921 const struct drm_connector_state *new_state)
4922{
4923 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4924 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4925
4926 if (old_blob != new_blob) {
4927 if (old_blob && new_blob &&
4928 old_blob->length == new_blob->length)
4929 return memcmp(old_blob->data, new_blob->data,
4930 old_blob->length);
4931
4932 return true;
4933 }
4934
4935 return false;
4936}
4937
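/*
 * Editor's note: when both blobs exist with equal length, the memcmp()
 * result is returned directly and any nonzero difference reads as true;
 * a NULL-to-blob transition or a length change is always different.
 */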
4938static int
4939amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 4940 struct drm_atomic_state *state)
88694af9 4941{
51e857af
SP
4942 struct drm_connector_state *new_con_state =
4943 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
4944 struct drm_connector_state *old_con_state =
4945 drm_atomic_get_old_connector_state(state, conn);
4946 struct drm_crtc *crtc = new_con_state->crtc;
4947 struct drm_crtc_state *new_crtc_state;
4948 int ret;
4949
4950 if (!crtc)
4951 return 0;
4952
4953 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
4954 struct dc_info_packet hdr_infopacket;
4955
4956 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
4957 if (ret)
4958 return ret;
4959
4960 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
4961 if (IS_ERR(new_crtc_state))
4962 return PTR_ERR(new_crtc_state);
4963
4964 /*
4965 * DC considers the stream backends changed if the
4966 * static metadata changes. Forcing the modeset also
4967 * gives a simple way for userspace to switch from
b232d4ed
NK
4968 * 8bpc to 10bpc when setting the metadata to enter
4969 * or exit HDR.
4970 *
4971 * Changing the static metadata after it's been
4972 * set is permissible, however. So only force a
4973 * modeset if we're entering or exiting HDR.
88694af9 4974 */
b232d4ed
NK
4975 new_crtc_state->mode_changed =
4976 !old_con_state->hdr_output_metadata ||
4977 !new_con_state->hdr_output_metadata;
88694af9
NK
4978 }
4979
4980 return 0;
4981}
4982
e7b07cee
HW
4983static const struct drm_connector_helper_funcs
4984amdgpu_dm_connector_helper_funcs = {
4985 /*
1f6010a9 4986 * If a second, bigger display is hotplugged in fbcon mode, its bigger-resolution
b830ebc9 4987 * modes will be filtered out by drm_mode_validate_size(), and those modes
1f6010a9 4988 * are missing after the user starts lightdm. So we need to renew the mode list
b830ebc9
HW
4989 * in the get_modes callback, not just return the mode count
4990 */
e7b07cee
HW
4991 .get_modes = get_modes,
4992 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 4993 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
4994};
4995
4996static void dm_crtc_helper_disable(struct drm_crtc *crtc)
4997{
4998}
4999
bc92c065
NK
5000static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5001{
5002 struct drm_device *dev = new_crtc_state->crtc->dev;
5003 struct drm_plane *plane;
5004
5005 drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5006 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5007 return true;
5008 }
5009
5010 return false;
5011}
5012
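/*
 * Count the non-cursor planes that will be enabled on the CRTC. Planes
 * without a new state in this commit previously passed validation and
 * are counted as enabled; new states count only if they have an fb.
 */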
5013static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5014{
5015 struct drm_atomic_state *state = new_crtc_state->state;
5016 struct drm_plane *plane;
5017 int num_active = 0;
5018
5019 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5020 struct drm_plane_state *new_plane_state;
5021
5022 /* Cursor planes are "fake". */
5023 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5024 continue;
5025
5026 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5027
5028 if (!new_plane_state) {
5029 /*
5030 * The plane is enabled on the CRTC and hasn't changed
5031 * state. This means that it previously passed
5032 * validation and is therefore enabled.
5033 */
5034 num_active += 1;
5035 continue;
5036 }
5037
5038 /* We need a framebuffer to be considered enabled. */
5039 num_active += (new_plane_state->fb != NULL);
5040 }
5041
5042 return num_active;
5043}
5044
5045/*
5046 * Sets whether interrupts should be enabled on a specific CRTC.
5047 * We require that the stream be enabled and that there exist active
5048 * DC planes on the stream.
5049 */
5050static void
5051dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5052 struct drm_crtc_state *new_crtc_state)
5053{
5054 struct dm_crtc_state *dm_new_crtc_state =
5055 to_dm_crtc_state(new_crtc_state);
5056
5057 dm_new_crtc_state->active_planes = 0;
5058 dm_new_crtc_state->interrupts_enabled = false;
5059
5060 if (!dm_new_crtc_state->stream)
5061 return;
5062
5063 dm_new_crtc_state->active_planes =
5064 count_crtc_active_planes(new_crtc_state);
5065
5066 dm_new_crtc_state->interrupts_enabled =
5067 dm_new_crtc_state->active_planes > 0;
5068}
5069
5070static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5071 struct drm_crtc_state *state)
5072{
5073 struct amdgpu_device *adev = crtc->dev->dev_private;
5074 struct dc *dc = adev->dm.dc;
5075 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5076 int ret = -EINVAL;
5077
5078 /*
5079 * Update interrupt state for the CRTC. This needs to happen whenever
5080 * the CRTC has changed or whenever any of its planes have changed.
5081 * Atomic check satisfies both of these requirements since the CRTC
5082 * is added to the state by DRM during drm_atomic_helper_check_planes.
5083 */
5084 dm_update_crtc_interrupt_state(crtc, state);
5085
5086 if (unlikely(!dm_crtc_state->stream &&
5087 modeset_required(state, NULL, dm_crtc_state->stream))) {
5088 WARN_ON(1);
5089 return ret;
5090 }
5091
5092 /* In some use cases, like reset, no stream is attached */
5093 if (!dm_crtc_state->stream)
5094 return 0;
5095
5096 /*
5097 * We want at least one hardware plane enabled to use
5098 * the stream with a cursor enabled.
5099 */
5100 if (state->enable && state->active &&
5101 does_crtc_have_active_cursor(state) &&
5102 dm_crtc_state->active_planes == 0)
5103 return -EINVAL;
5104
5105 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5106 return 0;
5107
5108 return ret;
5109}
5110
5111static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5112 const struct drm_display_mode *mode,
5113 struct drm_display_mode *adjusted_mode)
5114{
5115 return true;
5116}
5117
5118static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5119 .disable = dm_crtc_helper_disable,
5120 .atomic_check = dm_crtc_helper_atomic_check,
5121 .mode_fixup = dm_crtc_helper_mode_fixup,
5122 .get_scanout_position = amdgpu_crtc_get_scanout_position,
5123};
5124
5125static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5126{
5127
5128}
5129
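/* Translate a DC color depth enum into bits per color component. */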
5130static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5131{
5132 switch (display_color_depth) {
5133 case COLOR_DEPTH_666:
5134 return 6;
5135 case COLOR_DEPTH_888:
5136 return 8;
5137 case COLOR_DEPTH_101010:
5138 return 10;
5139 case COLOR_DEPTH_121212:
5140 return 12;
5141 case COLOR_DEPTH_141414:
5142 return 14;
5143 case COLOR_DEPTH_161616:
5144 return 16;
5145 default:
5146 break;
5147 }
5148 return 0;
5149}
5150
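/*
 * Encoder atomic_check for DP MST: recompute the connector's PBN from
 * the adjusted mode and color depth, then reserve VCPI slots for it on
 * the MST topology manager.
 */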
5151static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5152 struct drm_crtc_state *crtc_state,
5153 struct drm_connector_state *conn_state)
5154{
5155 struct drm_atomic_state *state = crtc_state->state;
5156 struct drm_connector *connector = conn_state->connector;
5157 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5158 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5159 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5160 struct drm_dp_mst_topology_mgr *mst_mgr;
5161 struct drm_dp_mst_port *mst_port;
5162 enum dc_color_depth color_depth;
5163 int clock, bpp = 0;
5164 bool is_y420 = false;
5165
5166 if (!aconnector->port || !aconnector->dc_sink)
5167 return 0;
5168
5169 mst_port = aconnector->port;
5170 mst_mgr = &aconnector->mst_port->mst_mgr;
5171
5172 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5173 return 0;
5174
5175 if (!state->duplicated) {
5176 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5177 aconnector->force_yuv420_output;
5178 color_depth = convert_color_depth_from_display_info(connector, conn_state,
5179 is_y420);
5180 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5181 clock = adjusted_mode->clock;
5182 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5183 }
5184 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5185 mst_mgr,
5186 mst_port,
5187 dm_new_connector_state->pbn,
5188 0);
5189 if (dm_new_connector_state->vcpi_slots < 0) {
5190 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5191 return dm_new_connector_state->vcpi_slots;
5192 }
5193 return 0;
5194}
5195
5196const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5197 .disable = dm_encoder_helper_disable,
5198 .atomic_check = dm_encoder_helper_atomic_check
5199};
5200
5201#if defined(CONFIG_DRM_AMD_DC_DCN)
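/*
 * For every MST connector in the state, find its DC stream and refresh
 * the VCPI allocation: DSC streams get a PBN recomputed from the DSC
 * target bpp, while non-DSC streams keep their previously computed PBN.
 */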
5202static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5203 struct dc_state *dc_state)
5204{
5205 struct dc_stream_state *stream = NULL;
5206 struct drm_connector *connector;
5207 struct drm_connector_state *new_con_state, *old_con_state;
5208 struct amdgpu_dm_connector *aconnector;
5209 struct dm_connector_state *dm_conn_state;
5210 int i, j, clock, bpp;
5211 int vcpi, pbn_div, pbn = 0;
5212
5213 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5214
5215 aconnector = to_amdgpu_dm_connector(connector);
5216
5217 if (!aconnector->port)
5218 continue;
5219
5220 if (!new_con_state || !new_con_state->crtc)
5221 continue;
5222
5223 dm_conn_state = to_dm_connector_state(new_con_state);
5224
5225 for (j = 0; j < dc_state->stream_count; j++) {
5226 stream = dc_state->streams[j];
5227 if (!stream)
5228 continue;
5229
5230 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5231 break;
5232
5233 stream = NULL;
5234 }
5235
5236 if (!stream)
5237 continue;
5238
5239 if (stream->timing.flags.DSC != 1) {
5240 drm_dp_mst_atomic_enable_dsc(state,
5241 aconnector->port,
5242 dm_conn_state->pbn,
5243 0,
5244 false);
5245 continue;
5246 }
5247
5248 pbn_div = dm_mst_get_pbn_divider(stream->link);
5249 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5250 clock = stream->timing.pix_clk_100hz / 10;
5251 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5252 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5253 aconnector->port,
5254 pbn, pbn_div,
5255 true);
5256 if (vcpi < 0)
5257 return vcpi;
5258
5259 dm_conn_state->pbn = pbn;
5260 dm_conn_state->vcpi_slots = vcpi;
5261 }
5262 return 0;
5263}
5264#endif
5265
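/* Reset the plane to a freshly allocated, zeroed dm_plane_state. */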
5266static void dm_drm_plane_reset(struct drm_plane *plane)
5267{
5268 struct dm_plane_state *amdgpu_state = NULL;
5269
5270 if (plane->state)
5271 plane->funcs->atomic_destroy_state(plane, plane->state);
5272
5273 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5274 WARN_ON(amdgpu_state == NULL);
5275
5276 if (amdgpu_state)
5277 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5278}
5279
5280static struct drm_plane_state *
5281dm_drm_plane_duplicate_state(struct drm_plane *plane)
5282{
5283 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5284
5285 old_dm_plane_state = to_dm_plane_state(plane->state);
5286 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5287 if (!dm_plane_state)
5288 return NULL;
5289
5290 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5291
5292 if (old_dm_plane_state->dc_state) {
5293 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5294 dc_plane_state_retain(dm_plane_state->dc_state);
5295 }
5296
5297 return &dm_plane_state->base;
5298}
5299
5300void dm_drm_plane_destroy_state(struct drm_plane *plane,
5301 struct drm_plane_state *state)
5302{
5303 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5304
5305 if (dm_plane_state->dc_state)
5306 dc_plane_state_release(dm_plane_state->dc_state);
5307
5308 drm_atomic_helper_plane_destroy_state(plane, state);
5309}
5310
5311static const struct drm_plane_funcs dm_plane_funcs = {
5312 .update_plane = drm_atomic_helper_update_plane,
5313 .disable_plane = drm_atomic_helper_disable_plane,
5314 .destroy = drm_primary_helper_destroy,
5315 .reset = dm_drm_plane_reset,
5316 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5317 .atomic_destroy_state = dm_drm_plane_destroy_state,
5318};
5319
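/*
 * prepare_fb: reserve and pin the framebuffer BO in a supported domain,
 * map it into GART, and refresh the DC plane's buffer attributes
 * (tiling, DCC, address) when a new DC plane state is attached.
 */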
5320static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5321 struct drm_plane_state *new_state)
5322{
5323 struct amdgpu_framebuffer *afb;
5324 struct drm_gem_object *obj;
5325 struct amdgpu_device *adev;
5326 struct amdgpu_bo *rbo;
5327 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5328 struct list_head list;
5329 struct ttm_validate_buffer tv;
5330 struct ww_acquire_ctx ticket;
5331 uint64_t tiling_flags;
5332 uint32_t domain;
5333 int r;
5334 bool force_disable_dcc = false;
5335
5336 dm_plane_state_old = to_dm_plane_state(plane->state);
5337 dm_plane_state_new = to_dm_plane_state(new_state);
5338
5339 if (!new_state->fb) {
5340 DRM_DEBUG_DRIVER("No FB bound\n");
5341 return 0;
5342 }
5343
5344 afb = to_amdgpu_framebuffer(new_state->fb);
5345 obj = new_state->fb->obj[0];
5346 rbo = gem_to_amdgpu_bo(obj);
5347 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5348 INIT_LIST_HEAD(&list);
5349
5350 tv.bo = &rbo->tbo;
5351 tv.num_shared = 1;
5352 list_add(&tv.head, &list);
5353
5354 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5355 if (r) {
5356 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5357 return r;
5358 }
5359
5360 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5361 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5362 else
5363 domain = AMDGPU_GEM_DOMAIN_VRAM;
5364
5365 r = amdgpu_bo_pin(rbo, domain);
5366 if (unlikely(r != 0)) {
5367 if (r != -ERESTARTSYS)
5368 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5369 ttm_eu_backoff_reservation(&ticket, &list);
5370 return r;
5371 }
5372
5373 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5374 if (unlikely(r != 0)) {
5375 amdgpu_bo_unpin(rbo);
5376 ttm_eu_backoff_reservation(&ticket, &list);
5377 DRM_ERROR("%p bind failed\n", rbo);
5378 return r;
5379 }
5380
5381 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5382
5383 ttm_eu_backoff_reservation(&ticket, &list);
5384
5385 afb->address = amdgpu_bo_gpu_offset(rbo);
5386
5387 amdgpu_bo_ref(rbo);
5388
5389 if (dm_plane_state_new->dc_state &&
5390 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5391 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5392
5393 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5394 fill_plane_buffer_attributes(
5395 adev, afb, plane_state->format, plane_state->rotation,
5396 tiling_flags, &plane_state->tiling_info,
5397 &plane_state->plane_size, &plane_state->dcc,
5398 &plane_state->address,
5399 force_disable_dcc);
5400 }
5401
5402 return 0;
5403}
5404
5405static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5406 struct drm_plane_state *old_state)
5407{
5408 struct amdgpu_bo *rbo;
5409 int r;
5410
5411 if (!old_state->fb)
5412 return;
5413
5414 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5415 r = amdgpu_bo_reserve(rbo, false);
5416 if (unlikely(r)) {
5417 DRM_ERROR("failed to reserve rbo before unpin\n");
5418 return;
5419 }
5420
5421 amdgpu_bo_unpin(rbo);
5422 amdgpu_bo_unreserve(rbo);
5423 amdgpu_bo_unref(&rbo);
5424}
5425
5426static int dm_plane_atomic_check(struct drm_plane *plane,
5427 struct drm_plane_state *state)
5428{
5429 struct amdgpu_device *adev = plane->dev->dev_private;
5430 struct dc *dc = adev->dm.dc;
5431 struct dm_plane_state *dm_plane_state;
5432 struct dc_scaling_info scaling_info;
5433 int ret;
5434
5435 dm_plane_state = to_dm_plane_state(state);
5436
5437 if (!dm_plane_state->dc_state)
5438 return 0;
5439
5440 ret = fill_dc_scaling_info(state, &scaling_info);
5441 if (ret)
5442 return ret;
5443
5444 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5445 return 0;
5446
5447 return -EINVAL;
5448}
5449
5450static int dm_plane_atomic_async_check(struct drm_plane *plane,
5451 struct drm_plane_state *new_plane_state)
5452{
5453 /* Only support async updates on cursor planes. */
5454 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5455 return -EINVAL;
5456
5457 return 0;
5458}
5459
5460static void dm_plane_atomic_async_update(struct drm_plane *plane,
5461 struct drm_plane_state *new_state)
5462{
5463 struct drm_plane_state *old_state =
5464 drm_atomic_get_old_plane_state(new_state->state, plane);
5465
5466 swap(plane->state->fb, new_state->fb);
5467
5468 plane->state->src_x = new_state->src_x;
5469 plane->state->src_y = new_state->src_y;
5470 plane->state->src_w = new_state->src_w;
5471 plane->state->src_h = new_state->src_h;
5472 plane->state->crtc_x = new_state->crtc_x;
5473 plane->state->crtc_y = new_state->crtc_y;
5474 plane->state->crtc_w = new_state->crtc_w;
5475 plane->state->crtc_h = new_state->crtc_h;
5476
5477 handle_cursor_update(plane, old_state);
5478}
5479
5480static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5481 .prepare_fb = dm_plane_helper_prepare_fb,
5482 .cleanup_fb = dm_plane_helper_cleanup_fb,
5483 .atomic_check = dm_plane_atomic_check,
5484 .atomic_async_check = dm_plane_atomic_async_check,
5485 .atomic_async_update = dm_plane_atomic_async_update
5486};
5487
5488/*
5489 * TODO: these are currently initialized to rgb formats only.
5490 * For future use cases we should either initialize them dynamically based on
5491 * plane capabilities, or initialize this array to all formats, so internal drm
5492 * check will succeed, and let DC implement proper check
5493 */
5494static const uint32_t rgb_formats[] = {
5495 DRM_FORMAT_XRGB8888,
5496 DRM_FORMAT_ARGB8888,
5497 DRM_FORMAT_RGBA8888,
5498 DRM_FORMAT_XRGB2101010,
5499 DRM_FORMAT_XBGR2101010,
5500 DRM_FORMAT_ARGB2101010,
5501 DRM_FORMAT_ABGR2101010,
5502 DRM_FORMAT_XBGR8888,
5503 DRM_FORMAT_ABGR8888,
5504 DRM_FORMAT_RGB565,
5505};
5506
5507static const uint32_t overlay_formats[] = {
5508 DRM_FORMAT_XRGB8888,
5509 DRM_FORMAT_ARGB8888,
5510 DRM_FORMAT_RGBA8888,
5511 DRM_FORMAT_XBGR8888,
5512 DRM_FORMAT_ABGR8888,
5513 DRM_FORMAT_RGB565
5514};
5515
5516static const u32 cursor_formats[] = {
5517 DRM_FORMAT_ARGB8888
5518};
5519
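/*
 * Fill 'formats' with up to max_formats supported by this plane type;
 * NV12/P010 are added for primary planes only when the DC plane caps
 * report support. Returns the number of formats written.
 */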
5520static int get_plane_formats(const struct drm_plane *plane,
5521 const struct dc_plane_cap *plane_cap,
5522 uint32_t *formats, int max_formats)
5523{
5524 int i, num_formats = 0;
5525
5526 /*
5527 * TODO: Query support for each group of formats directly from
5528 * DC plane caps. This will require adding more formats to the
5529 * caps list.
5530 */
5531
5532 switch (plane->type) {
5533 case DRM_PLANE_TYPE_PRIMARY:
5534 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5535 if (num_formats >= max_formats)
5536 break;
5537
5538 formats[num_formats++] = rgb_formats[i];
5539 }
5540
5541 if (plane_cap && plane_cap->pixel_format_support.nv12)
5542 formats[num_formats++] = DRM_FORMAT_NV12;
5543 if (plane_cap && plane_cap->pixel_format_support.p010)
5544 formats[num_formats++] = DRM_FORMAT_P010;
5545 break;
5546
5547 case DRM_PLANE_TYPE_OVERLAY:
5548 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5549 if (num_formats >= max_formats)
5550 break;
5551
5552 formats[num_formats++] = overlay_formats[i];
5553 }
5554 break;
5555
5556 case DRM_PLANE_TYPE_CURSOR:
5557 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5558 if (num_formats >= max_formats)
5559 break;
5560
5561 formats[num_formats++] = cursor_formats[i];
5562 }
5563 break;
5564 }
5565
5566 return num_formats;
5567}
5568
5569static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5570 struct drm_plane *plane,
5571 unsigned long possible_crtcs,
5572 const struct dc_plane_cap *plane_cap)
5573{
5574 uint32_t formats[32];
5575 int num_formats;
5576 int res = -EPERM;
5577
5578 num_formats = get_plane_formats(plane, plane_cap, formats,
5579 ARRAY_SIZE(formats));
5580
5581 res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5582 &dm_plane_funcs, formats, num_formats,
5583 NULL, plane->type, NULL);
5584 if (res)
5585 return res;
5586
5587 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5588 plane_cap && plane_cap->per_pixel_alpha) {
5589 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5590 BIT(DRM_MODE_BLEND_PREMULTI);
5591
5592 drm_plane_create_alpha_property(plane);
5593 drm_plane_create_blend_mode_property(plane, blend_caps);
5594 }
5595
5596 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5597 plane_cap &&
5598 (plane_cap->pixel_format_support.nv12 ||
5599 plane_cap->pixel_format_support.p010)) {
5600 /* This only affects YUV formats. */
5601 drm_plane_create_color_properties(
5602 plane,
5603 BIT(DRM_COLOR_YCBCR_BT601) |
5604 BIT(DRM_COLOR_YCBCR_BT709) |
5605 BIT(DRM_COLOR_YCBCR_BT2020),
5606 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5607 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5608 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5609 }
5610
5611 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5612
5613 /* Create (reset) the plane state */
5614 if (plane->funcs->reset)
5615 plane->funcs->reset(plane);
5616
5617 return 0;
5618}
5619
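/*
 * Allocate an amdgpu CRTC together with its own cursor plane, register
 * both with DRM, and attach the color management (CTM/gamma) support.
 */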
5620static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5621 struct drm_plane *plane,
5622 uint32_t crtc_index)
5623{
5624 struct amdgpu_crtc *acrtc = NULL;
5625 struct drm_plane *cursor_plane;
5626
5627 int res = -ENOMEM;
5628
5629 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5630 if (!cursor_plane)
5631 goto fail;
5632
5633 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5634 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
5635
5636 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5637 if (!acrtc)
5638 goto fail;
5639
5640 res = drm_crtc_init_with_planes(
5641 dm->ddev,
5642 &acrtc->base,
5643 plane,
5644 cursor_plane,
5645 &amdgpu_dm_crtc_funcs, NULL);
5646
5647 if (res)
5648 goto fail;
5649
5650 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5651
5652 /* Create (reset) the plane state */
5653 if (acrtc->base.funcs->reset)
5654 acrtc->base.funcs->reset(&acrtc->base);
5655
5656 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5657 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5658
5659 acrtc->crtc_id = crtc_index;
5660 acrtc->base.enabled = false;
5661 acrtc->otg_inst = -1;
5662
5663 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5664 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5665 true, MAX_COLOR_LUT_ENTRIES);
5666 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5667
5668 return 0;
5669
5670fail:
5671 kfree(acrtc);
5672 kfree(cursor_plane);
5673 return res;
5674}
5675
5676
5677static int to_drm_connector_type(enum signal_type st)
5678{
5679 switch (st) {
5680 case SIGNAL_TYPE_HDMI_TYPE_A:
5681 return DRM_MODE_CONNECTOR_HDMIA;
5682 case SIGNAL_TYPE_EDP:
5683 return DRM_MODE_CONNECTOR_eDP;
5684 case SIGNAL_TYPE_LVDS:
5685 return DRM_MODE_CONNECTOR_LVDS;
5686 case SIGNAL_TYPE_RGB:
5687 return DRM_MODE_CONNECTOR_VGA;
5688 case SIGNAL_TYPE_DISPLAY_PORT:
5689 case SIGNAL_TYPE_DISPLAY_PORT_MST:
5690 return DRM_MODE_CONNECTOR_DisplayPort;
5691 case SIGNAL_TYPE_DVI_DUAL_LINK:
5692 case SIGNAL_TYPE_DVI_SINGLE_LINK:
5693 return DRM_MODE_CONNECTOR_DVID;
5694 case SIGNAL_TYPE_VIRTUAL:
5695 return DRM_MODE_CONNECTOR_VIRTUAL;
5696
5697 default:
5698 return DRM_MODE_CONNECTOR_Unknown;
5699 }
5700}
5701
5702static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5703{
5704 struct drm_encoder *encoder;
5705
5706 /* There is only one encoder per connector */
5707 drm_connector_for_each_possible_encoder(connector, encoder)
5708 return encoder;
5709
5710 return NULL;
5711}
5712
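/*
 * Cache the first preferred probed mode as the encoder's native mode;
 * the common modes added later are validated against it.
 */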
5713static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
5714{
5715 struct drm_encoder *encoder;
5716 struct amdgpu_encoder *amdgpu_encoder;
5717
5718 encoder = amdgpu_dm_connector_to_encoder(connector);
5719
5720 if (encoder == NULL)
5721 return;
5722
5723 amdgpu_encoder = to_amdgpu_encoder(encoder);
5724
5725 amdgpu_encoder->native_mode.clock = 0;
5726
5727 if (!list_empty(&connector->probed_modes)) {
5728 struct drm_display_mode *preferred_mode = NULL;
5729
5730 list_for_each_entry(preferred_mode,
5731 &connector->probed_modes,
5732 head) {
5733 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
5734 amdgpu_encoder->native_mode = *preferred_mode;
5735
5736 break;
5737 }
5738
5739 }
5740}
5741
5742static struct drm_display_mode *
5743amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5744 char *name,
5745 int hdisplay, int vdisplay)
5746{
5747 struct drm_device *dev = encoder->dev;
5748 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5749 struct drm_display_mode *mode = NULL;
5750 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5751
5752 mode = drm_mode_duplicate(dev, native_mode);
5753
5754 if (mode == NULL)
5755 return NULL;
5756
5757 mode->hdisplay = hdisplay;
5758 mode->vdisplay = vdisplay;
5759 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
5760 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
5761
5762 return mode;
5763
5764}
5765
5766static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
5767 struct drm_connector *connector)
5768{
5769 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5770 struct drm_display_mode *mode = NULL;
5771 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5772 struct amdgpu_dm_connector *amdgpu_dm_connector =
5773 to_amdgpu_dm_connector(connector);
5774 int i;
5775 int n;
5776 struct mode_size {
5777 char name[DRM_DISPLAY_MODE_LEN];
5778 int w;
5779 int h;
5780 } common_modes[] = {
5781 { "640x480", 640, 480},
5782 { "800x600", 800, 600},
5783 { "1024x768", 1024, 768},
5784 { "1280x720", 1280, 720},
5785 { "1280x800", 1280, 800},
5786 {"1280x1024", 1280, 1024},
5787 { "1440x900", 1440, 900},
5788 {"1680x1050", 1680, 1050},
5789 {"1600x1200", 1600, 1200},
5790 {"1920x1080", 1920, 1080},
5791 {"1920x1200", 1920, 1200}
5792 };
5793
5794 n = ARRAY_SIZE(common_modes);
5795
5796 for (i = 0; i < n; i++) {
5797 struct drm_display_mode *curmode = NULL;
5798 bool mode_existed = false;
5799
5800 if (common_modes[i].w > native_mode->hdisplay ||
5801 common_modes[i].h > native_mode->vdisplay ||
5802 (common_modes[i].w == native_mode->hdisplay &&
5803 common_modes[i].h == native_mode->vdisplay))
5804 continue;
5805
5806 list_for_each_entry(curmode, &connector->probed_modes, head) {
5807 if (common_modes[i].w == curmode->hdisplay &&
5808 common_modes[i].h == curmode->vdisplay) {
5809 mode_existed = true;
5810 break;
5811 }
5812 }
5813
5814 if (mode_existed)
5815 continue;
5816
5817 mode = amdgpu_dm_create_common_mode(encoder,
5818 common_modes[i].name, common_modes[i].w,
5819 common_modes[i].h);
5820 drm_mode_probed_add(connector, mode);
5821 amdgpu_dm_connector->num_modes++;
5822 }
5823}
5824
5825static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
5826 struct edid *edid)
5827{
5828 struct amdgpu_dm_connector *amdgpu_dm_connector =
5829 to_amdgpu_dm_connector(connector);
5830
5831 if (edid) {
5832 /* empty probed_modes */
5833 INIT_LIST_HEAD(&connector->probed_modes);
5834 amdgpu_dm_connector->num_modes =
5835 drm_add_edid_modes(connector, edid);
5836
5837 /* Sort the probed modes before calling
5838 * amdgpu_dm_get_native_mode(), since the EDID can have
5839 * more than one preferred mode. Modes later in the
5840 * probed mode list could have a higher, preferred
5841 * resolution: for example, a 3840x2160 preferred timing
5842 * in the base EDID and a 4096x2160 preferred resolution
5843 * in a later DID extension block.
5844 */
5845 drm_mode_sort(&connector->probed_modes);
5846 amdgpu_dm_get_native_mode(connector);
5847 } else {
5848 amdgpu_dm_connector->num_modes = 0;
5849 }
5850}
5851
5852static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
5853{
5854 struct amdgpu_dm_connector *amdgpu_dm_connector =
5855 to_amdgpu_dm_connector(connector);
5856 struct drm_encoder *encoder;
5857 struct edid *edid = amdgpu_dm_connector->edid;
5858
5859 encoder = amdgpu_dm_connector_to_encoder(connector);
5860
5861 if (!edid || !drm_edid_is_valid(edid)) {
5862 amdgpu_dm_connector->num_modes =
5863 drm_add_modes_noedid(connector, 640, 480);
5864 } else {
5865 amdgpu_dm_connector_ddc_get_modes(connector, edid);
5866 amdgpu_dm_connector_add_common_modes(encoder, connector);
5867 }
5868 amdgpu_dm_fbc_init(connector);
5869
5870 return amdgpu_dm_connector->num_modes;
5871}
5872
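/*
 * Initialize the common state of a freshly created DM connector and
 * attach the DRM properties (scaling, underscan, max bpc, HDR metadata,
 * VRR and, when built in, HDCP content protection) that apply to its
 * connector type.
 */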
5873void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
5874 struct amdgpu_dm_connector *aconnector,
5875 int connector_type,
5876 struct dc_link *link,
5877 int link_index)
5878{
5879 struct amdgpu_device *adev = dm->ddev->dev_private;
5880
5881 /*
5882 * Some of the properties below require access to state, like bpc.
5883 * Allocate some default initial connector state with our reset helper.
5884 */
5885 if (aconnector->base.funcs->reset)
5886 aconnector->base.funcs->reset(&aconnector->base);
5887
5888 aconnector->connector_id = link_index;
5889 aconnector->dc_link = link;
5890 aconnector->base.interlace_allowed = false;
5891 aconnector->base.doublescan_allowed = false;
5892 aconnector->base.stereo_allowed = false;
5893 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
5894 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
5895 aconnector->audio_inst = -1;
5896 mutex_init(&aconnector->hpd_lock);
5897
5898 /*
5899 * Configure HPD hot plug support. The connector->polled default
5900 * value of 0 means HPD hot plug is not supported.
5901 */
5902 switch (connector_type) {
5903 case DRM_MODE_CONNECTOR_HDMIA:
5904 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5905 aconnector->base.ycbcr_420_allowed =
5906 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
5907 break;
5908 case DRM_MODE_CONNECTOR_DisplayPort:
5909 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5910 aconnector->base.ycbcr_420_allowed =
5911 link->link_enc->features.dp_ycbcr420_supported ? true : false;
5912 break;
5913 case DRM_MODE_CONNECTOR_DVID:
5914 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5915 break;
5916 default:
5917 break;
5918 }
5919
5920 drm_object_attach_property(&aconnector->base.base,
5921 dm->ddev->mode_config.scaling_mode_property,
5922 DRM_MODE_SCALE_NONE);
5923
5924 drm_object_attach_property(&aconnector->base.base,
5925 adev->mode_info.underscan_property,
5926 UNDERSCAN_OFF);
5927 drm_object_attach_property(&aconnector->base.base,
5928 adev->mode_info.underscan_hborder_property,
5929 0);
5930 drm_object_attach_property(&aconnector->base.base,
5931 adev->mode_info.underscan_vborder_property,
5932 0);
5933
5934 if (!aconnector->mst_port)
5935 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
5936
5937 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
5938 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
5939 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
5940
5941 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5942 dc_is_dmcu_initialized(adev->dm.dc)) {
5943 drm_object_attach_property(&aconnector->base.base,
5944 adev->mode_info.abm_level_property, 0);
5945 }
5946
5947 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5948 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5949 connector_type == DRM_MODE_CONNECTOR_eDP) {
5950 drm_object_attach_property(
5951 &aconnector->base.base,
5952 dm->ddev->mode_config.hdr_output_metadata_property, 0);
5953
5954 if (!aconnector->mst_port)
5955 drm_connector_attach_vrr_capable_property(&aconnector->base);
5956
5957#ifdef CONFIG_DRM_AMD_DC_HDCP
5958 if (adev->dm.hdcp_workqueue)
5959 drm_connector_attach_content_protection_property(&aconnector->base, true);
5960#endif
5961 }
5962}
5963
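/*
 * i2c transfer hook: translate the i2c_msg array into a DC i2c_command
 * and submit it over the connector's DDC channel. Returns the number
 * of messages on success, -EIO otherwise.
 */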
5964static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
5965 struct i2c_msg *msgs, int num)
5966{
5967 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
5968 struct ddc_service *ddc_service = i2c->ddc_service;
5969 struct i2c_command cmd;
5970 int i;
5971 int result = -EIO;
5972
5973 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
5974
5975 if (!cmd.payloads)
5976 return result;
5977
5978 cmd.number_of_payloads = num;
5979 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
5980 cmd.speed = 100;
5981
5982 for (i = 0; i < num; i++) {
5983 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
5984 cmd.payloads[i].address = msgs[i].addr;
5985 cmd.payloads[i].length = msgs[i].len;
5986 cmd.payloads[i].data = msgs[i].buf;
5987 }
5988
5989 if (dc_submit_i2c(
5990 ddc_service->ctx->dc,
5991 ddc_service->ddc_pin->hw_info.ddc_channel,
5992 &cmd))
5993 result = num;
5994
5995 kfree(cmd.payloads);
5996 return result;
5997}
5998
5999static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6000{
6001 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6002}
6003
6004static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6005 .master_xfer = amdgpu_dm_i2c_xfer,
6006 .functionality = amdgpu_dm_i2c_func,
6007};
6008
6009static struct amdgpu_i2c_adapter *
6010create_i2c(struct ddc_service *ddc_service,
6011 int link_index,
6012 int *res)
6013{
6014 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6015 struct amdgpu_i2c_adapter *i2c;
6016
6017 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6018 if (!i2c)
6019 return NULL;
6020 i2c->base.owner = THIS_MODULE;
6021 i2c->base.class = I2C_CLASS_DDC;
6022 i2c->base.dev.parent = &adev->pdev->dev;
6023 i2c->base.algo = &amdgpu_dm_i2c_algo;
6024 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6025 i2c_set_adapdata(&i2c->base, i2c);
6026 i2c->ddc_service = ddc_service;
6027 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6028
6029 return i2c;
6030}
6031
6032
6033/*
6034 * Note: this function assumes that dc_link_detect() was called for the
6035 * dc_link which will be represented by this aconnector.
6036 */
6037static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6038 struct amdgpu_dm_connector *aconnector,
6039 uint32_t link_index,
6040 struct amdgpu_encoder *aencoder)
e7b07cee
HW
6041{
6042 int res = 0;
6043 int connector_type;
6044 struct dc *dc = dm->dc;
6045 struct dc_link *link = dc_get_link_at_index(dc, link_index);
6046 struct amdgpu_i2c_adapter *i2c;
6047
6048 link->priv = aconnector;
6049
6050 DRM_DEBUG_DRIVER("%s()\n", __func__);
6051
6052 i2c = create_i2c(link->ddc, link->link_index, &res);
6053 if (!i2c) {
6054 DRM_ERROR("Failed to create i2c adapter data\n");
6055 return -ENOMEM;
6056 }
6057
6058 aconnector->i2c = i2c;
6059 res = i2c_add_adapter(&i2c->base);
6060
6061 if (res) {
6062 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6063 goto out_free;
6064 }
6065
6066 connector_type = to_drm_connector_type(link->connector_signal);
6067
6068 res = drm_connector_init_with_ddc(
6069 dm->ddev,
6070 &aconnector->base,
6071 &amdgpu_dm_connector_funcs,
6072 connector_type,
6073 &i2c->base);
6074
6075 if (res) {
6076 DRM_ERROR("connector_init failed\n");
6077 aconnector->connector_id = -1;
6078 goto out_free;
6079 }
6080
6081 drm_connector_helper_add(
6082 &aconnector->base,
6083 &amdgpu_dm_connector_helper_funcs);
6084
6085 amdgpu_dm_connector_init_helper(
6086 dm,
6087 aconnector,
6088 connector_type,
6089 link,
6090 link_index);
6091
6092 drm_connector_attach_encoder(
6093 &aconnector->base, &aencoder->base);
6094
6095 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6096 || connector_type == DRM_MODE_CONNECTOR_eDP)
6097 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6098
6099out_free:
6100 if (res) {
6101 kfree(i2c);
6102 aconnector->i2c = NULL;
6103 }
6104 return res;
6105}
6106
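/* Bitmask of possible CRTCs: one bit per instantiated CRTC, at most six. */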
6107int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6108{
6109 switch (adev->mode_info.num_crtc) {
6110 case 1:
6111 return 0x1;
6112 case 2:
6113 return 0x3;
6114 case 3:
6115 return 0x7;
6116 case 4:
6117 return 0xf;
6118 case 5:
6119 return 0x1f;
6120 case 6:
6121 default:
6122 return 0x3f;
6123 }
6124}
6125
6126static int amdgpu_dm_encoder_init(struct drm_device *dev,
6127 struct amdgpu_encoder *aencoder,
6128 uint32_t link_index)
6129{
6130 struct amdgpu_device *adev = dev->dev_private;
6131
6132 int res = drm_encoder_init(dev,
6133 &aencoder->base,
6134 &amdgpu_dm_encoder_funcs,
6135 DRM_MODE_ENCODER_TMDS,
6136 NULL);
6137
6138 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6139
6140 if (!res)
6141 aencoder->encoder_id = link_index;
6142 else
6143 aencoder->encoder_id = -1;
6144
6145 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6146
6147 return res;
6148}
6149
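/*
 * Enable or disable CRTC interrupt delivery: take or drop the pageflip
 * IRQ reference and switch DRM vblank handling on or off accordingly.
 */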
6150static void manage_dm_interrupts(struct amdgpu_device *adev,
6151 struct amdgpu_crtc *acrtc,
6152 bool enable)
e7b07cee
HW
6153{
6154 /*
6155 * this is not correct translation but will work as soon as VBLANK
6156 * constant is the same as PFLIP
6157 */
6158 int irq_type =
6159 amdgpu_display_crtc_idx_to_irq_type(
6160 adev,
6161 acrtc->crtc_id);
6162
6163 if (enable) {
6164 drm_crtc_vblank_on(&acrtc->base);
6165 amdgpu_irq_get(
6166 adev,
6167 &adev->pageflip_irq,
6168 irq_type);
6169 } else {
6170
6171 amdgpu_irq_put(
6172 adev,
6173 &adev->pageflip_irq,
6174 irq_type);
6175 drm_crtc_vblank_off(&acrtc->base);
6176 }
6177}
6178
6179static bool
6180is_scaling_state_different(const struct dm_connector_state *dm_state,
6181 const struct dm_connector_state *old_dm_state)
6182{
6183 if (dm_state->scaling != old_dm_state->scaling)
6184 return true;
6185 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6186 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6187 return true;
6188 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6189 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6190 return true;
6191 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6192 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6193 return true;
6194 return false;
6195}
6196
6197#ifdef CONFIG_DRM_AMD_DC_HDCP
6198static bool is_content_protection_different(struct drm_connector_state *state,
6199 const struct drm_connector_state *old_state,
6200 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6201{
6202 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6203
6204 if (old_state->hdcp_content_type != state->hdcp_content_type &&
6205 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6206 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6207 return true;
6208 }
6209
6210 /* CP is being re-enabled, ignore this */
6211 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6212 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6213 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6214 return false;
6215 }
6216
6217 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6218 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6219 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6220 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6221
6222 /* Check that something is connected and enabled; otherwise we would
6223 * start HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
6224 */
6225 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6226 aconnector->dc_sink != NULL)
6227 return true;
6228
6229 if (old_state->content_protection == state->content_protection)
6230 return false;
6231
6232 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6233 return true;
6234
6235 return false;
6236}
6237
6238#endif
6239static void remove_stream(struct amdgpu_device *adev,
6240 struct amdgpu_crtc *acrtc,
6241 struct dc_stream_state *stream)
6242{
6243 /* this is the update mode case */
e7b07cee
HW
6244
6245 acrtc->otg_inst = -1;
6246 acrtc->enabled = false;
6247}
6248
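/*
 * Compute the DC cursor position for the plane state: negative
 * coordinates are clamped to zero with the clipped amount carried in
 * the hotspot; the cursor stays disabled without a CRTC or framebuffer.
 */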
6249static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6250 struct dc_cursor_position *position)
6251{
6252 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6253 int x, y;
6254 int xorigin = 0, yorigin = 0;
6255
6256 position->enable = false;
6257 position->x = 0;
6258 position->y = 0;
6259
6260 if (!crtc || !plane->state->fb)
6261 return 0;
6262
6263 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6264 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6265 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6266 __func__,
6267 plane->state->crtc_w,
6268 plane->state->crtc_h);
6269 return -EINVAL;
6270 }
6271
6272 x = plane->state->crtc_x;
6273 y = plane->state->crtc_y;
6274
6275 if (x <= -amdgpu_crtc->max_cursor_width ||
6276 y <= -amdgpu_crtc->max_cursor_height)
6277 return 0;
6278
2a8f6ccb
HW
6279 if (x < 0) {
6280 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6281 x = 0;
6282 }
6283 if (y < 0) {
6284 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6285 y = 0;
6286 }
6287 position->enable = true;
6288 position->translate_by_source = true;
6289 position->x = x;
6290 position->y = y;
6291 position->x_hotspot = xorigin;
6292 position->y_hotspot = yorigin;
6293
6294 return 0;
6295}
6296
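/*
 * Program the hardware cursor for the plane's current state: either
 * disable it when it has no visible position, or update its attributes
 * and position under the DC lock.
 */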
6297static void handle_cursor_update(struct drm_plane *plane,
6298 struct drm_plane_state *old_plane_state)
6299{
6300 struct amdgpu_device *adev = plane->dev->dev_private;
6301 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6302 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6303 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6304 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6305 uint64_t address = afb ? afb->address : 0;
6306 struct dc_cursor_position position;
6307 struct dc_cursor_attributes attributes;
6308 int ret;
6309
6310 if (!plane->state->fb && !old_plane_state->fb)
6311 return;
6312
6313 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6314 __func__,
6315 amdgpu_crtc->crtc_id,
6316 plane->state->crtc_w,
6317 plane->state->crtc_h);
6318
6319 ret = get_cursor_position(plane, crtc, &position);
6320 if (ret)
6321 return;
6322
6323 if (!position.enable) {
6324 /* turn off cursor */
6325 if (crtc_state && crtc_state->stream) {
6326 mutex_lock(&adev->dm.dc_lock);
6327 dc_stream_set_cursor_position(crtc_state->stream,
6328 &position);
6329 mutex_unlock(&adev->dm.dc_lock);
6330 }
6331 return;
6332 }
6333
6334 amdgpu_crtc->cursor_width = plane->state->crtc_w;
6335 amdgpu_crtc->cursor_height = plane->state->crtc_h;
6336
6337 memset(&attributes, 0, sizeof(attributes));
6338 attributes.address.high_part = upper_32_bits(address);
6339 attributes.address.low_part = lower_32_bits(address);
6340 attributes.width = plane->state->crtc_w;
6341 attributes.height = plane->state->crtc_h;
6342 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6343 attributes.rotation_angle = 0;
6344 attributes.attribute_flags.value = 0;
6345
6346 attributes.pitch = attributes.width;
6347
6348 if (crtc_state->stream) {
6349 mutex_lock(&adev->dm.dc_lock);
6350 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6351 &attributes))
6352 DRM_ERROR("DC failed to set cursor attributes\n");
6353
6354 if (!dc_stream_set_cursor_position(crtc_state->stream,
6355 &position))
6356 DRM_ERROR("DC failed to set cursor position\n");
6357 mutex_unlock(&adev->dm.dc_lock);
6358 }
6359}
6360
6361static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6362{
6363
6364 assert_spin_locked(&acrtc->base.dev->event_lock);
6365 WARN_ON(acrtc->event);
6366
6367 acrtc->event = acrtc->base.state->event;
6368
6369 /* Set the flip status */
6370 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6371
6372 /* Mark this event as consumed */
6373 acrtc->base.state->event = NULL;
6374
6375 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6376 acrtc->crtc_id);
6377}
6378
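/*
 * Refresh VRR state for a flip: run the freesync pre-flip handling,
 * rebuild the VRR infopacket, and propagate the resulting adjust and
 * infopacket values to both the CRTC state and the DC stream.
 */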
6379static void update_freesync_state_on_stream(
6380 struct amdgpu_display_manager *dm,
6381 struct dm_crtc_state *new_crtc_state,
6382 struct dc_stream_state *new_stream,
6383 struct dc_plane_state *surface,
6384 u32 flip_timestamp_in_us)
6385{
6386 struct mod_vrr_params vrr_params;
6387 struct dc_info_packet vrr_infopacket = {0};
6388 struct amdgpu_device *adev = dm->adev;
6389 unsigned long flags;
6390
6391 if (!new_stream)
6392 return;
6393
6394 /*
6395 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6396 * For now it's sufficient to just guard against these conditions.
6397 */
6398
6399 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6400 return;
6401
6402 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6403 vrr_params = new_crtc_state->vrr_params;
6404
6405 if (surface) {
6406 mod_freesync_handle_preflip(
6407 dm->freesync_module,
6408 surface,
6409 new_stream,
6410 flip_timestamp_in_us,
6411 &vrr_params);
6412
6413 if (adev->family < AMDGPU_FAMILY_AI &&
6414 amdgpu_dm_vrr_active(new_crtc_state)) {
6415 mod_freesync_handle_v_update(dm->freesync_module,
6416 new_stream, &vrr_params);
6417
6418 /* Need to call this before the frame ends. */
6419 dc_stream_adjust_vmin_vmax(dm->dc,
6420 new_crtc_state->stream,
6421 &vrr_params.adjust);
6422 }
6423 }
6424
6425 mod_freesync_build_vrr_infopacket(
6426 dm->freesync_module,
6427 new_stream,
6428 &vrr_params,
6429 PACKET_TYPE_VRR,
6430 TRANSFER_FUNC_UNKNOWN,
6431 &vrr_infopacket);
6432
6433 new_crtc_state->freesync_timing_changed |=
6434 (memcmp(&new_crtc_state->vrr_params.adjust,
6435 &vrr_params.adjust,
6436 sizeof(vrr_params.adjust)) != 0);
6437
6438 new_crtc_state->freesync_vrr_info_changed |=
6439 (memcmp(&new_crtc_state->vrr_infopacket,
6440 &vrr_infopacket,
6441 sizeof(vrr_infopacket)) != 0);
6442
6443 new_crtc_state->vrr_params = vrr_params;
6444 new_crtc_state->vrr_infopacket = vrr_infopacket;
6445
6446 new_stream->adjust = new_crtc_state->vrr_params.adjust;
6447 new_stream->vrr_infopacket = vrr_infopacket;
6448
6449 if (new_crtc_state->freesync_vrr_info_changed)
6450 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6451 new_crtc_state->base.crtc->base.id,
6452 (int)new_crtc_state->base.vrr_enabled,
6453 (int)vrr_params.state);
6454
6455 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6456}
6457
6458static void pre_update_freesync_state_on_stream(
6459 struct amdgpu_display_manager *dm,
6460 struct dm_crtc_state *new_crtc_state)
6461{
6462 struct dc_stream_state *new_stream = new_crtc_state->stream;
6463 struct mod_vrr_params vrr_params;
6464 struct mod_freesync_config config = new_crtc_state->freesync_config;
6465 struct amdgpu_device *adev = dm->adev;
6466 unsigned long flags;
6467
6468 if (!new_stream)
6469 return;
6470
6471 /*
6472 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6473 * For now it's sufficient to just guard against these conditions.
6474 */
6475 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6476 return;
6477
6478 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6479 vrr_params = new_crtc_state->vrr_params;
6480
6481 if (new_crtc_state->vrr_supported &&
6482 config.min_refresh_in_uhz &&
6483 config.max_refresh_in_uhz) {
6484 config.state = new_crtc_state->base.vrr_enabled ?
6485 VRR_STATE_ACTIVE_VARIABLE :
6486 VRR_STATE_INACTIVE;
6487 } else {
6488 config.state = VRR_STATE_UNSUPPORTED;
6489 }
6490
6491 mod_freesync_build_vrr_params(dm->freesync_module,
6492 new_stream,
6493 &config, &vrr_params);
6494
6495 new_crtc_state->freesync_timing_changed |=
6496 (memcmp(&new_crtc_state->vrr_params.adjust,
6497 &vrr_params.adjust,
6498 sizeof(vrr_params.adjust)) != 0);
6499
6500 new_crtc_state->vrr_params = vrr_params;
6501 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6502}
6503
6504static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6505 struct dm_crtc_state *new_state)
6506{
6507 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6508 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6509
6510 if (!old_vrr_active && new_vrr_active) {
6511 /* Transition VRR inactive -> active:
6512 * While VRR is active, we must not disable vblank irq, as a
6513 * reenable after disable would compute bogus vblank/pflip
6514 * timestamps if it likely happened inside display front-porch.
6515 *
6516 * We also need vupdate irq for the actual core vblank handling
6517 * at end of vblank.
6518 */
6519 dm_set_vupdate_irq(new_state->base.crtc, true);
6520 drm_crtc_vblank_get(new_state->base.crtc);
6521 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6522 __func__, new_state->base.crtc->base.id);
6523 } else if (old_vrr_active && !new_vrr_active) {
6524 /* Transition VRR active -> inactive:
6525 * Allow vblank irq disable again for fixed refresh rate.
6526 */
6527 dm_set_vupdate_irq(new_state->base.crtc, false);
6528 drm_crtc_vblank_put(new_state->base.crtc);
6529 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6530 __func__, new_state->base.crtc->base.id);
6531 }
6532}
6533
6534static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6535{
6536 struct drm_plane *plane;
6537 struct drm_plane_state *old_plane_state, *new_plane_state;
6538 int i;
6539
6540 /*
6541 * TODO: Make this per-stream so we don't issue redundant updates for
6542 * commits with multiple streams.
6543 */
6544 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6545 new_plane_state, i)
6546 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6547 handle_cursor_update(plane, old_plane_state);
6548}
6549
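/*
 * Commit all plane updates for one CRTC: build a dc_surface_update
 * bundle for the CRTC's planes, waiting on fences and on the target
 * vblank when a page flip is pending.
 */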
6550static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6551 struct dc_state *dc_state,
6552 struct drm_device *dev,
6553 struct amdgpu_display_manager *dm,
6554 struct drm_crtc *pcrtc,
6555 bool wait_for_vblank)
6556{
6557 uint32_t i;
6558 uint64_t timestamp_ns;
6559 struct drm_plane *plane;
6560 struct drm_plane_state *old_plane_state, *new_plane_state;
6561 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6562 struct drm_crtc_state *new_pcrtc_state =
6563 drm_atomic_get_new_crtc_state(state, pcrtc);
6564 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6565 struct dm_crtc_state *dm_old_crtc_state =
6566 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6567 int planes_count = 0, vpos, hpos;
6568 long r;
6569 unsigned long flags;
6570 struct amdgpu_bo *abo;
6571 uint64_t tiling_flags;
6572 uint32_t target_vblank, last_flip_vblank;
6573 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6574 bool pflip_present = false;
6575 struct {
6576 struct dc_surface_update surface_updates[MAX_SURFACES];
6577 struct dc_plane_info plane_infos[MAX_SURFACES];
6578 struct dc_scaling_info scaling_infos[MAX_SURFACES];
6579 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6580 struct dc_stream_update stream_update;
6581 } *bundle;
6582
6583 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6584
6585 if (!bundle) {
6586 dm_error("Failed to allocate update bundle\n");
6587 goto cleanup;
6588 }
6589
6590 /*
6591 * Disable the cursor first if we're disabling all the planes.
6592 * It'll remain on the screen after the planes are re-enabled
6593 * if we don't.
6594 */
6595 if (acrtc_state->active_planes == 0)
6596 amdgpu_dm_commit_cursors(state);
6597
6598 /* update planes when needed */
6599 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
LSL
6599 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6600 struct drm_crtc *crtc = new_plane_state->crtc;
f5ba60fe 6601 struct drm_crtc_state *new_crtc_state;
0bc9706d 6602 struct drm_framebuffer *fb = new_plane_state->fb;
34bafd27 6603 bool plane_needs_flip;
c7af5f77 6604 struct dc_plane_state *dc_plane;
54d76575 6605 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
e7b07cee 6606
80c218d5
NK
6607 /* Cursor plane is handled after stream updates */
6608 if (plane->type == DRM_PLANE_TYPE_CURSOR)
e7b07cee 6609 continue;
e7b07cee 6610
f5ba60fe
DD
6611 if (!fb || !crtc || pcrtc != crtc)
6612 continue;
6613
6614 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6615 if (!new_crtc_state->active)
e7b07cee
HW
6616 continue;
6617
bc7f670e 6618 dc_plane = dm_new_plane_state->dc_state;
e7b07cee 6619
74aa7bd4 6620 bundle->surface_updates[planes_count].surface = dc_plane;
bc7f670e 6621 if (new_pcrtc_state->color_mgmt_changed) {
74aa7bd4
DF
6622 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6623 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
bc7f670e 6624 }
8a48b44c 6625
695af5f9
NK
6626 fill_dc_scaling_info(new_plane_state,
6627 &bundle->scaling_infos[planes_count]);
8a48b44c 6628
695af5f9
NK
6629 bundle->surface_updates[planes_count].scaling_info =
6630 &bundle->scaling_infos[planes_count];
8a48b44c 6631
f5031000 6632 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8a48b44c 6633
f5031000 6634 pflip_present = pflip_present || plane_needs_flip;
8a48b44c 6635
f5031000
DF
6636 if (!plane_needs_flip) {
6637 planes_count += 1;
6638 continue;
6639 }
8a48b44c 6640
2fac0f53
CK
6641 abo = gem_to_amdgpu_bo(fb->obj[0]);
6642
f8308898
AG
6643 /*
6644 * Wait for all fences on this FB. Do limited wait to avoid
6645 * deadlock during GPU reset when this fence will not signal
6646 * but we hold reservation lock for the BO.
6647 */
52791eee 6648 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
2fac0f53 6649 false,
f8308898
AG
6650 msecs_to_jiffies(5000));
6651 if (unlikely(r <= 0))
ed8a5fb2 6652 DRM_ERROR("Waiting for fences timed out!");
2fac0f53 6653
f5031000
DF
6654 /*
6655 * TODO This might fail and hence better not used, wait
6656 * explicitly on fences instead
6657 * and in general should be called for
6658 * blocking commit to as per framework helpers
6659 */
f5031000 6660 r = amdgpu_bo_reserve(abo, true);
f8308898 6661 if (unlikely(r != 0))
f5031000 6662 DRM_ERROR("failed to reserve buffer before flip\n");
8a48b44c 6663
f5031000 6664 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
8a48b44c 6665
f5031000 6666 amdgpu_bo_unreserve(abo);
8a48b44c 6667
695af5f9
NK
6668 fill_dc_plane_info_and_addr(
6669 dm->adev, new_plane_state, tiling_flags,
6670 &bundle->plane_infos[planes_count],
87b7ebc2
RS
6671 &bundle->flip_addrs[planes_count].address,
6672 false);
6673
6674 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6675 new_plane_state->plane->index,
6676 bundle->plane_infos[planes_count].dcc.enable);
695af5f9
NK
6677
6678 bundle->surface_updates[planes_count].plane_info =
6679 &bundle->plane_infos[planes_count];
8a48b44c 6680
caff0e66
NK
6681 /*
6682 * Only allow immediate flips for fast updates that don't
6683 * change FB pitch, DCC state, rotation or mirroing.
6684 */
f5031000 6685 bundle->flip_addrs[planes_count].flip_immediate =
4d85f45c 6686 crtc->state->async_flip &&
caff0e66 6687 acrtc_state->update_type == UPDATE_TYPE_FAST;
8a48b44c 6688
f5031000
DF
6689 timestamp_ns = ktime_get_ns();
6690 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6691 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6692 bundle->surface_updates[planes_count].surface = dc_plane;
8a48b44c 6693
f5031000
DF
6694 if (!bundle->surface_updates[planes_count].surface) {
6695 DRM_ERROR("No surface for CRTC: id=%d\n",
6696 acrtc_attach->crtc_id);
6697 continue;
bc7f670e
DF
6698 }
6699
f5031000
DF
6700 if (plane == pcrtc->primary)
6701 update_freesync_state_on_stream(
6702 dm,
6703 acrtc_state,
6704 acrtc_state->stream,
6705 dc_plane,
6706 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
bc7f670e 6707
f5031000
DF
6708 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6709 __func__,
6710 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
6711 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
bc7f670e
DF
6712
6713 planes_count += 1;
6714
8a48b44c
DF
6715 }
6716
        if (pflip_present) {
                if (!vrr_active) {
                        /* Use old throttling in non-vrr fixed refresh rate mode
                         * to keep flip scheduling based on target vblank counts
                         * working in a backwards compatible way, e.g., for
                         * clients using the GLX_OML_sync_control extension or
                         * DRI3/Present extension with defined target_msc.
                         */
                        last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
                } else {
                        /* For variable refresh rate mode only:
                         * Get vblank of last completed flip to avoid > 1 vrr
                         * flips per video frame by use of throttling, but allow
                         * flip programming anywhere in the possibly large
                         * variable vrr vblank interval for fine-grained flip
                         * timing control and more opportunity to avoid stutter
                         * on late submission of flips.
                         */
                        spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
                        last_flip_vblank = acrtc_attach->last_flip_vblank;
                        spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
                }

                target_vblank = last_flip_vblank + wait_for_vblank;

                /*
                 * Wait until we're out of the vertical blank period before the one
                 * targeted by the flip
                 */
                while ((acrtc_attach->enabled &&
                        (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
                                                            0, &vpos, &hpos, NULL,
                                                            NULL, &pcrtc->hwmode)
                         & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
                        (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
                        (int)(target_vblank -
                              amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
                        usleep_range(1000, 1100);
                }

                if (acrtc_attach->base.state->event) {
                        drm_crtc_vblank_get(pcrtc);

                        spin_lock_irqsave(&pcrtc->dev->event_lock, flags);

                        WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
                        prepare_flip_isr(acrtc_attach);

                        spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
                }

                if (acrtc_state->stream) {
                        if (acrtc_state->freesync_vrr_info_changed)
                                bundle->stream_update.vrr_infopacket =
                                        &acrtc_state->stream->vrr_infopacket;
                }
        }

        /* Update the planes if changed or disable if we don't have any. */
        if ((planes_count || acrtc_state->active_planes == 0) &&
            acrtc_state->stream) {
                bundle->stream_update.stream = acrtc_state->stream;
                if (new_pcrtc_state->mode_changed) {
                        bundle->stream_update.src = acrtc_state->stream->src;
                        bundle->stream_update.dst = acrtc_state->stream->dst;
                }

                if (new_pcrtc_state->color_mgmt_changed) {
                        /*
                         * TODO: This isn't fully correct since we've actually
                         * already modified the stream in place.
                         */
                        bundle->stream_update.gamut_remap =
                                &acrtc_state->stream->gamut_remap_matrix;
                        bundle->stream_update.output_csc_transform =
                                &acrtc_state->stream->csc_color_matrix;
                        bundle->stream_update.out_transfer_func =
                                acrtc_state->stream->out_transfer_func;
                }

                acrtc_state->stream->abm_level = acrtc_state->abm_level;
                if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
                        bundle->stream_update.abm_level = &acrtc_state->abm_level;

                /*
                 * If FreeSync state on the stream has changed then we need to
                 * re-adjust the min/max bounds now that DC doesn't handle this
                 * as part of commit.
                 */
                if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
                    amdgpu_dm_vrr_active(acrtc_state)) {
                        spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
                        dc_stream_adjust_vmin_vmax(
                                dm->dc, acrtc_state->stream,
                                &acrtc_state->vrr_params.adjust);
                        spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
                }
                mutex_lock(&dm->dc_lock);
                if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
                    acrtc_state->stream->link->psr_allow_active)
                        amdgpu_dm_psr_disable(acrtc_state->stream);

                dc_commit_updates_for_stream(dm->dc,
                                             bundle->surface_updates,
                                             planes_count,
                                             acrtc_state->stream,
                                             &bundle->stream_update,
                                             dc_state);

                if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
                    acrtc_state->stream->psr_version &&
                    !acrtc_state->stream->link->psr_feature_enabled)
                        amdgpu_dm_link_setup_psr(acrtc_state->stream);
                else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
                         acrtc_state->stream->link->psr_feature_enabled &&
                         !acrtc_state->stream->link->psr_allow_active) {
                        amdgpu_dm_psr_enable(acrtc_state->stream);
                }

                mutex_unlock(&dm->dc_lock);
        }

        /*
         * Update cursor state *after* programming all the planes.
         * This avoids redundant programming in the case where we're going
         * to be disabling a single plane - those pipes are being disabled.
         */
        if (acrtc_state->active_planes)
                amdgpu_dm_commit_cursors(state);

cleanup:
        kfree(bundle);
}

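/*
 * Keep audio routing in sync with the atomic state: drop the audio instance
 * of any connector whose CRTC changed or went through a modeset, then publish
 * the instance reported by DC for connectors with an active stream. Each
 * change is signalled to the audio component via an ELD notify.
 */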
static void amdgpu_dm_commit_audio(struct drm_device *dev,
                                   struct drm_atomic_state *state)
{
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        struct drm_connector_state *old_con_state, *new_con_state;
        struct drm_crtc_state *new_crtc_state;
        struct dm_crtc_state *new_dm_crtc_state;
        const struct dc_stream_status *status;
        int i, inst;

        /* Notify device removals. */
        for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
                if (old_con_state->crtc != new_con_state->crtc) {
                        /* CRTC changes require notification. */
                        goto notify;
                }

                if (!new_con_state->crtc)
                        continue;

                new_crtc_state = drm_atomic_get_new_crtc_state(
                        state, new_con_state->crtc);

                if (!new_crtc_state)
                        continue;

                if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
                        continue;

        notify:
                aconnector = to_amdgpu_dm_connector(connector);

                mutex_lock(&adev->dm.audio_lock);
                inst = aconnector->audio_inst;
                aconnector->audio_inst = -1;
                mutex_unlock(&adev->dm.audio_lock);

                amdgpu_dm_audio_eld_notify(adev, inst);
        }

        /* Notify audio device additions. */
        for_each_new_connector_in_state(state, connector, new_con_state, i) {
                if (!new_con_state->crtc)
                        continue;

                new_crtc_state = drm_atomic_get_new_crtc_state(
                        state, new_con_state->crtc);

                if (!new_crtc_state)
                        continue;

                if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
                        continue;

                new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
                if (!new_dm_crtc_state->stream)
                        continue;

                status = dc_stream_get_status(new_dm_crtc_state->stream);
                if (!status)
                        continue;

                aconnector = to_amdgpu_dm_connector(connector);

                mutex_lock(&adev->dm.audio_lock);
                inst = status->audio_inst;
                aconnector->audio_inst = inst;
                mutex_unlock(&adev->dm.audio_lock);

                amdgpu_dm_audio_eld_notify(adev, inst);
        }
}

/*
 * Enable interrupts on CRTCs that are newly active, have undergone
 * a modeset, or have active planes again.
 *
 * Done in two passes, based on the for_modeset flag:
 * Pass 1: For CRTCs going through modeset
 * Pass 2: For CRTCs going from 0 to n active planes
 *
 * Interrupts can only be enabled after the planes are programmed,
 * so this requires a two-pass approach since we don't want to
 * just defer the interrupts until after commit planes every time.
 */
static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
                                             struct drm_atomic_state *state,
                                             bool for_modeset)
{
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        int i;
#ifdef CONFIG_DEBUG_FS
        enum amdgpu_dm_pipe_crc_source source;
#endif

        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
                                      new_crtc_state, i) {
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
                struct dm_crtc_state *dm_new_crtc_state =
                        to_dm_crtc_state(new_crtc_state);
                struct dm_crtc_state *dm_old_crtc_state =
                        to_dm_crtc_state(old_crtc_state);
                bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
                bool run_pass;

                run_pass = (for_modeset && modeset) ||
                           (!for_modeset && !modeset &&
                            !dm_old_crtc_state->interrupts_enabled);

                if (!run_pass)
                        continue;

                if (!dm_new_crtc_state->interrupts_enabled)
                        continue;

                manage_dm_interrupts(adev, acrtc, true);

#ifdef CONFIG_DEBUG_FS
                /* The stream has changed, so CRC capture needs to be re-enabled. */
                source = dm_new_crtc_state->crc_src;
                if (amdgpu_dm_is_valid_crc_source(source)) {
                        amdgpu_dm_crtc_configure_crc_source(
                                crtc, dm_new_crtc_state,
                                dm_new_crtc_state->crc_src);
                }
#endif
        }
}

/*
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 */
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
                                                struct dc_stream_state *stream_state)
{
        stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}

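/*
 * Atomic commit entry point for DM. Before delegating to the generic DRM
 * commit helper, this disables vblank/pflip interrupts on any CRTC that is
 * undergoing a modeset, being disabled, or losing all active planes; see the
 * comment in the function body for why this happens here rather than in
 * commit tail.
 */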
static int amdgpu_dm_atomic_commit(struct drm_device *dev,
                                   struct drm_atomic_state *state,
                                   bool nonblock)
{
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct amdgpu_device *adev = dev->dev_private;
        int i;

        /*
         * We evade vblank and pflip interrupts on CRTCs that are undergoing
         * a modeset, being disabled, or have no active planes.
         *
         * It's done in atomic commit rather than commit tail for now since
         * some of these interrupt handlers access the current CRTC state and
         * potentially the stream pointer itself.
         *
         * Since the atomic state is swapped within atomic commit and not within
         * commit tail, this would lead to the new state (that hasn't been
         * committed yet) being accessed from within the handlers.
         *
         * TODO: Fix this so we can do this in commit tail and not have to block
         * in atomic check.
         */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
                struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

                if (dm_old_crtc_state->interrupts_enabled &&
                    (!dm_new_crtc_state->interrupts_enabled ||
                     drm_atomic_crtc_needs_modeset(new_crtc_state)))
                        manage_dm_interrupts(adev, acrtc, false);
        }
        /*
         * Add check here for SoCs that support hardware cursor plane, to
         * unset legacy_cursor_update
         */

        return drm_atomic_helper_commit(dev, state, nonblock);

        /* TODO: Handle EINTR, reenable IRQ */
}

/**
 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
 * @state: The atomic state to commit
 *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failures here imply a hardware failure, since
 * atomic check should have filtered anything non-kosher.
 */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
        struct drm_device *dev = state->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dm_atomic_state *dm_state;
        struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
        uint32_t i, j;
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        unsigned long flags;
        bool wait_for_vblank = true;
        struct drm_connector *connector;
        struct drm_connector_state *old_con_state, *new_con_state;
        struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
        int crtc_disable_count = 0;

        drm_atomic_helper_update_legacy_modeset_state(dev, state);

        dm_state = dm_atomic_get_new_state(state);
        if (dm_state && dm_state->context) {
                dc_state = dm_state->context;
        } else {
                /* No state changes, retain current state. */
                dc_state_temp = dc_create_state(dm->dc);
                ASSERT(dc_state_temp);
                dc_state = dc_state_temp;
                dc_resource_state_copy_construct_current(dm->dc, dc_state);
        }

        /* update changed items */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

                DRM_DEBUG_DRIVER(
                        "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
                        "planes_changed:%d, mode_changed:%d,active_changed:%d,"
                        "connectors_changed:%d\n",
                        acrtc->crtc_id,
                        new_crtc_state->enable,
                        new_crtc_state->active,
                        new_crtc_state->planes_changed,
                        new_crtc_state->mode_changed,
                        new_crtc_state->active_changed,
                        new_crtc_state->connectors_changed);

                /* Copy all transient state flags into dc state */
                if (dm_new_crtc_state->stream) {
                        amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
                                                            dm_new_crtc_state->stream);
                }

                /* handles headless hotplug case, updating new_state and
                 * aconnector as needed
                 */

                if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

                        DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);

                        if (!dm_new_crtc_state->stream) {
                                /*
                                 * This could happen because of issues with
                                 * userspace notification delivery; in this
                                 * case userspace tries to set a mode on a
                                 * display that is in fact disconnected, and
                                 * dc_sink is NULL on the aconnector. We
                                 * expect a mode-reset to come soon.
                                 *
                                 * This can also happen when an unplug occurs
                                 * during the resume sequence.
                                 *
                                 * In this case, we want to pretend we still
                                 * have a sink to keep the pipe running so that
                                 * hw state is consistent with the sw state.
                                 */
                                DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
                                                 __func__, acrtc->base.base.id);
                                continue;
                        }

                        if (dm_old_crtc_state->stream)
                                remove_stream(adev, acrtc, dm_old_crtc_state->stream);

                        pm_runtime_get_noresume(dev->dev);

                        acrtc->enabled = true;
                        acrtc->hw_mode = new_crtc_state->mode;
                        crtc->hwmode = new_crtc_state->mode;
                } else if (modereset_required(new_crtc_state)) {
                        DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
                        /* i.e. reset mode */
                        if (dm_old_crtc_state->stream) {
                                if (dm_old_crtc_state->stream->link->psr_allow_active)
                                        amdgpu_dm_psr_disable(dm_old_crtc_state->stream);

                                remove_stream(adev, acrtc, dm_old_crtc_state->stream);
                        }
                }
        } /* for_each_crtc_in_state() */

        if (dc_state) {
                dm_enable_per_frame_crtc_master_sync(dc_state);
                mutex_lock(&dm->dc_lock);
                WARN_ON(!dc_commit_state(dm->dc, dc_state));
                mutex_unlock(&dm->dc_lock);
        }

        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

                if (dm_new_crtc_state->stream != NULL) {
                        const struct dc_stream_status *status =
                                dc_stream_get_status(dm_new_crtc_state->stream);

                        if (!status)
                                status = dc_stream_get_status_from_state(dc_state,
                                                                         dm_new_crtc_state->stream);

                        if (!status)
                                DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
                        else
                                acrtc->otg_inst = status->primary_otg_inst;
                }
        }
#ifdef CONFIG_DRM_AMD_DC_HDCP
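        /*
         * Update HDCP state: reset displays whose stream went away while
         * content protection was enabled, and push desired-state changes to
         * the HDCP workqueue for the rest.
         */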
        for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
                struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
                struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

                new_crtc_state = NULL;

                if (acrtc)
                        new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

                if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
                    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
                        hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
                        new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
                        continue;
                }

                if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
                        hdcp_update_display(
                                adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
                                new_con_state->hdcp_content_type,
                                new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
                                                                                                          : false);
        }
#endif

        /* Handle connector state changes */
        for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
                struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
                struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
                struct dc_surface_update dummy_updates[MAX_SURFACES];
                struct dc_stream_update stream_update;
                struct dc_info_packet hdr_packet;
                struct dc_stream_status *status = NULL;
                bool abm_changed, hdr_changed, scaling_changed;

                memset(&dummy_updates, 0, sizeof(dummy_updates));
                memset(&stream_update, 0, sizeof(stream_update));

                if (acrtc) {
                        new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
                        old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
                }

                /* Skip any modesets/resets */
                if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
                        continue;

                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

                scaling_changed = is_scaling_state_different(dm_new_con_state,
                                                             dm_old_con_state);

                abm_changed = dm_new_crtc_state->abm_level !=
                              dm_old_crtc_state->abm_level;

                hdr_changed =
                        is_hdr_metadata_different(old_con_state, new_con_state);

                if (!scaling_changed && !abm_changed && !hdr_changed)
                        continue;

                stream_update.stream = dm_new_crtc_state->stream;
                if (scaling_changed) {
                        update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
                                                       dm_new_con_state, dm_new_crtc_state->stream);

                        stream_update.src = dm_new_crtc_state->stream->src;
                        stream_update.dst = dm_new_crtc_state->stream->dst;
                }

                if (abm_changed) {
                        dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;

                        stream_update.abm_level = &dm_new_crtc_state->abm_level;
                }

                if (hdr_changed) {
                        fill_hdr_info_packet(new_con_state, &hdr_packet);
                        stream_update.hdr_static_metadata = &hdr_packet;
                }

                status = dc_stream_get_status(dm_new_crtc_state->stream);
                WARN_ON(!status);
                WARN_ON(!status->plane_count);

                /*
                 * TODO: DC refuses to perform stream updates without a dc_surface_update.
                 * Here we create an empty update on each plane.
                 * To fix this, DC should permit updating only stream properties.
                 */
                for (j = 0; j < status->plane_count; j++)
                        dummy_updates[j].surface = status->plane_states[0];

                mutex_lock(&dm->dc_lock);
                dc_commit_updates_for_stream(dm->dc,
                                             dummy_updates,
                                             status->plane_count,
                                             dm_new_crtc_state->stream,
                                             &stream_update,
                                             dc_state);
                mutex_unlock(&dm->dc_lock);
        }

        /* Count number of newly disabled CRTCs for dropping PM refs later. */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
                                      new_crtc_state, i) {
                if (old_crtc_state->active && !new_crtc_state->active)
                        crtc_disable_count++;

                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

                /* Update freesync active state. */
                pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);

                /* Handle vrr on->off / off->on transitions */
                amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
                                                dm_new_crtc_state);
        }

        /* Enable interrupts for CRTCs going through a modeset. */
        amdgpu_dm_enable_crtc_interrupts(dev, state, true);

        for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
                if (new_crtc_state->async_flip)
                        wait_for_vblank = false;

        /* update planes when needed per crtc */
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

                if (dm_new_crtc_state->stream)
                        amdgpu_dm_commit_planes(state, dc_state, dev,
                                                dm, crtc, wait_for_vblank);
        }

        /* Enable interrupts for CRTCs going from 0 to n active planes. */
        amdgpu_dm_enable_crtc_interrupts(dev, state, false);

        /* Update audio instances for each connector. */
        amdgpu_dm_commit_audio(dev, state);

        /*
         * Send a vblank event for every CRTC whose event was not handled in
         * the flip path, and mark the events consumed for
         * drm_atomic_helper_commit_hw_done().
         */
        spin_lock_irqsave(&adev->ddev->event_lock, flags);
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

                if (new_crtc_state->event)
                        drm_send_event_locked(dev, &new_crtc_state->event->base);

                new_crtc_state->event = NULL;
        }
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

        /* Signal HW programming completion */
        drm_atomic_helper_commit_hw_done(state);

        if (wait_for_vblank)
                drm_atomic_helper_wait_for_flip_done(dev, state);

        drm_atomic_helper_cleanup_planes(dev, state);

        /*
         * Finally, drop a runtime PM reference for each newly disabled CRTC,
         * so we can put the GPU into runtime suspend if we're not driving any
         * displays anymore
         */
        for (i = 0; i < crtc_disable_count; i++)
                pm_runtime_put_autosuspend(dev->dev);
        pm_runtime_mark_last_busy(dev->dev);

        if (dc_state_temp)
                dc_release_state(dc_state_temp);
}

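/*
 * Build and commit a minimal atomic state (the connector, its CRTC, and the
 * primary plane) with mode_changed forced to true, in order to restore the
 * previous display configuration without a userspace request.
 */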
static int dm_force_atomic_commit(struct drm_connector *connector)
{
        int ret = 0;
        struct drm_device *ddev = connector->dev;
        struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
        struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
        struct drm_plane *plane = disconnected_acrtc->base.primary;
        struct drm_connector_state *conn_state;
        struct drm_crtc_state *crtc_state;
        struct drm_plane_state *plane_state;

        if (!state)
                return -ENOMEM;

        state->acquire_ctx = ddev->mode_config.acquire_ctx;

        /* Construct an atomic state to restore previous display setting */

        /*
         * Attach connectors to drm_atomic_state
         */
        conn_state = drm_atomic_get_connector_state(state, connector);

        ret = PTR_ERR_OR_ZERO(conn_state);
        if (ret)
                goto err;

        /* Attach crtc to drm_atomic_state */
        crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);

        ret = PTR_ERR_OR_ZERO(crtc_state);
        if (ret)
                goto err;

        /* force a restore */
        crtc_state->mode_changed = true;

        /* Attach plane to drm_atomic_state */
        plane_state = drm_atomic_get_plane_state(state, plane);

        ret = PTR_ERR_OR_ZERO(plane_state);
        if (ret)
                goto err;

        /* Call commit internally with the state we just constructed */
        ret = drm_atomic_commit(state);
        if (!ret)
                return 0;

err:
        DRM_ERROR("Restoring old state failed with %i\n", ret);
        drm_atomic_state_put(state);

        return ret;
}

/*
 * This function handles all cases when set mode does not come upon hotplug.
 * This includes when a display is unplugged then plugged back into the
 * same port, and when running without usermode desktop manager support.
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
                                    struct drm_connector *connector)
{
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
        struct amdgpu_crtc *disconnected_acrtc;
        struct dm_crtc_state *acrtc_state;

        if (!aconnector->dc_sink || !connector->state || !connector->encoder)
                return;

        disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
        if (!disconnected_acrtc)
                return;

        acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
        if (!acrtc_state->stream)
                return;

        /*
         * If the previous sink is not released and different from the current,
         * we deduce we are in a state where we can not rely on usermode call
         * to turn on the display, so we do it here
         */
        if (acrtc_state->stream->sink != aconnector->dc_sink)
                dm_force_atomic_commit(&aconnector->base);
}

/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
                                 struct drm_atomic_state *state)
{
        struct drm_crtc *crtc;
        struct drm_crtc_commit *commit;
        long ret;

        /*
         * Adding all modeset locks to acquire_ctx will
         * ensure that when the framework releases it, the
         * extra locks we are taking here will get released too.
         */
        ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
        if (ret)
                return ret;

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                spin_lock(&crtc->commit_lock);
                commit = list_first_entry_or_null(&crtc->commit_list,
                                                  struct drm_crtc_commit, commit_entry);
                if (commit)
                        drm_crtc_commit_get(commit);
                spin_unlock(&crtc->commit_lock);

                if (!commit)
                        continue;

                /*
                 * Make sure all pending HW programming completed and
                 * page flips done
                 */
                ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

                if (ret > 0)
                        ret = wait_for_completion_interruptible_timeout(
                                &commit->flip_done, 10*HZ);

                if (ret == 0)
                        DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
                                  "timed out\n", crtc->base.id, crtc->name);

                drm_crtc_commit_put(commit);
        }

        return ret < 0 ? ret : 0;
}

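/*
 * Derive the FreeSync/VRR configuration for a CRTC from the connector's
 * capabilities: VRR is supported when the sink is freesync-capable and the
 * mode's refresh rate falls within the sink's min/max vertical frequency.
 */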
static void get_freesync_config_for_crtc(
        struct dm_crtc_state *new_crtc_state,
        struct dm_connector_state *new_con_state)
{
        struct mod_freesync_config config = {0};
        struct amdgpu_dm_connector *aconnector =
                to_amdgpu_dm_connector(new_con_state->base.connector);
        struct drm_display_mode *mode = &new_crtc_state->base.mode;
        int vrefresh = drm_mode_vrefresh(mode);

        new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
                                        vrefresh >= aconnector->min_vfreq &&
                                        vrefresh <= aconnector->max_vfreq;

        if (new_crtc_state->vrr_supported) {
                new_crtc_state->stream->ignore_msa_timing_param = true;
                config.state = new_crtc_state->base.vrr_enabled ?
                        VRR_STATE_ACTIVE_VARIABLE :
                        VRR_STATE_INACTIVE;
                config.min_refresh_in_uhz =
                        aconnector->min_vfreq * 1000000;
                config.max_refresh_in_uhz =
                        aconnector->max_vfreq * 1000000;
                config.vsif_supported = true;
                config.btr = true;
        }

        new_crtc_state->freesync_config = config;
}

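/* Clear all VRR bookkeeping, e.g. when the CRTC's stream is removed. */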
static void reset_freesync_config_for_crtc(
        struct dm_crtc_state *new_crtc_state)
{
        new_crtc_state->vrr_supported = false;

        memset(&new_crtc_state->vrr_params, 0,
               sizeof(new_crtc_state->vrr_params));
        memset(&new_crtc_state->vrr_infopacket, 0,
               sizeof(new_crtc_state->vrr_infopacket));
}

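/*
 * Validate a single CRTC transition for atomic check. Called once with
 * enable == false to remove the stream of any changed or disabled CRTC from
 * the DC context, and once with enable == true to (re)create and add streams.
 * Sets *lock_and_validation_needed when a full DC validation will be
 * required.
 */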
static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                                struct drm_atomic_state *state,
                                struct drm_crtc *crtc,
                                struct drm_crtc_state *old_crtc_state,
                                struct drm_crtc_state *new_crtc_state,
                                bool enable,
                                bool *lock_and_validation_needed)
{
        struct dm_atomic_state *dm_state = NULL;
        struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
        struct dc_stream_state *new_stream;
        int ret = 0;

        /*
         * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
         * update changed items
         */
        struct amdgpu_crtc *acrtc = NULL;
        struct amdgpu_dm_connector *aconnector = NULL;
        struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
        struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

        new_stream = NULL;

        dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
        dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
        acrtc = to_amdgpu_crtc(crtc);
        aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

        /* TODO: This hack should go away */
        if (aconnector && enable) {
                /* Make sure fake sink is created in plug-in scenario */
                drm_new_conn_state = drm_atomic_get_new_connector_state(state,
                                                                        &aconnector->base);
                drm_old_conn_state = drm_atomic_get_old_connector_state(state,
                                                                        &aconnector->base);

                if (IS_ERR(drm_new_conn_state)) {
                        ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
                        goto fail;
                }

                dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
                dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

                if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
                        goto skip_modeset;

                new_stream = create_stream_for_sink(aconnector,
                                                    &new_crtc_state->mode,
                                                    dm_new_conn_state,
                                                    dm_old_crtc_state->stream);

                /*
                 * We can have no stream on ACTION_SET if a display
                 * was disconnected during S3; in this case it is not an
                 * error, the OS will be updated after detection and
                 * will do the right thing on the next atomic commit.
                 */

                if (!new_stream) {
                        DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
                                         __func__, acrtc->base.base.id);
                        ret = -ENOMEM;
                        goto fail;
                }

                dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

                ret = fill_hdr_info_packet(drm_new_conn_state,
                                           &new_stream->hdr_static_metadata);
                if (ret)
                        goto fail;

                /*
                 * If we already removed the old stream from the context
                 * (and set the new stream to NULL) then we can't reuse
                 * the old stream even if the stream and scaling are unchanged.
                 * We'll hit the BUG_ON and black screen.
                 *
                 * TODO: Refactor this function to allow this check to work
                 * in all conditions.
                 */
                if (dm_new_crtc_state->stream &&
                    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
                    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
                        new_crtc_state->mode_changed = false;
                        DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
                                         new_crtc_state->mode_changed);
                }
        }

        /* mode_changed flag may get updated above, need to check again */
        if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
                goto skip_modeset;

        DRM_DEBUG_DRIVER(
                "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
                "planes_changed:%d, mode_changed:%d,active_changed:%d,"
                "connectors_changed:%d\n",
                acrtc->crtc_id,
                new_crtc_state->enable,
                new_crtc_state->active,
                new_crtc_state->planes_changed,
                new_crtc_state->mode_changed,
                new_crtc_state->active_changed,
                new_crtc_state->connectors_changed);

        /* Remove stream for any changed/disabled CRTC */
        if (!enable) {

                if (!dm_old_crtc_state->stream)
                        goto skip_modeset;

                ret = dm_atomic_get_state(state, &dm_state);
                if (ret)
                        goto fail;

                DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
                                 crtc->base.id);

                /* i.e. reset mode */
                if (dc_remove_stream_from_ctx(
                                dm->dc,
                                dm_state->context,
                                dm_old_crtc_state->stream) != DC_OK) {
                        ret = -EINVAL;
                        goto fail;
                }

                dc_stream_release(dm_old_crtc_state->stream);
                dm_new_crtc_state->stream = NULL;

                reset_freesync_config_for_crtc(dm_new_crtc_state);

                *lock_and_validation_needed = true;

        } else { /* Add stream for any updated/enabled CRTC */
                /*
                 * Quick fix to prevent a NULL pointer on new_stream when
                 * newly added MST connectors are not found in the existing
                 * crtc_state in the chained mode.
                 * TODO: need to dig out the root cause of that
                 */
                if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
                        goto skip_modeset;

                if (modereset_required(new_crtc_state))
                        goto skip_modeset;

                if (modeset_required(new_crtc_state, new_stream,
                                     dm_old_crtc_state->stream)) {

                        WARN_ON(dm_new_crtc_state->stream);

                        ret = dm_atomic_get_state(state, &dm_state);
                        if (ret)
                                goto fail;

                        dm_new_crtc_state->stream = new_stream;

                        dc_stream_retain(new_stream);

                        DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
                                         crtc->base.id);

                        if (dc_add_stream_to_ctx(
                                        dm->dc,
                                        dm_state->context,
                                        dm_new_crtc_state->stream) != DC_OK) {
                                ret = -EINVAL;
                                goto fail;
                        }

                        *lock_and_validation_needed = true;
                }
        }

skip_modeset:
        /* Release extra reference */
        if (new_stream)
                dc_stream_release(new_stream);

        /*
         * We want to do dc stream updates that do not require a
         * full modeset below.
         */
        if (!(enable && aconnector && new_crtc_state->enable &&
              new_crtc_state->active))
                return 0;
        /*
         * Given the above conditions, the dc state cannot be NULL because:
         * 1. We're in the process of enabling CRTCs (just been added
         *    to the dc context, or already is on the context)
         * 2. Has a valid connector attached, and
         * 3. Is currently active and enabled.
         * => The dc stream state currently exists.
         */
        BUG_ON(dm_new_crtc_state->stream == NULL);

        /* Scaling or underscan settings */
        if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
                update_stream_scaling_settings(
                        &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

        /* ABM settings */
        dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

        /*
         * Color management settings. We also update color properties
         * when a modeset is needed, to ensure it gets reprogrammed.
         */
        if (dm_new_crtc_state->base.color_mgmt_changed ||
            drm_atomic_crtc_needs_modeset(new_crtc_state)) {
                ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
                if (ret)
                        goto fail;
        }

        /* Update Freesync settings. */
        get_freesync_config_for_crtc(dm_new_crtc_state,
                                     dm_new_conn_state);

        return ret;

fail:
        if (new_stream)
                dc_stream_release(new_stream);
        return ret;
}

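/*
 * Decide whether a plane update requires removing and recreating all planes
 * on the stream. Any CRTC-level change (modeset, color management), or a
 * change in plane/CRTC assignment or framebuffer format, forces a reset given
 * the current DC architecture's z-order and pipe-acquisition constraints.
 */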
static bool should_reset_plane(struct drm_atomic_state *state,
                               struct drm_plane *plane,
                               struct drm_plane_state *old_plane_state,
                               struct drm_plane_state *new_plane_state)
{
        struct drm_plane *other;
        struct drm_plane_state *old_other_state, *new_other_state;
        struct drm_crtc_state *new_crtc_state;
        int i;

        /*
         * TODO: Remove this hack once the checks below are sufficient
         * to determine when we need to reset all the planes on
         * the stream.
         */
        if (state->allow_modeset)
                return true;

        /* Exit early if we know that we're adding or removing the plane. */
        if (old_plane_state->crtc != new_plane_state->crtc)
                return true;

        /* old crtc == new_crtc == NULL, plane not in context. */
        if (!new_plane_state->crtc)
                return false;

        new_crtc_state =
                drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);

        if (!new_crtc_state)
                return true;

        /* CRTC Degamma changes currently require us to recreate planes. */
        if (new_crtc_state->color_mgmt_changed)
                return true;

        if (drm_atomic_crtc_needs_modeset(new_crtc_state))
                return true;

        /*
         * If there are any new primary or overlay planes being added or
         * removed then the z-order can potentially change. To ensure
         * correct z-order and pipe acquisition the current DC architecture
         * requires us to remove and recreate all existing planes.
         *
         * TODO: Come up with a more elegant solution for this.
         */
        for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
                if (other->type == DRM_PLANE_TYPE_CURSOR)
                        continue;

                if (old_other_state->crtc != new_plane_state->crtc &&
                    new_other_state->crtc != new_plane_state->crtc)
                        continue;

                if (old_other_state->crtc != new_other_state->crtc)
                        return true;

                /* TODO: Remove this once we can handle fast format changes. */
                if (old_other_state->fb && new_other_state->fb &&
                    old_other_state->fb->format != new_other_state->fb->format)
                        return true;
        }

        return false;
}

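/*
 * Validate a single plane transition for atomic check. With enable == false,
 * removes a changed/removed plane from the DC context; with enable == true,
 * creates a dc_plane_state, fills its attributes from the DRM state, and adds
 * it to the context. Sets *lock_and_validation_needed when a full DC
 * validation will be required.
 */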
static int dm_update_plane_state(struct dc *dc,
                                 struct drm_atomic_state *state,
                                 struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state,
                                 struct drm_plane_state *new_plane_state,
                                 bool enable,
                                 bool *lock_and_validation_needed)
{

        struct dm_atomic_state *dm_state = NULL;
        struct drm_crtc *new_plane_crtc, *old_plane_crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
        struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
        struct amdgpu_crtc *new_acrtc;
        bool needs_reset;
        int ret = 0;

        new_plane_crtc = new_plane_state->crtc;
        old_plane_crtc = old_plane_state->crtc;
        dm_new_plane_state = to_dm_plane_state(new_plane_state);
        dm_old_plane_state = to_dm_plane_state(old_plane_state);

        /* TODO: Implement a better atomic check for the cursor plane */
        if (plane->type == DRM_PLANE_TYPE_CURSOR) {
                if (!enable || !new_plane_crtc ||
                    drm_atomic_plane_disabling(plane->state, new_plane_state))
                        return 0;

                new_acrtc = to_amdgpu_crtc(new_plane_crtc);

                if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
                    (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
                        DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
                                         new_plane_state->crtc_w, new_plane_state->crtc_h);
                        return -EINVAL;
                }

                if (new_plane_state->crtc_x <= -new_acrtc->max_cursor_width ||
                    new_plane_state->crtc_y <= -new_acrtc->max_cursor_height) {
                        DRM_DEBUG_ATOMIC("Bad cursor position %d, %d\n",
                                         new_plane_state->crtc_x, new_plane_state->crtc_y);
                        return -EINVAL;
                }

                return 0;
        }

        needs_reset = should_reset_plane(state, plane, old_plane_state,
                                         new_plane_state);

        /* Remove any changed/removed planes */
        if (!enable) {
                if (!needs_reset)
                        return 0;

                if (!old_plane_crtc)
                        return 0;

                old_crtc_state = drm_atomic_get_old_crtc_state(
                        state, old_plane_crtc);
                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

                if (!dm_old_crtc_state->stream)
                        return 0;

                DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
                                 plane->base.id, old_plane_crtc->base.id);

                ret = dm_atomic_get_state(state, &dm_state);
                if (ret)
                        return ret;

                if (!dc_remove_plane_from_context(
                                dc,
                                dm_old_crtc_state->stream,
                                dm_old_plane_state->dc_state,
                                dm_state->context)) {

                        ret = -EINVAL;
                        return ret;
                }

                dc_plane_state_release(dm_old_plane_state->dc_state);
                dm_new_plane_state->dc_state = NULL;

                *lock_and_validation_needed = true;

        } else { /* Add new planes */
                struct dc_plane_state *dc_new_plane_state;

                if (drm_atomic_plane_disabling(plane->state, new_plane_state))
                        return 0;

                if (!new_plane_crtc)
                        return 0;

                new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

                if (!dm_new_crtc_state->stream)
                        return 0;

                if (!needs_reset)
                        return 0;

                WARN_ON(dm_new_plane_state->dc_state);

                dc_new_plane_state = dc_create_plane_state(dc);
                if (!dc_new_plane_state)
                        return -ENOMEM;

                DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
                                 plane->base.id, new_plane_crtc->base.id);

                ret = fill_dc_plane_attributes(
                        new_plane_crtc->dev->dev_private,
                        dc_new_plane_state,
                        new_plane_state,
                        new_crtc_state);
                if (ret) {
                        dc_plane_state_release(dc_new_plane_state);
                        return ret;
                }

                ret = dm_atomic_get_state(state, &dm_state);
                if (ret) {
                        dc_plane_state_release(dc_new_plane_state);
                        return ret;
                }

                /*
                 * Any atomic check errors that occur after this will
                 * not need a release. The plane state will be attached
                 * to the stream, and therefore part of the atomic
                 * state. It'll be released when the atomic state is
                 * cleaned.
                 */
                if (!dc_add_plane_to_context(
                                dc,
                                dm_new_crtc_state->stream,
                                dc_new_plane_state,
                                dm_state->context)) {

                        dc_plane_state_release(dc_new_plane_state);
                        return -EINVAL;
                }

                dm_new_plane_state->dc_state = dc_new_plane_state;

                /* Tell DC to do a full surface update every time there
                 * is a plane change. Inefficient, but works for now.
                 */
                dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

                *lock_and_validation_needed = true;
        }

        return ret;
}

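/*
 * Build per-stream update bundles from the atomic state and ask DC how
 * invasive the commit is (dc_check_update_surfaces_for_stream). Any stream or
 * plane-state replacement, or an update classified above UPDATE_TYPE_MED,
 * escalates the result to UPDATE_TYPE_FULL.
 */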
static int
dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
                                    struct drm_atomic_state *state,
                                    enum surface_update_type *out_type)
{
        struct dc *dc = dm->dc;
        struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
        int i, j, num_plane, ret = 0;
        struct drm_plane_state *old_plane_state, *new_plane_state;
        struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
        struct drm_crtc *new_plane_crtc;
        struct drm_plane *plane;

        struct drm_crtc *crtc;
        struct drm_crtc_state *new_crtc_state, *old_crtc_state;
        struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
        struct dc_stream_status *status = NULL;
        enum surface_update_type update_type = UPDATE_TYPE_FAST;
        struct surface_info_bundle {
                struct dc_surface_update surface_updates[MAX_SURFACES];
                struct dc_plane_info plane_infos[MAX_SURFACES];
                struct dc_scaling_info scaling_infos[MAX_SURFACES];
                struct dc_flip_addrs flip_addrs[MAX_SURFACES];
                struct dc_stream_update stream_update;
        } *bundle;

        bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

        if (!bundle) {
                DRM_ERROR("Failed to allocate update bundle\n");
                /* Set type to FULL to avoid crashing in DC */
                update_type = UPDATE_TYPE_FULL;
                goto cleanup;
        }

        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {

                memset(bundle, 0, sizeof(struct surface_info_bundle));

                new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
                old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
                num_plane = 0;

                if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
                        update_type = UPDATE_TYPE_FULL;
                        goto cleanup;
                }

                if (!new_dm_crtc_state->stream)
                        continue;

                for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
                        const struct amdgpu_framebuffer *amdgpu_fb =
                                to_amdgpu_framebuffer(new_plane_state->fb);
                        struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
                        struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
                        struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
                        uint64_t tiling_flags;

                        new_plane_crtc = new_plane_state->crtc;
                        new_dm_plane_state = to_dm_plane_state(new_plane_state);
                        old_dm_plane_state = to_dm_plane_state(old_plane_state);

                        if (plane->type == DRM_PLANE_TYPE_CURSOR)
                                continue;

                        if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
                                update_type = UPDATE_TYPE_FULL;
                                goto cleanup;
                        }

                        if (crtc != new_plane_crtc)
                                continue;

                        bundle->surface_updates[num_plane].surface =
                                new_dm_plane_state->dc_state;

                        if (new_crtc_state->mode_changed) {
                                bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
                                bundle->stream_update.src = new_dm_crtc_state->stream->src;
                        }

                        if (new_crtc_state->color_mgmt_changed) {
                                bundle->surface_updates[num_plane].gamma =
                                        new_dm_plane_state->dc_state->gamma_correction;
                                bundle->surface_updates[num_plane].in_transfer_func =
                                        new_dm_plane_state->dc_state->in_transfer_func;
                                bundle->stream_update.gamut_remap =
                                        &new_dm_crtc_state->stream->gamut_remap_matrix;
                                bundle->stream_update.output_csc_transform =
                                        &new_dm_crtc_state->stream->csc_color_matrix;
                                bundle->stream_update.out_transfer_func =
                                        new_dm_crtc_state->stream->out_transfer_func;
                        }

                        ret = fill_dc_scaling_info(new_plane_state,
                                                   scaling_info);
                        if (ret)
                                goto cleanup;

                        bundle->surface_updates[num_plane].scaling_info = scaling_info;

                        if (amdgpu_fb) {
                                ret = get_fb_info(amdgpu_fb, &tiling_flags);
                                if (ret)
                                        goto cleanup;

                                ret = fill_dc_plane_info_and_addr(
                                        dm->adev, new_plane_state, tiling_flags,
                                        plane_info,
                                        &flip_addr->address,
                                        false);
                                if (ret)
                                        goto cleanup;

                                bundle->surface_updates[num_plane].plane_info = plane_info;
                                bundle->surface_updates[num_plane].flip_addr = flip_addr;
                        }

                        num_plane++;
                }

                if (num_plane == 0)
                        continue;

                ret = dm_atomic_get_state(state, &dm_state);
                if (ret)
                        goto cleanup;

                old_dm_state = dm_atomic_get_old_state(state);
                if (!old_dm_state) {
                        ret = -EINVAL;
                        goto cleanup;
                }

                status = dc_stream_get_status_from_state(old_dm_state->context,
                                                         new_dm_crtc_state->stream);
                bundle->stream_update.stream = new_dm_crtc_state->stream;
                /*
                 * TODO: DC modifies the surface during this call so we need
                 * to lock here - find a way to do this without locking.
                 */
                mutex_lock(&dm->dc_lock);
                update_type = dc_check_update_surfaces_for_stream(
                        dc, bundle->surface_updates, num_plane,
                        &bundle->stream_update, status);
                mutex_unlock(&dm->dc_lock);

                if (update_type > UPDATE_TYPE_MED) {
                        update_type = UPDATE_TYPE_FULL;
                        goto cleanup;
                }
        }

cleanup:
        kfree(bundle);

        *out_type = update_type;
        return ret;
}

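/*
 * Find the MST connector driving @crtc (if any) and add every CRTC sharing
 * its MST topology manager to the atomic state, so that DSC resource
 * allocation can be revalidated across the whole tree.
 */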
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
        struct drm_connector *connector;
        struct drm_connector_state *conn_state;
        struct amdgpu_dm_connector *aconnector = NULL;
        int i;

        for_each_new_connector_in_state(state, connector, conn_state, i) {
                if (conn_state->crtc != crtc)
                        continue;

                aconnector = to_amdgpu_dm_connector(connector);
                if (!aconnector->port || !aconnector->mst_port)
                        aconnector = NULL;
                else
                        break;
        }

        if (!aconnector)
                return 0;

        return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}

b8592b48
LL
8192/**
8193 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8194 * @dev: The DRM device
8195 * @state: The atomic state to commit
8196 *
8197 * Validate that the given atomic state is programmable by DC into hardware.
8198 * This involves constructing a &struct dc_state reflecting the new hardware
8199 * state we wish to commit, then querying DC to see if it is programmable. It's
8200 * important not to modify the existing DC state. Otherwise, atomic_check
8201 * may unexpectedly commit hardware changes.
8202 *
8203 * When validating the DC state, it's important that the right locks are
8204 * acquired. For full updates case which removes/adds/updates streams on one
8205 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
8206 * that any such full update commit will wait for completion of any outstanding
8207 * flip using DRMs synchronization events. See
8208 * dm_determine_update_type_for_commit()
8209 *
8210 * Note that DM adds the affected connectors for all CRTCs in state, when that
8211 * might not seem necessary. This is because DC stream creation requires the
8212 * DC sink, which is tied to the DRM connector state. Cleaning this up should
8213 * be possible but non-trivial - a possible TODO item.
8214 *
8215 * Return: -Error code if validation failed.
8216 */
7578ecda
AD
8217static int amdgpu_dm_atomic_check(struct drm_device *dev,
8218 struct drm_atomic_state *state)
62f55537 8219{
62f55537 8220 struct amdgpu_device *adev = dev->dev_private;
eb3dc897 8221 struct dm_atomic_state *dm_state = NULL;
62f55537 8222 struct dc *dc = adev->dm.dc;
62f55537 8223 struct drm_connector *connector;
c2cea706 8224 struct drm_connector_state *old_con_state, *new_con_state;
62f55537 8225 struct drm_crtc *crtc;
fc9e9920 8226 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9e869063
LL
8227 struct drm_plane *plane;
8228 struct drm_plane_state *old_plane_state, *new_plane_state;
a87fa993
BL
8229 enum surface_update_type update_type = UPDATE_TYPE_FAST;
8230 enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8231
1e88ad0a 8232 int ret, i;
e7b07cee 8233
62f55537
AG
8234 /*
8235 * This bool will be set to true for any modeset/reset
8236 * or plane update that implies a non-fast surface update.
8237 */
8238 bool lock_and_validation_needed = false;
8239
8240 ret = drm_atomic_helper_check_modeset(dev, state);
01e28f9c
MD
8241 if (ret)
8242 goto fail;
62f55537 8243
44be939f
ML
8244 if (adev->asic_type >= CHIP_NAVI10) {
8245 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8246 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8247 ret = add_affected_mst_dsc_crtcs(state, crtc);
8248 if (ret)
8249 goto fail;
8250 }
8251 }
8252 }
8253
1e88ad0a
S
8254 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8255 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
98e6436d 8256 !new_crtc_state->color_mgmt_changed &&
a93587b3 8257 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
1e88ad0a 8258 continue;
7bef1af3 8259
1e88ad0a
S
8260 if (!new_crtc_state->enable)
8261 continue;
fc9e9920 8262
1e88ad0a
S
8263 ret = drm_atomic_add_affected_connectors(state, crtc);
8264 if (ret)
8265 return ret;
fc9e9920 8266
1e88ad0a
S
8267 ret = drm_atomic_add_affected_planes(state, crtc);
8268 if (ret)
8269 goto fail;
e7b07cee
HW
8270 }
8271
2d9e6431
NK
8272 /*
8273 * Add all primary and overlay planes on the CRTC to the state
8274 * whenever a plane is enabled to maintain correct z-ordering
8275 * and to enable fast surface updates.
8276 */
8277 drm_for_each_crtc(crtc, dev) {
8278 bool modified = false;
8279
8280 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8281 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8282 continue;
8283
8284 if (new_plane_state->crtc == crtc ||
8285 old_plane_state->crtc == crtc) {
8286 modified = true;
8287 break;
8288 }
8289 }
8290
8291 if (!modified)
8292 continue;
8293
8294 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8295 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8296 continue;
8297
8298 new_plane_state =
8299 drm_atomic_get_plane_state(state, plane);
8300
8301 if (IS_ERR(new_plane_state)) {
8302 ret = PTR_ERR(new_plane_state);
8303 goto fail;
8304 }
8305 }
8306 }
8307
62f55537 8308 /* Remove existing planes if they are modified */
9e869063
LL
8309 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8310 ret = dm_update_plane_state(dc, state, plane,
8311 old_plane_state,
8312 new_plane_state,
8313 false,
8314 &lock_and_validation_needed);
8315 if (ret)
8316 goto fail;
62f55537
AG
8317 }
8318
8319 /* Disable all CRTCs that require a disable */
4b9674e5
LL
8320 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8321 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8322 old_crtc_state,
8323 new_crtc_state,
8324 false,
8325 &lock_and_validation_needed);
8326 if (ret)
8327 goto fail;
62f55537
AG
8328 }
8329
8330 /* Enable all CRTCs that require an enable */
4b9674e5
LL
8331 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8332 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8333 old_crtc_state,
8334 new_crtc_state,
8335 true,
8336 &lock_and_validation_needed);
8337 if (ret)
8338 goto fail;
62f55537
AG
8339 }
8340
8341 /* Add new/modified planes */
9e869063
LL
8342 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8343 ret = dm_update_plane_state(dc, state, plane,
8344 old_plane_state,
8345 new_plane_state,
8346 true,
8347 &lock_and_validation_needed);
8348 if (ret)
8349 goto fail;
62f55537
AG
8350 }
8351
b349f76e
ES
8352 /* Run this here since we want to validate the streams we created */
8353 ret = drm_atomic_helper_check_planes(dev, state);
8354 if (ret)
8355 goto fail;
62f55537 8356
43d10d30
NK
8357 if (state->legacy_cursor_update) {
8358 /*
8359 * This is a fast cursor update coming from the plane update
8360 * helper, check if it can be done asynchronously for better
8361 * performance.
8362 */
8363 state->async_update =
8364 !drm_atomic_helper_async_check(dev, state);
8365
8366 /*
8367 * Skip the remaining global validation if this is an async
8368 * update. Cursor updates can be done without affecting
8369 * state or bandwidth calcs and this avoids the performance
8370 * penalty of locking the private state object and
8371 * allocating a new dc_state.
8372 */
8373 if (state->async_update)
8374 return 0;
8375 }
8376
ebdd27e1 8377 /* Check scaling and underscan changes */
1f6010a9 8378 /* TODO: Removed scaling changes validation due to the inability to commit
e7b07cee
HW
8379 * a new stream into the context w/o causing a full reset. Need to
8380 * decide how to handle this.
8381 */
c2cea706 8382 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
8383 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8384 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8385 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
e7b07cee
HW
8386
8387 /* Skip any modesets/resets */
0bc9706d
LSL
8388 if (!acrtc || drm_atomic_crtc_needs_modeset(
8389 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
e7b07cee
HW
8390 continue;
8391
b830ebc9 8392 /* Skip anything that is not a scale or underscan change */
54d76575 8393 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
e7b07cee
HW
8394 continue;
8395
a87fa993 8396 overall_update_type = UPDATE_TYPE_FULL;
e7b07cee
HW
8397 lock_and_validation_needed = true;
8398 }
8399
f843b308 8400 ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
eb3dc897
NK
8401 if (ret)
8402 goto fail;
a87fa993
BL
8403
8404 if (overall_update_type < update_type)
8405 overall_update_type = update_type;
8406
8407 /*
8408 * lock_and_validation_needed was an old way to determine if we need to set
8409 * the global lock. Leaving it in to check if we broke any corner cases:
8410 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8411 * lock_and_validation_needed false = UPDATE_TYPE_FAST
8412 */
8413 if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8414 WARN(1, "Global lock should be set; overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
e7b07cee 8415
a87fa993 8416 if (overall_update_type > UPDATE_TYPE_FAST) {
eb3dc897
NK
8417 ret = dm_atomic_get_state(state, &dm_state);
8418 if (ret)
8419 goto fail;
e7b07cee
HW
8420
8421 ret = do_aquire_global_lock(dev, state);
8422 if (ret)
8423 goto fail;
1dc90497 8424
d9fe1a4c 8425#if defined(CONFIG_DRM_AMD_DC_DCN)
8c20a1ed
DF
8426 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8427 goto fail;
8428
29b9ba74
ML
8429 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8430 if (ret)
8431 goto fail;
d9fe1a4c 8432#endif
29b9ba74 8433
ded58c7b
ZL
8434 /*
8435 * Perform validation of MST topology in the state:
8436 * We need to perform MST atomic check before calling
8437 * dc_validate_global_state(), or there is a chance
8438 * to get stuck in an infinite loop and hang eventually.
8439 */
8440 ret = drm_dp_mst_atomic_check(state);
8441 if (ret)
8442 goto fail;
8443
afcd526b 8444 if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
e7b07cee
HW
8445 ret = -EINVAL;
8446 goto fail;
8447 }
bd200d19 8448 } else {
674e78ac 8449 /*
bd200d19
NK
8450 * The commit is a fast update. Fast updates shouldn't change
8451 * the DC context, affect global validation, and can have their
8452 * commit work done in parallel with other commits not touching
8453 * the same resource. If we have a new DC context as part of
8454 * the DM atomic state from validation we need to free it and
8455 * retain the existing one instead.
674e78ac 8456 */
bd200d19
NK
8457 struct dm_atomic_state *new_dm_state, *old_dm_state;
8458
8459 new_dm_state = dm_atomic_get_new_state(state);
8460 old_dm_state = dm_atomic_get_old_state(state);
8461
8462 if (new_dm_state && old_dm_state) {
8463 if (new_dm_state->context)
8464 dc_release_state(new_dm_state->context);
8465
8466 new_dm_state->context = old_dm_state->context;
8467
8468 if (old_dm_state->context)
8469 dc_retain_state(old_dm_state->context);
8470 }
e7b07cee
HW
8471 }
8472
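	/*
	 * Illustrative note (not from the source): dc_state objects are
	 * refcounted, so the swap in the else branch above amounts to
	 *
	 *   dc_release_state(new_ctx);  // drop the validation-only context
	 *   new_ctx = old_ctx;
	 *   dc_retain_state(old_ctx);   // extra reference for the new pointer
	 *
	 * keeping the counts balanced when the atomic state is later
	 * destroyed and drops its context reference.
	 */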
caff0e66
NK
8473 /* Store the overall update type for use later in atomic check. */
8474 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
8475 struct dm_crtc_state *dm_new_crtc_state =
8476 to_dm_crtc_state(new_crtc_state);
8477
8478 dm_new_crtc_state->update_type = (int)overall_update_type;
e7b07cee
HW
8479 }
8480
8481 /* Must be a success (ret == 0) at this point */
8482 WARN_ON(ret);
8483 return ret;
8484
8485fail:
8486 if (ret == -EDEADLK)
01e28f9c 8487 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
e7b07cee 8488 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
01e28f9c 8489 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
e7b07cee 8490 else
01e28f9c 8491 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
e7b07cee
HW
8492
8493 return ret;
8494}
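For context, the callback documented above is registered through the core &struct drm_mode_config_funcs table; amdgpu_dm wires its callbacks up in a table like the sketch below. A minimal illustration, assuming standard DRM helpers (the table name and the fb_create callback shown are illustrative, not amdgpu's actual ones):

#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>

static const struct drm_mode_config_funcs example_mode_config_funcs = {
	.fb_create = drm_gem_fb_create,		/* illustrative choice */
	.atomic_check = amdgpu_dm_atomic_check,	/* the function above */
	.atomic_commit = drm_atomic_helper_commit,
};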
8495
3ee6b26b
AD
8496static bool is_dp_capable_without_timing_msa(struct dc *dc,
8497 struct amdgpu_dm_connector *amdgpu_dm_connector)
e7b07cee
HW
8498{
8499 uint8_t dpcd_data;
8500 bool capable = false;
8501
c84dec2f 8502 if (amdgpu_dm_connector->dc_link &&
e7b07cee
HW
8503 dm_helpers_dp_read_dpcd(
8504 NULL,
c84dec2f 8505 amdgpu_dm_connector->dc_link,
e7b07cee
HW
8506 DP_DOWN_STREAM_PORT_COUNT,
8507 &dpcd_data,
8508 sizeof(dpcd_data))) {
8509 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
8510 }
8511
8512 return capable;
8513}
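The same DPCD query can also be expressed with the generic DRM DP helpers instead of DC's dm_helpers layer. A minimal sketch, assuming a struct drm_dp_aux channel is at hand (sink_ignores_msa_timing_params() is a hypothetical name):

#include <drm/drm_dp_helper.h>

static bool sink_ignores_msa_timing_params(struct drm_dp_aux *aux)
{
	u8 val;

	if (drm_dp_dpcd_readb(aux, DP_DOWN_STREAM_PORT_COUNT, &val) != 1)
		return false;

	/* DP_MSA_TIMING_PAR_IGNORED: the sink can regenerate video timing
	 * without relying on the MSA parameters, a FreeSync prerequisite.
	 */
	return val & DP_MSA_TIMING_PAR_IGNORED;
}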
98e6436d
AK
8514void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8515 struct edid *edid)
e7b07cee
HW
8516{
8517 int i;
e7b07cee
HW
8518 bool edid_check_required;
8519 struct detailed_timing *timing;
8520 struct detailed_non_pixel *data;
8521 struct detailed_data_monitor_range *range;
c84dec2f
HW
8522 struct amdgpu_dm_connector *amdgpu_dm_connector =
8523 to_amdgpu_dm_connector(connector);
bb47de73 8524 struct dm_connector_state *dm_con_state = NULL;
e7b07cee
HW
8525
8526 struct drm_device *dev = connector->dev;
8527 struct amdgpu_device *adev = dev->dev_private;
bb47de73 8528 bool freesync_capable = false;
b830ebc9 8529
8218d7f1
HW
8530 if (!connector->state) {
8531 DRM_ERROR("%s - Connector has no state\n", __func__);
bb47de73 8532 goto update;
8218d7f1
HW
8533 }
8534
98e6436d
AK
8535 if (!edid) {
8536 dm_con_state = to_dm_connector_state(connector->state);
8537
8538 amdgpu_dm_connector->min_vfreq = 0;
8539 amdgpu_dm_connector->max_vfreq = 0;
8540 amdgpu_dm_connector->pixel_clock_mhz = 0;
8541
bb47de73 8542 goto update;
98e6436d
AK
8543 }
8544
8218d7f1
HW
8545 dm_con_state = to_dm_connector_state(connector->state);
8546
e7b07cee 8547 edid_check_required = false;
c84dec2f 8548 if (!amdgpu_dm_connector->dc_sink) {
e7b07cee 8549 DRM_ERROR("dc_sink is NULL, could not add the FreeSync module.\n");
bb47de73 8550 goto update;
e7b07cee
HW
8551 }
8552 if (!adev->dm.freesync_module)
bb47de73 8553 goto update;
e7b07cee
HW
8554 /*
8555 * If the EDID is non-zero, restrict FreeSync support to DP and eDP only.
8556 */
8557 if (edid) {
c84dec2f
HW
8558 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8559 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
e7b07cee
HW
8560 edid_check_required = is_dp_capable_without_timing_msa(
8561 adev->dm.dc,
c84dec2f 8562 amdgpu_dm_connector);
e7b07cee
HW
8563 }
8564 }
e7b07cee
HW
8565 if (edid_check_required && (edid->version > 1 ||
8566 (edid->version == 1 && edid->revision > 1))) {
8567 for (i = 0; i < 4; i++) {
8568
8569 timing = &edid->detailed_timings[i];
8570 data = &timing->data.other_data;
8571 range = &data->data.range;
8572 /*
8573 * Check if monitor has continuous frequency mode
8574 */
8575 if (data->type != EDID_DETAIL_MONITOR_RANGE)
8576 continue;
8577 /*
8578 * Check for flag range limits only. If flag == 1 then
8579 * no additional timing information is provided.
8580 * Default GTF, GTF Secondary curve and CVT are not
8581 * supported
8582 */
8583 if (range->flags != 1)
8584 continue;
8585
c84dec2f
HW
8586 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8587 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8588 amdgpu_dm_connector->pixel_clock_mhz =
e7b07cee
HW
8589 range->pixel_clock_mhz * 10;
8590 break;
8591 }
8592
c84dec2f 8593 if (amdgpu_dm_connector->max_vfreq -
98e6436d
AK
8594 amdgpu_dm_connector->min_vfreq > 10) {
8595
bb47de73 8596 freesync_capable = true;
e7b07cee
HW
8597 }
8598 }
bb47de73
NK
8599
8600update:
8601 if (dm_con_state)
8602 dm_con_state->freesync_capable = freesync_capable;
8603
8604 if (connector->vrr_capable_property)
8605 drm_connector_set_vrr_capable_property(connector,
8606 freesync_capable);
e7b07cee
HW
8607}
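The detailed-descriptor walk above can be distilled into a small predicate. A minimal sketch, assuming the struct layout from include/drm/drm_edid.h and the >10 Hz VRR-window heuristic used above (edid_range_is_freesync_capable() is a hypothetical name):

#include <drm/drm_edid.h>

static bool edid_range_is_freesync_capable(const struct detailed_timing *timing,
					   unsigned int *min_hz,
					   unsigned int *max_hz)
{
	const struct detailed_non_pixel *data = &timing->data.other_data;
	const struct detailed_data_monitor_range *range = &data->data.range;

	/* Only flag-type (flags == 1) monitor range descriptors apply. */
	if (data->type != EDID_DETAIL_MONITOR_RANGE || range->flags != 1)
		return false;

	*min_hz = range->min_vfreq;
	*max_hz = range->max_vfreq;

	/* DM treats a VRR window wider than 10 Hz as FreeSync capable. */
	return (*max_hz - *min_hz) > 10;
}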
8608
8c322309
RL
8609static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8610{
8611 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8612
8613 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8614 return;
8615 if (link->type == dc_connection_none)
8616 return;
8617 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8618 dpcd_data, sizeof(dpcd_data))) {
8619 link->psr_feature_enabled = dpcd_data[0] ? true : false;
8620 DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
8621 }
8622}
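A minimal sketch of the same eDP PSR capability probe via the generic DRM DP helpers rather than dm_helpers_dp_read_dpcd() (illustrative only; sink_supports_psr() is a hypothetical name, and the driver keeps its own path so reads go through DC):

#include <drm/drm_dp_helper.h>

static bool sink_supports_psr(struct drm_dp_aux *aux)
{
	u8 psr_caps[EDP_PSR_RECEIVER_CAP_SIZE];

	if (drm_dp_dpcd_read(aux, DP_PSR_SUPPORT, psr_caps,
			     sizeof(psr_caps)) != sizeof(psr_caps))
		return false;

	/* Byte 0 carries the PSR version; zero means PSR is unsupported. */
	return psr_caps[0] != 0;
}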
8623
8624/**
8625 * amdgpu_dm_link_setup_psr() - configure the PSR link
8626 * @stream: stream state
8627 *
8628 * Return: true on success
8629 */
8630static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8631{
8632 struct dc_link *link = NULL;
8633 struct psr_config psr_config = {0};
8634 struct psr_context psr_context = {0};
8635 struct dc *dc = NULL;
8636 bool ret = false;
8637
8638 if (stream == NULL)
8639 return false;
8640
8641 link = stream->link;
8642 dc = link->ctx->dc;
8643
8644 psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;
8645
8646 if (psr_config.psr_version > 0) {
8647 psr_config.psr_exit_link_training_required = 0x1;
8648 psr_config.psr_frame_capture_indication_req = 0;
8649 psr_config.psr_rfb_setup_time = 0x37;
8650 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
8651 psr_config.allow_smu_optimizations = 0x0;
8652
8653 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
8654
8655 }
8656 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);
8657
8658 return ret;
8659}
8660
8661/**
8662 * amdgpu_dm_psr_enable() - enable PSR firmware
8663 * @stream: stream state
8664 *
8665 * Return: true on success
8666 */
8667bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
8668{
8669 struct dc_link *link = stream->link;
5b5abe95
AK
8670 unsigned int vsync_rate_hz = 0;
8671 struct dc_static_screen_params params = {0};
8672 /* Calculate the number of static frames before generating an interrupt
8673 * to enter PSR.
8674 */
5b5abe95
AK
8675 /* Initialize with a fail-safe of 2 static frames */
8676 unsigned int num_frames_static = 2;
8c322309
RL
8677
8678 DRM_DEBUG_DRIVER("Enabling psr...\n");
8679
5b5abe95
AK
8680 vsync_rate_hz = div64_u64(div64_u64((
8681 stream->timing.pix_clk_100hz * 100),
8682 stream->timing.v_total),
8683 stream->timing.h_total);
8684
8685 /* Round up.
8686 * Calculate the number of frames such that at least 30 ms of time has
8687 * passed.
8688 */
7aa62404
RL
8689 if (vsync_rate_hz != 0) {
8690 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
5b5abe95 8691 num_frames_static = (30000 / frame_time_microsec) + 1;
7aa62404 8692 }
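	/*
	 * Worked example (illustrative): at 60 Hz, frame_time_microsec =
	 * 1000000 / 60 = 16666, so num_frames_static = 30000 / 16666 + 1 = 2,
	 * i.e. roughly 33 ms of static screen before PSR entry is signalled.
	 */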
5b5abe95
AK
8693
8694 params.triggers.cursor_update = true;
8695 params.triggers.overlay_update = true;
8696 params.triggers.surface_update = true;
8697 params.num_frames = num_frames_static;
8c322309 8698
5b5abe95 8699 dc_stream_set_static_screen_params(link->ctx->dc,
8c322309 8700 &stream, 1,
5b5abe95 8701 &params);
8c322309
RL
8702
8703 return dc_link_set_psr_allow_active(link, true, false);
8704}
8705
8706/**
8707 * amdgpu_dm_psr_disable() - disable PSR firmware
8708 * @stream: stream state
8709 *
8710 * Return: true on success
8711 */
8712static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
8713{
8714
8715 DRM_DEBUG_DRIVER("Disabling psr...\n");
8716
8717 return dc_link_set_psr_allow_active(stream->link, false, true);
8718}