2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include <linux/string.h>
27 #include <linux/acpi.h>
28 #include <linux/i2c.h>
30 #include <drm/drm_atomic.h>
31 #include <drm/drm_probe_helper.h>
32 #include <drm/amdgpu_drm.h>
33 #include <drm/drm_edid.h>
35 #include "dm_services.h"
38 #include "amdgpu_dm.h"
39 #include "amdgpu_dm_irq.h"
40 #include "amdgpu_dm_mst_types.h"
41 #include "dpcd_defs.h"
42 #include "dc/inc/core_types.h"
44 #include "dm_helpers.h"
45 #include "ddc_service_types.h"
47 static u32
edid_extract_panel_id(struct edid
*edid
)
49 return (u32
)edid
->mfg_id
[0] << 24 |
50 (u32
)edid
->mfg_id
[1] << 16 |
51 (u32
)EDID_PRODUCT_ID(edid
);
54 static void apply_edid_quirks(struct edid
*edid
, struct dc_edid_caps
*edid_caps
)
56 uint32_t panel_id
= edid_extract_panel_id(edid
);
59 /* Workaround for some monitors which does not work well with FAMS */
60 case drm_edid_encode_panel_id('S', 'A', 'M', 0x0E5E):
61 case drm_edid_encode_panel_id('S', 'A', 'M', 0x7053):
62 case drm_edid_encode_panel_id('S', 'A', 'M', 0x71AC):
63 DRM_DEBUG_DRIVER("Disabling FAMS on monitor with panel id %X\n", panel_id
);
64 edid_caps
->panel_patch
.disable_fams
= true;
72 * dm_helpers_parse_edid_caps() - Parse edid caps
74 * @link: current detected link
75 * @edid: [in] pointer to edid
76 * @edid_caps: [in] pointer to edid caps
80 enum dc_edid_status
dm_helpers_parse_edid_caps(
82 const struct dc_edid
*edid
,
83 struct dc_edid_caps
*edid_caps
)
85 struct amdgpu_dm_connector
*aconnector
= link
->priv
;
86 struct drm_connector
*connector
= &aconnector
->base
;
87 struct edid
*edid_buf
= edid
? (struct edid
*) edid
->raw_edid
: NULL
;
94 enum dc_edid_status result
= EDID_OK
;
96 if (!edid_caps
|| !edid
)
97 return EDID_BAD_INPUT
;
99 if (!drm_edid_is_valid(edid_buf
))
100 result
= EDID_BAD_CHECKSUM
;
102 edid_caps
->manufacturer_id
= (uint16_t) edid_buf
->mfg_id
[0] |
103 ((uint16_t) edid_buf
->mfg_id
[1])<<8;
104 edid_caps
->product_id
= (uint16_t) edid_buf
->prod_code
[0] |
105 ((uint16_t) edid_buf
->prod_code
[1])<<8;
106 edid_caps
->serial_number
= edid_buf
->serial
;
107 edid_caps
->manufacture_week
= edid_buf
->mfg_week
;
108 edid_caps
->manufacture_year
= edid_buf
->mfg_year
;
110 drm_edid_get_monitor_name(edid_buf
,
111 edid_caps
->display_name
,
112 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS
);
114 edid_caps
->edid_hdmi
= connector
->display_info
.is_hdmi
;
116 sad_count
= drm_edid_to_sad((struct edid
*) edid
->raw_edid
, &sads
);
120 edid_caps
->audio_mode_count
= min(sad_count
, DC_MAX_AUDIO_DESC_COUNT
);
121 for (i
= 0; i
< edid_caps
->audio_mode_count
; ++i
) {
122 struct cea_sad
*sad
= &sads
[i
];
124 edid_caps
->audio_modes
[i
].format_code
= sad
->format
;
125 edid_caps
->audio_modes
[i
].channel_count
= sad
->channels
+ 1;
126 edid_caps
->audio_modes
[i
].sample_rate
= sad
->freq
;
127 edid_caps
->audio_modes
[i
].sample_size
= sad
->byte2
;
130 sadb_count
= drm_edid_to_speaker_allocation((struct edid
*) edid
->raw_edid
, &sadb
);
132 if (sadb_count
< 0) {
133 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count
);
138 edid_caps
->speaker_flags
= sadb
[0];
140 edid_caps
->speaker_flags
= DEFAULT_SPEAKER_LOCATION
;
142 apply_edid_quirks(edid_buf
, edid_caps
);
151 fill_dc_mst_payload_table_from_drm(struct dc_link
*link
,
153 struct drm_dp_mst_atomic_payload
*target_payload
,
154 struct dc_dp_mst_stream_allocation_table
*table
)
156 struct dc_dp_mst_stream_allocation_table new_table
= { 0 };
157 struct dc_dp_mst_stream_allocation
*sa
;
158 struct link_mst_stream_allocation_table copy_of_link_table
=
159 link
->mst_stream_alloc_table
;
162 int current_hw_table_stream_cnt
= copy_of_link_table
.stream_count
;
163 struct link_mst_stream_allocation
*dc_alloc
;
165 /* TODO: refactor to set link->mst_stream_alloc_table directly if possible.*/
168 ©_of_link_table
.stream_allocations
[current_hw_table_stream_cnt
];
169 dc_alloc
->vcp_id
= target_payload
->vcpi
;
170 dc_alloc
->slot_count
= target_payload
->time_slots
;
172 for (i
= 0; i
< copy_of_link_table
.stream_count
; i
++) {
174 ©_of_link_table
.stream_allocations
[i
];
176 if (dc_alloc
->vcp_id
== target_payload
->vcpi
) {
177 dc_alloc
->vcp_id
= 0;
178 dc_alloc
->slot_count
= 0;
182 ASSERT(i
!= copy_of_link_table
.stream_count
);
185 /* Fill payload info*/
186 for (i
= 0; i
< MAX_CONTROLLER_NUM
; i
++) {
188 ©_of_link_table
.stream_allocations
[i
];
189 if (dc_alloc
->vcp_id
> 0 && dc_alloc
->slot_count
> 0) {
190 sa
= &new_table
.stream_allocations
[new_table
.stream_count
];
191 sa
->slot_count
= dc_alloc
->slot_count
;
192 sa
->vcp_id
= dc_alloc
->vcp_id
;
193 new_table
.stream_count
++;
197 /* Overwrite the old table */
/* No-op on amdgpu: branch info updates are not required here. */
void dm_helpers_dp_update_branch_info(
	struct dc_context *ctx,
	const struct dc_link *link)
{}
206 static void dm_helpers_construct_old_payload(
207 struct dc_link
*link
,
209 struct drm_dp_mst_atomic_payload
*new_payload
,
210 struct drm_dp_mst_atomic_payload
*old_payload
)
212 struct link_mst_stream_allocation_table current_link_table
=
213 link
->mst_stream_alloc_table
;
214 struct link_mst_stream_allocation
*dc_alloc
;
217 *old_payload
= *new_payload
;
219 /* Set correct time_slots/PBN of old payload.
220 * other fields (delete & dsc_enabled) in
221 * struct drm_dp_mst_atomic_payload are don't care fields
222 * while calling drm_dp_remove_payload_part2()
224 for (i
= 0; i
< current_link_table
.stream_count
; i
++) {
226 ¤t_link_table
.stream_allocations
[i
];
228 if (dc_alloc
->vcp_id
== new_payload
->vcpi
) {
229 old_payload
->time_slots
= dc_alloc
->slot_count
;
230 old_payload
->pbn
= dc_alloc
->slot_count
* pbn_per_slot
;
235 /* make sure there is an old payload*/
236 ASSERT(i
!= current_link_table
.stream_count
);
241 * Writes payload allocation table in immediate downstream device.
243 bool dm_helpers_dp_mst_write_payload_allocation_table(
244 struct dc_context
*ctx
,
245 const struct dc_stream_state
*stream
,
246 struct dc_dp_mst_stream_allocation_table
*proposed_table
,
249 struct amdgpu_dm_connector
*aconnector
;
250 struct drm_dp_mst_topology_state
*mst_state
;
251 struct drm_dp_mst_atomic_payload
*target_payload
, *new_payload
, old_payload
;
252 struct drm_dp_mst_topology_mgr
*mst_mgr
;
254 aconnector
= (struct amdgpu_dm_connector
*)stream
->dm_stream_context
;
255 /* Accessing the connector state is required for vcpi_slots allocation
256 * and directly relies on behaviour in commit check
257 * that blocks before commit guaranteeing that the state
258 * is not gonna be swapped while still in use in commit tail
261 if (!aconnector
|| !aconnector
->mst_root
)
264 mst_mgr
= &aconnector
->mst_root
->mst_mgr
;
265 mst_state
= to_drm_dp_mst_topology_state(mst_mgr
->base
.state
);
266 new_payload
= drm_atomic_get_mst_payload_state(mst_state
, aconnector
->mst_output_port
);
269 target_payload
= new_payload
;
271 /* It's OK for this to fail */
272 drm_dp_add_payload_part1(mst_mgr
, mst_state
, new_payload
);
274 /* construct old payload by VCPI*/
275 dm_helpers_construct_old_payload(stream
->link
, mst_state
->pbn_div
,
276 new_payload
, &old_payload
);
277 target_payload
= &old_payload
;
279 drm_dp_remove_payload_part1(mst_mgr
, mst_state
, new_payload
);
282 /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or
283 * AUX message. The sequence is slot 1-63 allocated sequence for each
284 * stream. AMD ASIC stream slot allocation should follow the same
285 * sequence. copy DRM MST allocation to dc
287 fill_dc_mst_payload_table_from_drm(stream
->link
, enable
, target_payload
, proposed_table
);
/*
 * poll pending down reply
 */
void dm_helpers_dp_mst_poll_pending_down_reply(
	struct dc_context *ctx,
	const struct dc_link *link)
{}
/*
 * Clear payload allocation table before enable MST DP link.
 */
void dm_helpers_dp_mst_clear_payload_allocation_table(
	struct dc_context *ctx,
	const struct dc_link *link)
{}
309 * Polls for ACT (allocation change trigger) handled and sends
310 * ALLOCATE_PAYLOAD message.
312 enum act_return_status
dm_helpers_dp_mst_poll_for_allocation_change_trigger(
313 struct dc_context
*ctx
,
314 const struct dc_stream_state
*stream
)
316 struct amdgpu_dm_connector
*aconnector
;
317 struct drm_dp_mst_topology_mgr
*mst_mgr
;
320 aconnector
= (struct amdgpu_dm_connector
*)stream
->dm_stream_context
;
322 if (!aconnector
|| !aconnector
->mst_root
)
325 mst_mgr
= &aconnector
->mst_root
->mst_mgr
;
327 if (!mst_mgr
->mst_state
)
330 ret
= drm_dp_check_act_status(mst_mgr
);
338 bool dm_helpers_dp_mst_send_payload_allocation(
339 struct dc_context
*ctx
,
340 const struct dc_stream_state
*stream
,
343 struct amdgpu_dm_connector
*aconnector
;
344 struct drm_dp_mst_topology_state
*mst_state
;
345 struct drm_dp_mst_topology_mgr
*mst_mgr
;
346 struct drm_dp_mst_atomic_payload
*new_payload
, old_payload
;
347 enum mst_progress_status set_flag
= MST_ALLOCATE_NEW_PAYLOAD
;
348 enum mst_progress_status clr_flag
= MST_CLEAR_ALLOCATED_PAYLOAD
;
351 aconnector
= (struct amdgpu_dm_connector
*)stream
->dm_stream_context
;
353 if (!aconnector
|| !aconnector
->mst_root
)
356 mst_mgr
= &aconnector
->mst_root
->mst_mgr
;
357 mst_state
= to_drm_dp_mst_topology_state(mst_mgr
->base
.state
);
359 new_payload
= drm_atomic_get_mst_payload_state(mst_state
, aconnector
->mst_output_port
);
362 set_flag
= MST_CLEAR_ALLOCATED_PAYLOAD
;
363 clr_flag
= MST_ALLOCATE_NEW_PAYLOAD
;
367 ret
= drm_dp_add_payload_part2(mst_mgr
, mst_state
->base
.state
, new_payload
);
369 dm_helpers_construct_old_payload(stream
->link
, mst_state
->pbn_div
,
370 new_payload
, &old_payload
);
371 drm_dp_remove_payload_part2(mst_mgr
, mst_state
, &old_payload
, new_payload
);
375 amdgpu_dm_set_mst_status(&aconnector
->mst_status
,
378 amdgpu_dm_set_mst_status(&aconnector
->mst_status
,
380 amdgpu_dm_set_mst_status(&aconnector
->mst_status
,
/* Emit the DTN log opening marker, either into the log buffer or dmesg. */
void dm_dtn_log_begin(struct dc_context *ctx,
		      struct dc_log_buffer_ctx *log_ctx)
{
	static const char msg[] = "[dtn begin]\n";

	if (!log_ctx) {
		pr_info("%s", msg);
		return;
	}

	dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
}
401 void dm_dtn_log_append_v(struct dc_context
*ctx
,
402 struct dc_log_buffer_ctx
*log_ctx
,
403 const char *msg
, ...)
410 /* No context, redirect to dmesg. */
411 struct va_format vaf
;
417 pr_info("%pV", &vaf
);
423 /* Measure the output. */
425 n
= vsnprintf(NULL
, 0, msg
, args
);
431 /* Reallocate the string buffer as needed. */
432 total
= log_ctx
->pos
+ n
+ 1;
434 if (total
> log_ctx
->size
) {
435 char *buf
= kvcalloc(total
, sizeof(char), GFP_KERNEL
);
438 memcpy(buf
, log_ctx
->buf
, log_ctx
->pos
);
442 log_ctx
->size
= total
;
449 /* Write the formatted string to the log buffer. */
452 log_ctx
->buf
+ log_ctx
->pos
,
453 log_ctx
->size
- log_ctx
->pos
,
/* Emit the DTN log closing marker, either into the log buffer or dmesg. */
void dm_dtn_log_end(struct dc_context *ctx,
		    struct dc_log_buffer_ctx *log_ctx)
{
	static const char msg[] = "[dtn end]\n";

	if (!log_ctx) {
		pr_info("%s", msg);
		return;
	}

	dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
}
475 bool dm_helpers_dp_mst_start_top_mgr(
476 struct dc_context
*ctx
,
477 const struct dc_link
*link
,
480 struct amdgpu_dm_connector
*aconnector
= link
->priv
;
484 DRM_ERROR("Failed to find connector for link!");
489 DRM_INFO("DM_MST: Differing MST start on aconnector: %p [id: %d]\n",
490 aconnector
, aconnector
->base
.base
.id
);
494 DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
495 aconnector
, aconnector
->base
.base
.id
);
497 ret
= drm_dp_mst_topology_mgr_set_mst(&aconnector
->mst_mgr
, true);
499 DRM_ERROR("DM_MST: Failed to set the device into MST mode!");
503 DRM_INFO("DM_MST: DP%x, %d-lane link detected\n", aconnector
->mst_mgr
.dpcd
[0],
504 aconnector
->mst_mgr
.dpcd
[2] & DP_MAX_LANE_COUNT_MASK
);
509 bool dm_helpers_dp_mst_stop_top_mgr(
510 struct dc_context
*ctx
,
511 struct dc_link
*link
)
513 struct amdgpu_dm_connector
*aconnector
= link
->priv
;
516 DRM_ERROR("Failed to find connector for link!");
520 DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
521 aconnector
, aconnector
->base
.base
.id
);
523 if (aconnector
->mst_mgr
.mst_state
== true) {
524 drm_dp_mst_topology_mgr_set_mst(&aconnector
->mst_mgr
, false);
525 link
->cur_link_settings
.lane_count
= 0;
531 bool dm_helpers_dp_read_dpcd(
532 struct dc_context
*ctx
,
533 const struct dc_link
*link
,
539 struct amdgpu_dm_connector
*aconnector
= link
->priv
;
542 DC_LOG_DC("Failed to find connector for link!\n");
546 return drm_dp_dpcd_read(&aconnector
->dm_dp_aux
.aux
, address
, data
,
550 bool dm_helpers_dp_write_dpcd(
551 struct dc_context
*ctx
,
552 const struct dc_link
*link
,
557 struct amdgpu_dm_connector
*aconnector
= link
->priv
;
560 DRM_ERROR("Failed to find connector for link!");
564 return drm_dp_dpcd_write(&aconnector
->dm_dp_aux
.aux
,
565 address
, (uint8_t *)data
, size
) > 0;
568 bool dm_helpers_submit_i2c(
569 struct dc_context
*ctx
,
570 const struct dc_link
*link
,
571 struct i2c_command
*cmd
)
573 struct amdgpu_dm_connector
*aconnector
= link
->priv
;
574 struct i2c_msg
*msgs
;
576 int num
= cmd
->number_of_payloads
;
580 DRM_ERROR("Failed to find connector for link!");
584 msgs
= kcalloc(num
, sizeof(struct i2c_msg
), GFP_KERNEL
);
589 for (i
= 0; i
< num
; i
++) {
590 msgs
[i
].flags
= cmd
->payloads
[i
].write
? 0 : I2C_M_RD
;
591 msgs
[i
].addr
= cmd
->payloads
[i
].address
;
592 msgs
[i
].len
= cmd
->payloads
[i
].length
;
593 msgs
[i
].buf
= cmd
->payloads
[i
].data
;
596 result
= i2c_transfer(&aconnector
->i2c
->base
, msgs
, num
) == num
;
603 static bool execute_synaptics_rc_command(struct drm_dp_aux
*aux
,
610 bool success
= false;
611 unsigned char rc_data
[16] = {0};
612 unsigned char rc_offset
[4] = {0};
613 unsigned char rc_length
[2] = {0};
614 unsigned char rc_cmd
= 0;
615 unsigned char rc_result
= 0xFF;
621 memmove(rc_data
, data
, length
);
622 ret
= drm_dp_dpcd_write(aux
, SYNAPTICS_RC_DATA
, rc_data
, sizeof(rc_data
));
626 rc_offset
[0] = (unsigned char) offset
& 0xFF;
627 rc_offset
[1] = (unsigned char) (offset
>> 8) & 0xFF;
628 rc_offset
[2] = (unsigned char) (offset
>> 16) & 0xFF;
629 rc_offset
[3] = (unsigned char) (offset
>> 24) & 0xFF;
630 ret
= drm_dp_dpcd_write(aux
, SYNAPTICS_RC_OFFSET
, rc_offset
, sizeof(rc_offset
));
633 rc_length
[0] = (unsigned char) length
& 0xFF;
634 rc_length
[1] = (unsigned char) (length
>> 8) & 0xFF;
635 ret
= drm_dp_dpcd_write(aux
, SYNAPTICS_RC_LENGTH
, rc_length
, sizeof(rc_length
));
639 ret
= drm_dp_dpcd_write(aux
, SYNAPTICS_RC_COMMAND
, &rc_cmd
, sizeof(rc_cmd
));
642 DRM_ERROR("%s: write cmd ..., err = %d\n", __func__
, ret
);
646 // poll until active is 0
647 for (i
= 0; i
< 10; i
++) {
648 drm_dp_dpcd_read(aux
, SYNAPTICS_RC_COMMAND
, &rc_cmd
, sizeof(rc_cmd
));
656 drm_dp_dpcd_read(aux
, SYNAPTICS_RC_RESULT
, &rc_result
, sizeof(rc_result
));
657 success
= (rc_result
== 0);
659 if (success
&& !is_write_cmd
) {
661 drm_dp_dpcd_read(aux
, SYNAPTICS_RC_DATA
, data
, length
);
664 DC_LOG_DC("%s: success = %d\n", __func__
, success
);
669 static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux
*aux
)
671 unsigned char data
[16] = {0};
673 DC_LOG_DC("Start %s\n", __func__
);
682 if (!execute_synaptics_rc_command(aux
, true, 0x01, 5, 0, data
))
686 if (!execute_synaptics_rc_command(aux
, false, 0x31, 4, 0x220998, data
))
689 data
[0] &= (~(1 << 1)); // set bit 1 to 0
690 if (!execute_synaptics_rc_command(aux
, true, 0x21, 4, 0x220998, data
))
693 if (!execute_synaptics_rc_command(aux
, false, 0x31, 4, 0x220D98, data
))
696 data
[0] &= (~(1 << 1)); // set bit 1 to 0
697 if (!execute_synaptics_rc_command(aux
, true, 0x21, 4, 0x220D98, data
))
700 if (!execute_synaptics_rc_command(aux
, false, 0x31, 4, 0x221198, data
))
703 data
[0] &= (~(1 << 1)); // set bit 1 to 0
704 if (!execute_synaptics_rc_command(aux
, true, 0x21, 4, 0x221198, data
))
708 if (!execute_synaptics_rc_command(aux
, false, 0x31, 4, 0x220998, data
))
711 data
[0] |= (1 << 1); // set bit 1 to 1
712 if (!execute_synaptics_rc_command(aux
, true, 0x21, 4, 0x220998, data
))
715 if (!execute_synaptics_rc_command(aux
, false, 0x31, 4, 0x220D98, data
))
718 data
[0] |= (1 << 1); // set bit 1 to 1
720 if (!execute_synaptics_rc_command(aux
, false, 0x31, 4, 0x221198, data
))
723 data
[0] |= (1 << 1); // set bit 1 to 1
724 if (!execute_synaptics_rc_command(aux
, true, 0x21, 4, 0x221198, data
))
728 if (!execute_synaptics_rc_command(aux
, true, 0x02, 0, 0, NULL
))
731 DC_LOG_DC("Done %s\n", __func__
);
735 static const uint8_t SYNAPTICS_DEVICE_ID
[] = "SYNA";
737 static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst(
738 struct drm_dp_aux
*aux
,
739 const struct dc_stream_state
*stream
,
744 DC_LOG_DC("Configure DSC to non-virtual dpcd synaptics\n");
747 /* When DSC is enabled on previous boot and reboot with the hub,
748 * there is a chance that Synaptics hub gets stuck during reboot sequence.
749 * Applying a workaround to reset Synaptics SDP fifo before enabling the first stream
751 if (!stream
->link
->link_status
.link_active
&&
752 memcmp(stream
->link
->dpcd_caps
.branch_dev_name
,
753 (int8_t *)SYNAPTICS_DEVICE_ID
, 4) == 0)
754 apply_synaptics_fifo_reset_wa(aux
);
756 ret
= drm_dp_dpcd_write(aux
, DP_DSC_ENABLE
, &enable
, 1);
757 DRM_INFO("Send DSC enable to synaptics\n");
760 /* Synaptics hub not support virtual dpcd,
761 * external monitor occur garbage while disable DSC,
762 * Disable DSC only when entire link status turn to false,
764 if (!stream
->link
->link_status
.link_active
) {
765 ret
= drm_dp_dpcd_write(aux
, DP_DSC_ENABLE
, &enable
, 1);
766 DRM_INFO("Send DSC disable to synaptics\n");
773 bool dm_helpers_dp_write_dsc_enable(
774 struct dc_context
*ctx
,
775 const struct dc_stream_state
*stream
,
778 static const uint8_t DSC_DISABLE
;
779 static const uint8_t DSC_DECODING
= 0x01;
780 static const uint8_t DSC_PASSTHROUGH
= 0x02;
782 struct amdgpu_dm_connector
*aconnector
;
783 struct drm_dp_mst_port
*port
;
784 uint8_t enable_dsc
= enable
? DSC_DECODING
: DSC_DISABLE
;
785 uint8_t enable_passthrough
= enable
? DSC_PASSTHROUGH
: DSC_DISABLE
;
791 if (stream
->signal
== SIGNAL_TYPE_DISPLAY_PORT_MST
) {
792 aconnector
= (struct amdgpu_dm_connector
*)stream
->dm_stream_context
;
794 if (!aconnector
->dsc_aux
)
797 // apply w/a to synaptics
798 if (needs_dsc_aux_workaround(aconnector
->dc_link
) &&
799 (aconnector
->mst_downstream_port_present
.byte
& 0x7) != 0x3)
800 return write_dsc_enable_synaptics_non_virtual_dpcd_mst(
801 aconnector
->dsc_aux
, stream
, enable_dsc
);
803 port
= aconnector
->mst_output_port
;
806 if (port
->passthrough_aux
) {
807 ret
= drm_dp_dpcd_write(port
->passthrough_aux
,
809 &enable_passthrough
, 1);
810 DC_LOG_DC("Sent DSC pass-through enable to virtual dpcd port, ret = %u\n",
814 ret
= drm_dp_dpcd_write(aconnector
->dsc_aux
,
815 DP_DSC_ENABLE
, &enable_dsc
, 1);
816 DC_LOG_DC("Sent DSC decoding enable to %s port, ret = %u\n",
817 (port
->passthrough_aux
) ? "remote RX" :
821 ret
= drm_dp_dpcd_write(aconnector
->dsc_aux
,
822 DP_DSC_ENABLE
, &enable_dsc
, 1);
823 DC_LOG_DC("Sent DSC decoding disable to %s port, ret = %u\n",
824 (port
->passthrough_aux
) ? "remote RX" :
828 if (port
->passthrough_aux
) {
829 ret
= drm_dp_dpcd_write(port
->passthrough_aux
,
831 &enable_passthrough
, 1);
832 DC_LOG_DC("Sent DSC pass-through disable to virtual dpcd port, ret = %u\n",
838 if (stream
->signal
== SIGNAL_TYPE_DISPLAY_PORT
|| stream
->signal
== SIGNAL_TYPE_EDP
) {
839 if (stream
->sink
->link
->dpcd_caps
.dongle_type
== DISPLAY_DONGLE_NONE
) {
840 ret
= dm_helpers_dp_write_dpcd(ctx
, stream
->link
, DP_DSC_ENABLE
, &enable_dsc
, 1);
841 DC_LOG_DC("Send DSC %s to SST RX\n", enable_dsc
? "enable" : "disable");
842 } else if (stream
->sink
->link
->dpcd_caps
.dongle_type
== DISPLAY_DONGLE_DP_HDMI_CONVERTER
) {
843 ret
= dm_helpers_dp_write_dpcd(ctx
, stream
->link
, DP_DSC_ENABLE
, &enable_dsc
, 1);
844 DC_LOG_DC("Send DSC %s to DP-HDMI PCON\n", enable_dsc
? "enable" : "disable");
851 bool dm_helpers_is_dp_sink_present(struct dc_link
*link
)
853 bool dp_sink_present
;
854 struct amdgpu_dm_connector
*aconnector
= link
->priv
;
857 BUG_ON("Failed to find connector for link!");
861 mutex_lock(&aconnector
->dm_dp_aux
.aux
.hw_mutex
);
862 dp_sink_present
= dc_link_is_dp_sink_present(link
);
863 mutex_unlock(&aconnector
->dm_dp_aux
.aux
.hw_mutex
);
864 return dp_sink_present
;
867 enum dc_edid_status
dm_helpers_read_local_edid(
868 struct dc_context
*ctx
,
869 struct dc_link
*link
,
870 struct dc_sink
*sink
)
872 struct amdgpu_dm_connector
*aconnector
= link
->priv
;
873 struct drm_connector
*connector
= &aconnector
->base
;
874 struct i2c_adapter
*ddc
;
876 enum dc_edid_status edid_status
;
880 ddc
= &aconnector
->dm_dp_aux
.aux
.ddc
;
882 ddc
= &aconnector
->i2c
->base
;
884 /* some dongles read edid incorrectly the first time,
885 * do check sum and retry to make sure read correct edid.
889 edid
= drm_get_edid(&aconnector
->base
, ddc
);
891 /* DP Compliance Test 4.2.2.6 */
892 if (link
->aux_mode
&& connector
->edid_corrupt
)
893 drm_dp_send_real_edid_checksum(&aconnector
->dm_dp_aux
.aux
, connector
->real_edid_checksum
);
895 if (!edid
&& connector
->edid_corrupt
) {
896 connector
->edid_corrupt
= false;
897 return EDID_BAD_CHECKSUM
;
901 return EDID_NO_RESPONSE
;
903 sink
->dc_edid
.length
= EDID_LENGTH
* (edid
->extensions
+ 1);
904 memmove(sink
->dc_edid
.raw_edid
, (uint8_t *)edid
, sink
->dc_edid
.length
);
906 /* We don't need the original edid anymore */
909 edid_status
= dm_helpers_parse_edid_caps(
914 } while (edid_status
== EDID_BAD_CHECKSUM
&& --retry
> 0);
916 if (edid_status
!= EDID_OK
)
917 DRM_ERROR("EDID err: %d, on connector: %s",
919 aconnector
->base
.name
);
920 if (link
->aux_mode
) {
921 union test_request test_request
= {0};
922 union test_response test_response
= {0};
924 dm_helpers_dp_read_dpcd(ctx
,
928 sizeof(union test_request
));
930 if (!test_request
.bits
.EDID_READ
)
933 test_response
.bits
.EDID_CHECKSUM_WRITE
= 1;
935 dm_helpers_dp_write_dpcd(ctx
,
937 DP_TEST_EDID_CHECKSUM
,
938 &sink
->dc_edid
.raw_edid
[sink
->dc_edid
.length
-1],
941 dm_helpers_dp_write_dpcd(ctx
,
945 sizeof(test_response
));
951 int dm_helper_dmub_aux_transfer_sync(
952 struct dc_context
*ctx
,
953 const struct dc_link
*link
,
954 struct aux_payload
*payload
,
955 enum aux_return_code_type
*operation_result
)
957 return amdgpu_dm_process_dmub_aux_transfer_sync(ctx
, link
->link_index
, payload
,
961 int dm_helpers_dmub_set_config_sync(struct dc_context
*ctx
,
962 const struct dc_link
*link
,
963 struct set_config_cmd_payload
*payload
,
964 enum set_config_status
*operation_result
)
966 return amdgpu_dm_process_dmub_set_config_sync(ctx
, link
->link_index
, payload
,
/* Not implemented on amdgpu. */
void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
{
	/* TODO: something */
}
/* SMU timeout recovery hook — currently a stub. */
void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigned int param, unsigned int timeout_us)
{
	// TODO:
	//amdgpu_device_gpu_recover(dc_context->driver-context, NULL);
}
981 void dm_helpers_init_panel_settings(
982 struct dc_context
*ctx
,
983 struct dc_panel_config
*panel_config
,
984 struct dc_sink
*sink
)
986 // Extra Panel Power Sequence
987 panel_config
->pps
.extra_t3_ms
= sink
->edid_caps
.panel_patch
.extra_t3_ms
;
988 panel_config
->pps
.extra_t7_ms
= sink
->edid_caps
.panel_patch
.extra_t7_ms
;
989 panel_config
->pps
.extra_delay_backlight_off
= sink
->edid_caps
.panel_patch
.extra_delay_backlight_off
;
990 panel_config
->pps
.extra_post_t7_ms
= 0;
991 panel_config
->pps
.extra_pre_t11_ms
= 0;
992 panel_config
->pps
.extra_t12_ms
= sink
->edid_caps
.panel_patch
.extra_t12_ms
;
993 panel_config
->pps
.extra_post_OUI_ms
= 0;
995 panel_config
->dsc
.disable_dsc_edp
= false;
996 panel_config
->dsc
.force_dsc_edp_policy
= 0;
999 void dm_helpers_override_panel_settings(
1000 struct dc_context
*ctx
,
1001 struct dc_panel_config
*panel_config
)
1004 if (amdgpu_dc_debug_mask
& DC_DISABLE_DSC
)
1005 panel_config
->dsc
.disable_dsc_edp
= true;
1008 void *dm_helpers_allocate_gpu_mem(
1009 struct dc_context
*ctx
,
1010 enum dc_gpu_mem_alloc_type type
,
1014 struct amdgpu_device
*adev
= ctx
->driver_context
;
1015 struct dal_allocation
*da
;
1016 u32 domain
= (type
== DC_MEM_ALLOC_TYPE_GART
) ?
1017 AMDGPU_GEM_DOMAIN_GTT
: AMDGPU_GEM_DOMAIN_VRAM
;
1020 da
= kzalloc(sizeof(struct dal_allocation
), GFP_KERNEL
);
1024 ret
= amdgpu_bo_create_kernel(adev
, size
, PAGE_SIZE
,
1026 &da
->gpu_addr
, &da
->cpu_ptr
);
1028 *addr
= da
->gpu_addr
;
1035 /* add da to list in dm */
1036 list_add(&da
->list
, &adev
->dm
.da_list
);
1041 void dm_helpers_free_gpu_mem(
1042 struct dc_context
*ctx
,
1043 enum dc_gpu_mem_alloc_type type
,
1046 struct amdgpu_device
*adev
= ctx
->driver_context
;
1047 struct dal_allocation
*da
;
1049 /* walk the da list in DM */
1050 list_for_each_entry(da
, &adev
->dm
.da_list
, list
) {
1051 if (pvMem
== da
->cpu_ptr
) {
1052 amdgpu_bo_free_kernel(&da
->bo
, &da
->gpu_addr
, &da
->cpu_ptr
);
1053 list_del(&da
->list
);
1060 bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context
*ctx
, bool enable
)
1062 enum dc_irq_source irq_source
;
1065 irq_source
= DC_IRQ_SOURCE_DMCUB_OUTBOX
;
1067 ret
= dc_interrupt_set(ctx
->dc
, irq_source
, enable
);
1069 DRM_DEBUG_DRIVER("Dmub trace irq %sabling: r=%d\n",
1070 enable
? "en" : "dis", ret
);
1074 void dm_helpers_mst_enable_stream_features(const struct dc_stream_state
*stream
)
1076 /* TODO: virtual DPCD */
1077 struct dc_link
*link
= stream
->link
;
1078 union down_spread_ctrl old_downspread
;
1079 union down_spread_ctrl new_downspread
;
1081 if (link
->aux_access_disabled
)
1084 if (!dm_helpers_dp_read_dpcd(link
->ctx
, link
, DP_DOWNSPREAD_CTRL
,
1085 &old_downspread
.raw
,
1086 sizeof(old_downspread
)))
1089 new_downspread
.raw
= old_downspread
.raw
;
1090 new_downspread
.bits
.IGNORE_MSA_TIMING_PARAM
=
1091 (stream
->ignore_msa_timing_param
) ? 1 : 0;
1093 if (new_downspread
.raw
!= old_downspread
.raw
)
1094 dm_helpers_dp_write_dpcd(link
->ctx
, link
, DP_DOWNSPREAD_CTRL
,
1095 &new_downspread
.raw
,
1096 sizeof(new_downspread
));
1099 bool dm_helpers_dp_handle_test_pattern_request(
1100 struct dc_context
*ctx
,
1101 const struct dc_link
*link
,
1102 union link_test_pattern dpcd_test_pattern
,
1103 union test_misc dpcd_test_params
)
1105 enum dp_test_pattern test_pattern
;
1106 enum dp_test_pattern_color_space test_pattern_color_space
=
1107 DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED
;
1108 enum dc_color_depth requestColorDepth
= COLOR_DEPTH_UNDEFINED
;
1109 enum dc_pixel_encoding requestPixelEncoding
= PIXEL_ENCODING_UNDEFINED
;
1110 struct pipe_ctx
*pipes
= link
->dc
->current_state
->res_ctx
.pipe_ctx
;
1111 struct pipe_ctx
*pipe_ctx
= NULL
;
1112 struct amdgpu_dm_connector
*aconnector
= link
->priv
;
1115 for (i
= 0; i
< MAX_PIPES
; i
++) {
1116 if (pipes
[i
].stream
== NULL
)
1119 if (pipes
[i
].stream
->link
== link
&& !pipes
[i
].top_pipe
&&
1120 !pipes
[i
].prev_odm_pipe
) {
1121 pipe_ctx
= &pipes
[i
];
1126 if (pipe_ctx
== NULL
)
1129 switch (dpcd_test_pattern
.bits
.PATTERN
) {
1130 case LINK_TEST_PATTERN_COLOR_RAMP
:
1131 test_pattern
= DP_TEST_PATTERN_COLOR_RAMP
;
1133 case LINK_TEST_PATTERN_VERTICAL_BARS
:
1134 test_pattern
= DP_TEST_PATTERN_VERTICAL_BARS
;
1135 break; /* black and white */
1136 case LINK_TEST_PATTERN_COLOR_SQUARES
:
1137 test_pattern
= (dpcd_test_params
.bits
.DYN_RANGE
==
1138 TEST_DYN_RANGE_VESA
?
1139 DP_TEST_PATTERN_COLOR_SQUARES
:
1140 DP_TEST_PATTERN_COLOR_SQUARES_CEA
);
1143 test_pattern
= DP_TEST_PATTERN_VIDEO_MODE
;
1147 if (dpcd_test_params
.bits
.CLR_FORMAT
== 0)
1148 test_pattern_color_space
= DP_TEST_PATTERN_COLOR_SPACE_RGB
;
1150 test_pattern_color_space
= dpcd_test_params
.bits
.YCBCR_COEFS
?
1151 DP_TEST_PATTERN_COLOR_SPACE_YCBCR709
:
1152 DP_TEST_PATTERN_COLOR_SPACE_YCBCR601
;
1154 switch (dpcd_test_params
.bits
.BPC
) {
1156 requestColorDepth
= COLOR_DEPTH_666
;
1159 requestColorDepth
= COLOR_DEPTH_888
;
1162 requestColorDepth
= COLOR_DEPTH_101010
;
1165 requestColorDepth
= COLOR_DEPTH_121212
;
1171 switch (dpcd_test_params
.bits
.CLR_FORMAT
) {
1173 requestPixelEncoding
= PIXEL_ENCODING_RGB
;
1176 requestPixelEncoding
= PIXEL_ENCODING_YCBCR422
;
1179 requestPixelEncoding
= PIXEL_ENCODING_YCBCR444
;
1182 requestPixelEncoding
= PIXEL_ENCODING_RGB
;
1186 if ((requestColorDepth
!= COLOR_DEPTH_UNDEFINED
1187 && pipe_ctx
->stream
->timing
.display_color_depth
!= requestColorDepth
)
1188 || (requestPixelEncoding
!= PIXEL_ENCODING_UNDEFINED
1189 && pipe_ctx
->stream
->timing
.pixel_encoding
!= requestPixelEncoding
)) {
1190 DC_LOG_DEBUG("%s: original bpc %d pix encoding %d, changing to %d %d\n",
1192 pipe_ctx
->stream
->timing
.display_color_depth
,
1193 pipe_ctx
->stream
->timing
.pixel_encoding
,
1195 requestPixelEncoding
);
1196 pipe_ctx
->stream
->timing
.display_color_depth
= requestColorDepth
;
1197 pipe_ctx
->stream
->timing
.pixel_encoding
= requestPixelEncoding
;
1199 dc_link_update_dsc_config(pipe_ctx
);
1201 aconnector
->timing_changed
= true;
1202 /* store current timing */
1203 if (aconnector
->timing_requested
)
1204 *aconnector
->timing_requested
= pipe_ctx
->stream
->timing
;
1206 DC_LOG_ERROR("%s: timing storage failed\n", __func__
);
1210 dc_link_dp_set_test_pattern(
1211 (struct dc_link
*) link
,
1213 test_pattern_color_space
,
/* Not implemented on amdgpu. */
void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz)
{
	// TODO
}
1226 void dm_helpers_enable_periodic_detection(struct dc_context
*ctx
, bool enable
)
1228 /* TODO: add periodic detection implementation */
/* Branch bandwidth updates are not implemented on amdgpu. */
void dm_helpers_dp_mst_update_branch_bandwidth(
		struct dc_context *ctx,
		struct dc_link *link)
{
	// TODO
}
1238 static bool dm_is_freesync_pcon_whitelist(const uint32_t branch_dev_id
)
1240 bool ret_val
= false;
1242 switch (branch_dev_id
) {
1243 case DP_BRANCH_DEVICE_ID_0060AD
:
1244 case DP_BRANCH_DEVICE_ID_00E04C
:
1245 case DP_BRANCH_DEVICE_ID_90CC24
:
1255 enum adaptive_sync_type
dm_get_adaptive_sync_support_type(struct dc_link
*link
)
1257 struct dpcd_caps
*dpcd_caps
= &link
->dpcd_caps
;
1258 enum adaptive_sync_type as_type
= ADAPTIVE_SYNC_TYPE_NONE
;
1260 switch (dpcd_caps
->dongle_type
) {
1261 case DISPLAY_DONGLE_DP_HDMI_CONVERTER
:
1262 if (dpcd_caps
->adaptive_sync_caps
.dp_adap_sync_caps
.bits
.ADAPTIVE_SYNC_SDP_SUPPORT
== true &&
1263 dpcd_caps
->allow_invalid_MSA_timing_param
== true &&
1264 dm_is_freesync_pcon_whitelist(dpcd_caps
->branch_dev_id
))
1265 as_type
= FREESYNC_TYPE_PCON_IN_WHITELIST
;