2 * Copyright 2012-15 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include <linux/version.h>
27 #include <drm/drm_atomic_helper.h>
28 #include <drm/drm_dp_mst_helper.h>
29 #include "dm_services.h"
31 #include "amdgpu_dm.h"
32 #include "amdgpu_dm_mst_types.h"
35 #include "dm_helpers.h"
37 #include "dc_link_ddc.h"
39 #include "i2caux_interface.h"
40 #if defined(CONFIG_DEBUG_FS)
41 #include "amdgpu_dm_debugfs.h"
45 #if defined(CONFIG_DRM_AMD_DC_DCN)
46 #include "dc/dcn20/dcn20_resource.h"
49 /* #define TRACE_DPCD */
/* True when @address falls inside the DPCD sideband-message register range. */
#define SIDE_BAND_MSG(address) ((address) >= DP_SIDEBAND_MSG_DOWN_REQ_BASE && (address) < DP_SINK_COUNT_ESI)
54 static inline char *side_band_msg_type_to_str(uint32_t address
)
56 static char str
[10] = {0};
58 if (address
< DP_SIDEBAND_MSG_UP_REP_BASE
)
59 strcpy(str
, "DOWN_REQ");
60 else if (address
< DP_SIDEBAND_MSG_DOWN_REP_BASE
)
61 strcpy(str
, "UP_REP");
62 else if (address
< DP_SIDEBAND_MSG_UP_REQ_BASE
)
63 strcpy(str
, "DOWN_REP");
65 strcpy(str
, "UP_REQ");
70 static void log_dpcd(uint8_t type
,
76 DRM_DEBUG_KMS("Op: %s, addr: %04x, SideBand Msg: %s, Op res: %s\n",
77 (type
== DP_AUX_NATIVE_READ
) ||
78 (type
== DP_AUX_I2C_READ
) ?
81 SIDE_BAND_MSG(address
) ?
82 side_band_msg_type_to_str(address
) : "Nop",
86 print_hex_dump(KERN_INFO
, "Body: ", DUMP_PREFIX_NONE
, 16, 1, data
, size
, false);
91 static ssize_t
dm_dp_aux_transfer(struct drm_dp_aux
*aux
,
92 struct drm_dp_aux_msg
*msg
)
95 struct aux_payload payload
;
96 enum aux_channel_operation_result operation_result
;
98 if (WARN_ON(msg
->size
> 16))
101 payload
.address
= msg
->address
;
102 payload
.data
= msg
->buffer
;
103 payload
.length
= msg
->size
;
104 payload
.reply
= &msg
->reply
;
105 payload
.i2c_over_aux
= (msg
->request
& DP_AUX_NATIVE_WRITE
) == 0;
106 payload
.write
= (msg
->request
& DP_AUX_I2C_READ
) == 0;
107 payload
.mot
= (msg
->request
& DP_AUX_I2C_MOT
) != 0;
108 payload
.defer_delay
= 0;
110 result
= dc_link_aux_transfer_raw(TO_DM_AUX(aux
)->ddc_service
, &payload
,
117 switch (operation_result
) {
118 case AUX_CHANNEL_OPERATION_SUCCEEDED
:
120 case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON
:
121 case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN
:
124 case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY
:
125 case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE
:
128 case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT
:
137 dm_dp_mst_connector_destroy(struct drm_connector
*connector
)
139 struct amdgpu_dm_connector
*aconnector
=
140 to_amdgpu_dm_connector(connector
);
141 struct amdgpu_encoder
*amdgpu_encoder
= aconnector
->mst_encoder
;
143 if (aconnector
->dc_sink
) {
144 dc_link_remove_remote_sink(aconnector
->dc_link
,
145 aconnector
->dc_sink
);
146 dc_sink_release(aconnector
->dc_sink
);
149 kfree(aconnector
->edid
);
151 drm_encoder_cleanup(&amdgpu_encoder
->base
);
152 kfree(amdgpu_encoder
);
153 drm_connector_cleanup(connector
);
154 drm_dp_mst_put_port_malloc(aconnector
->port
);
159 amdgpu_dm_mst_connector_late_register(struct drm_connector
*connector
)
161 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
162 to_amdgpu_dm_connector(connector
);
165 amdgpu_dm_connector
->dm_dp_aux
.aux
.dev
= connector
->kdev
;
166 r
= drm_dp_aux_register(&amdgpu_dm_connector
->dm_dp_aux
.aux
);
170 #if defined(CONFIG_DEBUG_FS)
171 connector_debugfs_init(amdgpu_dm_connector
);
178 amdgpu_dm_mst_connector_early_unregister(struct drm_connector
*connector
)
180 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
181 to_amdgpu_dm_connector(connector
);
182 struct drm_dp_mst_port
*port
= amdgpu_dm_connector
->port
;
184 drm_dp_mst_connector_early_unregister(connector
, port
);
187 static const struct drm_connector_funcs dm_dp_mst_connector_funcs
= {
188 .fill_modes
= drm_helper_probe_single_connector_modes
,
189 .destroy
= dm_dp_mst_connector_destroy
,
190 .reset
= amdgpu_dm_connector_funcs_reset
,
191 .atomic_duplicate_state
= amdgpu_dm_connector_atomic_duplicate_state
,
192 .atomic_destroy_state
= drm_atomic_helper_connector_destroy_state
,
193 .atomic_set_property
= amdgpu_dm_connector_atomic_set_property
,
194 .atomic_get_property
= amdgpu_dm_connector_atomic_get_property
,
195 .late_register
= amdgpu_dm_mst_connector_late_register
,
196 .early_unregister
= amdgpu_dm_mst_connector_early_unregister
,
199 #if defined(CONFIG_DRM_AMD_DC_DCN)
200 static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector
*aconnector
)
202 struct dc_sink
*dc_sink
= aconnector
->dc_sink
;
203 struct drm_dp_mst_port
*port
= aconnector
->port
;
204 u8 dsc_caps
[16] = { 0 };
206 aconnector
->dsc_aux
= drm_dp_mst_dsc_aux_for_port(port
);
208 if (!aconnector
->dsc_aux
)
211 if (drm_dp_dpcd_read(aconnector
->dsc_aux
, DP_DSC_SUPPORT
, dsc_caps
, 16) < 0)
214 if (!dc_dsc_parse_dsc_dpcd(aconnector
->dc_link
->ctx
->dc
,
216 &dc_sink
->dsc_caps
.dsc_dec_caps
))
223 static int dm_dp_mst_get_modes(struct drm_connector
*connector
)
225 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
229 return drm_add_edid_modes(connector
, NULL
);
231 if (!aconnector
->edid
) {
233 edid
= drm_dp_mst_get_edid(connector
, &aconnector
->mst_port
->mst_mgr
, aconnector
->port
);
236 drm_connector_update_edid_property(
242 aconnector
->edid
= edid
;
245 if (aconnector
->dc_sink
&& aconnector
->dc_sink
->sink_signal
== SIGNAL_TYPE_VIRTUAL
) {
246 dc_sink_release(aconnector
->dc_sink
);
247 aconnector
->dc_sink
= NULL
;
250 if (!aconnector
->dc_sink
) {
251 struct dc_sink
*dc_sink
;
252 struct dc_sink_init_data init_params
= {
253 .link
= aconnector
->dc_link
,
254 .sink_signal
= SIGNAL_TYPE_DISPLAY_PORT_MST
};
255 dc_sink
= dc_link_add_remote_sink(
257 (uint8_t *)aconnector
->edid
,
258 (aconnector
->edid
->extensions
+ 1) * EDID_LENGTH
,
261 dc_sink
->priv
= aconnector
;
262 /* dc_link_add_remote_sink returns a new reference */
263 aconnector
->dc_sink
= dc_sink
;
265 if (aconnector
->dc_sink
) {
266 amdgpu_dm_update_freesync_caps(
267 connector
, aconnector
->edid
);
269 #if defined(CONFIG_DRM_AMD_DC_DCN)
270 if (!validate_dsc_caps_on_connector(aconnector
))
271 memset(&aconnector
->dc_sink
->dsc_caps
,
272 0, sizeof(aconnector
->dc_sink
->dsc_caps
));
277 drm_connector_update_edid_property(
278 &aconnector
->base
, aconnector
->edid
);
280 ret
= drm_add_edid_modes(connector
, aconnector
->edid
);
285 static struct drm_encoder
*
286 dm_mst_atomic_best_encoder(struct drm_connector
*connector
,
287 struct drm_connector_state
*connector_state
)
289 return &to_amdgpu_dm_connector(connector
)->mst_encoder
->base
;
293 dm_dp_mst_detect(struct drm_connector
*connector
,
294 struct drm_modeset_acquire_ctx
*ctx
, bool force
)
296 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
297 struct amdgpu_dm_connector
*master
= aconnector
->mst_port
;
299 return drm_dp_mst_detect_port(connector
, ctx
, &master
->mst_mgr
,
303 static int dm_dp_mst_atomic_check(struct drm_connector
*connector
,
304 struct drm_atomic_state
*state
)
306 struct drm_connector_state
*new_conn_state
=
307 drm_atomic_get_new_connector_state(state
, connector
);
308 struct drm_connector_state
*old_conn_state
=
309 drm_atomic_get_old_connector_state(state
, connector
);
310 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
311 struct drm_crtc_state
*new_crtc_state
;
312 struct drm_dp_mst_topology_mgr
*mst_mgr
;
313 struct drm_dp_mst_port
*mst_port
;
315 mst_port
= aconnector
->port
;
316 mst_mgr
= &aconnector
->mst_port
->mst_mgr
;
318 if (!old_conn_state
->crtc
)
321 if (new_conn_state
->crtc
) {
322 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, new_conn_state
->crtc
);
323 if (!new_crtc_state
||
324 !drm_atomic_crtc_needs_modeset(new_crtc_state
) ||
325 new_crtc_state
->enable
)
329 return drm_dp_atomic_release_vcpi_slots(state
,
334 static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs
= {
335 .get_modes
= dm_dp_mst_get_modes
,
336 .mode_valid
= amdgpu_dm_connector_mode_valid
,
337 .atomic_best_encoder
= dm_mst_atomic_best_encoder
,
338 .detect_ctx
= dm_dp_mst_detect
,
339 .atomic_check
= dm_dp_mst_atomic_check
,
/* drm_encoder .destroy hook: fake MST encoders are kzalloc'd, free them. */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
348 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs
= {
349 .destroy
= amdgpu_dm_encoder_destroy
,
352 static struct amdgpu_encoder
*
353 dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector
*connector
)
355 struct drm_device
*dev
= connector
->base
.dev
;
356 struct amdgpu_device
*adev
= dev
->dev_private
;
357 struct amdgpu_encoder
*amdgpu_encoder
;
358 struct drm_encoder
*encoder
;
360 amdgpu_encoder
= kzalloc(sizeof(*amdgpu_encoder
), GFP_KERNEL
);
364 encoder
= &amdgpu_encoder
->base
;
365 encoder
->possible_crtcs
= amdgpu_dm_get_encoder_crtc_mask(adev
);
369 &amdgpu_encoder
->base
,
370 &amdgpu_dm_encoder_funcs
,
371 DRM_MODE_ENCODER_DPMST
,
374 drm_encoder_helper_add(encoder
, &amdgpu_dm_encoder_helper_funcs
);
376 return amdgpu_encoder
;
379 static struct drm_connector
*
380 dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr
*mgr
,
381 struct drm_dp_mst_port
*port
,
382 const char *pathprop
)
384 struct amdgpu_dm_connector
*master
= container_of(mgr
, struct amdgpu_dm_connector
, mst_mgr
);
385 struct drm_device
*dev
= master
->base
.dev
;
386 struct amdgpu_device
*adev
= dev
->dev_private
;
387 struct amdgpu_dm_connector
*aconnector
;
388 struct drm_connector
*connector
;
390 aconnector
= kzalloc(sizeof(*aconnector
), GFP_KERNEL
);
394 connector
= &aconnector
->base
;
395 aconnector
->port
= port
;
396 aconnector
->mst_port
= master
;
398 if (drm_connector_init(
401 &dm_dp_mst_connector_funcs
,
402 DRM_MODE_CONNECTOR_DisplayPort
)) {
406 drm_connector_helper_add(connector
, &dm_dp_mst_connector_helper_funcs
);
408 amdgpu_dm_connector_init_helper(
411 DRM_MODE_CONNECTOR_DisplayPort
,
413 master
->connector_id
);
415 aconnector
->mst_encoder
= dm_dp_create_fake_mst_encoder(master
);
416 drm_connector_attach_encoder(&aconnector
->base
,
417 &aconnector
->mst_encoder
->base
);
419 connector
->max_bpc_property
= master
->base
.max_bpc_property
;
420 if (connector
->max_bpc_property
)
421 drm_connector_attach_max_bpc_property(connector
, 8, 16);
423 connector
->vrr_capable_property
= master
->base
.vrr_capable_property
;
424 if (connector
->vrr_capable_property
)
425 drm_connector_attach_vrr_capable_property(connector
);
427 drm_object_attach_property(
429 dev
->mode_config
.path_property
,
431 drm_object_attach_property(
433 dev
->mode_config
.tile_property
,
436 drm_connector_set_path_property(connector
, pathprop
);
439 * Initialize connector state before adding the connectror to drm and
442 amdgpu_dm_connector_funcs_reset(connector
);
444 drm_dp_mst_get_port_malloc(port
);
449 static const struct drm_dp_mst_topology_cbs dm_mst_cbs
= {
450 .add_connector
= dm_dp_add_mst_connector
,
453 void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager
*dm
,
454 struct amdgpu_dm_connector
*aconnector
)
456 aconnector
->dm_dp_aux
.aux
.name
= "dmdc";
457 aconnector
->dm_dp_aux
.aux
.transfer
= dm_dp_aux_transfer
;
458 aconnector
->dm_dp_aux
.ddc_service
= aconnector
->dc_link
->ddc
;
460 drm_dp_aux_init(&aconnector
->dm_dp_aux
.aux
);
461 drm_dp_cec_register_connector(&aconnector
->dm_dp_aux
.aux
,
464 if (aconnector
->base
.connector_type
== DRM_MODE_CONNECTOR_eDP
)
467 aconnector
->mst_mgr
.cbs
= &dm_mst_cbs
;
468 drm_dp_mst_topology_mgr_init(
469 &aconnector
->mst_mgr
,
471 &aconnector
->dm_dp_aux
.aux
,
474 aconnector
->connector_id
);
/*
 * PBN divider for a link: usable link bandwidth expressed in PBN units
 * (link kbps / (8 * 1000 * 54)).  Returns 0 for a NULL link.
 */
int dm_mst_get_pbn_divider(struct dc_link *link)
{
	if (!link)
		return 0;

	return dc_link_bandwidth_kbps(link,
			dc_link_get_link_cap(link)) / (8 * 1000 * 54);
}
486 #if defined(CONFIG_DRM_AMD_DC_DCN)
488 struct dsc_mst_fairness_params
{
489 struct dc_crtc_timing
*timing
;
490 struct dc_sink
*sink
;
491 struct dc_dsc_bw_range bw_range
;
492 bool compression_possible
;
493 struct drm_dp_mst_port
*port
;
496 struct dsc_mst_fairness_vars
{
502 static int kbps_to_peak_pbn(int kbps
)
504 u64 peak_kbps
= kbps
;
507 peak_kbps
= div_u64(peak_kbps
, 1000);
508 return (int) DIV64_U64_ROUND_UP(peak_kbps
* 64, (54 * 8 * 1000));
511 static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params
*params
,
512 struct dsc_mst_fairness_vars
*vars
,
517 for (i
= 0; i
< count
; i
++) {
518 memset(¶ms
[i
].timing
->dsc_cfg
, 0, sizeof(params
[i
].timing
->dsc_cfg
));
519 if (vars
[i
].dsc_enabled
&& dc_dsc_compute_config(
520 params
[i
].sink
->ctx
->dc
->res_pool
->dscs
[0],
521 ¶ms
[i
].sink
->dsc_caps
.dsc_dec_caps
,
522 params
[i
].sink
->ctx
->dc
->debug
.dsc_min_slice_height_override
,
525 ¶ms
[i
].timing
->dsc_cfg
)) {
526 params
[i
].timing
->flags
.DSC
= 1;
527 params
[i
].timing
->dsc_cfg
.bits_per_pixel
= vars
[i
].bpp_x16
;
529 params
[i
].timing
->flags
.DSC
= 0;
534 static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param
, int pbn
)
536 struct dc_dsc_config dsc_config
;
539 kbps
= div_u64((u64
)pbn
* 994 * 8 * 54, 64);
540 dc_dsc_compute_config(
541 param
.sink
->ctx
->dc
->res_pool
->dscs
[0],
542 ¶m
.sink
->dsc_caps
.dsc_dec_caps
,
543 param
.sink
->ctx
->dc
->debug
.dsc_min_slice_height_override
,
544 (int) kbps
, param
.timing
, &dsc_config
);
546 return dsc_config
.bits_per_pixel
;
549 static void increase_dsc_bpp(struct drm_atomic_state
*state
,
550 struct dc_link
*dc_link
,
551 struct dsc_mst_fairness_params
*params
,
552 struct dsc_mst_fairness_vars
*vars
,
556 bool bpp_increased
[MAX_PIPES
];
557 int initial_slack
[MAX_PIPES
];
558 int min_initial_slack
;
560 int remaining_to_increase
= 0;
561 int pbn_per_timeslot
;
562 int link_timeslots_used
;
565 for (i
= 0; i
< count
; i
++) {
566 if (vars
[i
].dsc_enabled
) {
567 initial_slack
[i
] = kbps_to_peak_pbn(params
[i
].bw_range
.max_kbps
) - vars
[i
].pbn
;
568 bpp_increased
[i
] = false;
569 remaining_to_increase
+= 1;
571 initial_slack
[i
] = 0;
572 bpp_increased
[i
] = true;
576 pbn_per_timeslot
= dc_link_bandwidth_kbps(dc_link
,
577 dc_link_get_link_cap(dc_link
)) / (8 * 1000 * 54);
579 while (remaining_to_increase
) {
581 min_initial_slack
= -1;
582 for (i
= 0; i
< count
; i
++) {
583 if (!bpp_increased
[i
]) {
584 if (min_initial_slack
== -1 || min_initial_slack
> initial_slack
[i
]) {
585 min_initial_slack
= initial_slack
[i
];
591 if (next_index
== -1)
594 link_timeslots_used
= 0;
596 for (i
= 0; i
< count
; i
++)
597 link_timeslots_used
+= DIV_ROUND_UP(vars
[i
].pbn
, pbn_per_timeslot
);
599 fair_pbn_alloc
= (63 - link_timeslots_used
) / remaining_to_increase
* pbn_per_timeslot
;
601 if (initial_slack
[next_index
] > fair_pbn_alloc
) {
602 vars
[next_index
].pbn
+= fair_pbn_alloc
;
603 if (drm_dp_atomic_find_vcpi_slots(state
,
604 params
[next_index
].port
->mgr
,
605 params
[next_index
].port
,
606 vars
[next_index
].pbn
,
607 dm_mst_get_pbn_divider(dc_link
)) < 0)
609 if (!drm_dp_mst_atomic_check(state
)) {
610 vars
[next_index
].bpp_x16
= bpp_x16_from_pbn(params
[next_index
], vars
[next_index
].pbn
);
612 vars
[next_index
].pbn
-= fair_pbn_alloc
;
613 if (drm_dp_atomic_find_vcpi_slots(state
,
614 params
[next_index
].port
->mgr
,
615 params
[next_index
].port
,
616 vars
[next_index
].pbn
,
617 dm_mst_get_pbn_divider(dc_link
)) < 0)
621 vars
[next_index
].pbn
+= initial_slack
[next_index
];
622 if (drm_dp_atomic_find_vcpi_slots(state
,
623 params
[next_index
].port
->mgr
,
624 params
[next_index
].port
,
625 vars
[next_index
].pbn
,
626 dm_mst_get_pbn_divider(dc_link
)) < 0)
628 if (!drm_dp_mst_atomic_check(state
)) {
629 vars
[next_index
].bpp_x16
= params
[next_index
].bw_range
.max_target_bpp_x16
;
631 vars
[next_index
].pbn
-= initial_slack
[next_index
];
632 if (drm_dp_atomic_find_vcpi_slots(state
,
633 params
[next_index
].port
->mgr
,
634 params
[next_index
].port
,
635 vars
[next_index
].pbn
,
636 dm_mst_get_pbn_divider(dc_link
)) < 0)
641 bpp_increased
[next_index
] = true;
642 remaining_to_increase
--;
646 static void try_disable_dsc(struct drm_atomic_state
*state
,
647 struct dc_link
*dc_link
,
648 struct dsc_mst_fairness_params
*params
,
649 struct dsc_mst_fairness_vars
*vars
,
653 bool tried
[MAX_PIPES
];
654 int kbps_increase
[MAX_PIPES
];
655 int max_kbps_increase
;
657 int remaining_to_try
= 0;
659 for (i
= 0; i
< count
; i
++) {
660 if (vars
[i
].dsc_enabled
&& vars
[i
].bpp_x16
== params
[i
].bw_range
.max_target_bpp_x16
) {
661 kbps_increase
[i
] = params
[i
].bw_range
.stream_kbps
- params
[i
].bw_range
.max_kbps
;
663 remaining_to_try
+= 1;
665 kbps_increase
[i
] = 0;
670 while (remaining_to_try
) {
672 max_kbps_increase
= -1;
673 for (i
= 0; i
< count
; i
++) {
675 if (max_kbps_increase
== -1 || max_kbps_increase
< kbps_increase
[i
]) {
676 max_kbps_increase
= kbps_increase
[i
];
682 if (next_index
== -1)
685 vars
[next_index
].pbn
= kbps_to_peak_pbn(params
[next_index
].bw_range
.stream_kbps
);
686 if (drm_dp_atomic_find_vcpi_slots(state
,
687 params
[next_index
].port
->mgr
,
688 params
[next_index
].port
,
689 vars
[next_index
].pbn
,
693 if (!drm_dp_mst_atomic_check(state
)) {
694 vars
[next_index
].dsc_enabled
= false;
695 vars
[next_index
].bpp_x16
= 0;
697 vars
[next_index
].pbn
= kbps_to_peak_pbn(params
[next_index
].bw_range
.max_kbps
);
698 if (drm_dp_atomic_find_vcpi_slots(state
,
699 params
[next_index
].port
->mgr
,
700 params
[next_index
].port
,
701 vars
[next_index
].pbn
,
702 dm_mst_get_pbn_divider(dc_link
)) < 0)
706 tried
[next_index
] = true;
711 static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state
*state
,
712 struct dc_state
*dc_state
,
713 struct dc_link
*dc_link
)
716 struct dc_stream_state
*stream
;
717 struct dsc_mst_fairness_params params
[MAX_PIPES
];
718 struct dsc_mst_fairness_vars vars
[MAX_PIPES
];
719 struct amdgpu_dm_connector
*aconnector
;
722 memset(params
, 0, sizeof(params
));
725 for (i
= 0; i
< dc_state
->stream_count
; i
++) {
726 struct dc_dsc_policy dsc_policy
= {0};
728 stream
= dc_state
->streams
[i
];
730 if (stream
->link
!= dc_link
)
733 stream
->timing
.flags
.DSC
= 0;
735 params
[count
].timing
= &stream
->timing
;
736 params
[count
].sink
= stream
->sink
;
737 aconnector
= (struct amdgpu_dm_connector
*)stream
->dm_stream_context
;
738 params
[count
].port
= aconnector
->port
;
739 params
[count
].compression_possible
= stream
->sink
->dsc_caps
.dsc_dec_caps
.is_dsc_supported
;
740 dc_dsc_get_policy_for_timing(params
[count
].timing
, &dsc_policy
);
741 if (!dc_dsc_compute_bandwidth_range(
742 stream
->sink
->ctx
->dc
->res_pool
->dscs
[0],
743 stream
->sink
->ctx
->dc
->debug
.dsc_min_slice_height_override
,
744 dsc_policy
.min_target_bpp
,
745 dsc_policy
.max_target_bpp
,
746 &stream
->sink
->dsc_caps
.dsc_dec_caps
,
747 &stream
->timing
, ¶ms
[count
].bw_range
))
748 params
[count
].bw_range
.stream_kbps
= dc_bandwidth_in_kbps_from_timing(&stream
->timing
);
752 /* Try no compression */
753 for (i
= 0; i
< count
; i
++) {
754 vars
[i
].pbn
= kbps_to_peak_pbn(params
[i
].bw_range
.stream_kbps
);
755 vars
[i
].dsc_enabled
= false;
757 if (drm_dp_atomic_find_vcpi_slots(state
,
764 if (!drm_dp_mst_atomic_check(state
)) {
765 set_dsc_configs_from_fairness_vars(params
, vars
, count
);
769 /* Try max compression */
770 for (i
= 0; i
< count
; i
++) {
771 if (params
[i
].compression_possible
) {
772 vars
[i
].pbn
= kbps_to_peak_pbn(params
[i
].bw_range
.min_kbps
);
773 vars
[i
].dsc_enabled
= true;
774 vars
[i
].bpp_x16
= params
[i
].bw_range
.min_target_bpp_x16
;
775 if (drm_dp_atomic_find_vcpi_slots(state
,
779 dm_mst_get_pbn_divider(dc_link
)) < 0)
782 vars
[i
].pbn
= kbps_to_peak_pbn(params
[i
].bw_range
.stream_kbps
);
783 vars
[i
].dsc_enabled
= false;
785 if (drm_dp_atomic_find_vcpi_slots(state
,
793 if (drm_dp_mst_atomic_check(state
))
796 /* Optimize degree of compression */
797 increase_dsc_bpp(state
, dc_link
, params
, vars
, count
);
799 try_disable_dsc(state
, dc_link
, params
, vars
, count
);
801 set_dsc_configs_from_fairness_vars(params
, vars
, count
);
806 bool compute_mst_dsc_configs_for_state(struct drm_atomic_state
*state
,
807 struct dc_state
*dc_state
)
810 struct dc_stream_state
*stream
;
811 bool computed_streams
[MAX_PIPES
];
812 struct amdgpu_dm_connector
*aconnector
;
814 for (i
= 0; i
< dc_state
->stream_count
; i
++)
815 computed_streams
[i
] = false;
817 for (i
= 0; i
< dc_state
->stream_count
; i
++) {
818 stream
= dc_state
->streams
[i
];
820 if (stream
->signal
!= SIGNAL_TYPE_DISPLAY_PORT_MST
)
823 aconnector
= (struct amdgpu_dm_connector
*)stream
->dm_stream_context
;
825 if (!aconnector
|| !aconnector
->dc_sink
)
828 if (!aconnector
->dc_sink
->dsc_caps
.dsc_dec_caps
.is_dsc_supported
)
831 if (computed_streams
[i
])
834 mutex_lock(&aconnector
->mst_mgr
.lock
);
835 if (!compute_mst_dsc_configs_for_link(state
, dc_state
, stream
->link
)) {
836 mutex_unlock(&aconnector
->mst_mgr
.lock
);
839 mutex_unlock(&aconnector
->mst_mgr
.lock
);
841 for (j
= 0; j
< dc_state
->stream_count
; j
++) {
842 if (dc_state
->streams
[j
]->link
== stream
->link
)
843 computed_streams
[j
] = true;
847 for (i
= 0; i
< dc_state
->stream_count
; i
++) {
848 stream
= dc_state
->streams
[i
];
850 if (stream
->timing
.flags
.DSC
== 1)
851 dcn20_add_dsc_to_stream_resource(stream
->ctx
->dc
, dc_state
, stream
);