/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
27 #include "dm_services.h"
28 #include "dm_helpers.h"
29 #include "core_types.h"
32 #include "dce/dce_hwseq.h"
33 #include "dcn30/dcn30_cm_common.h"
34 #include "reg_helper.h"
38 #include "timing_generator.h"
43 #include "dc_dmub_srv.h"
44 #include "link_hwss.h"
45 #include "dpcd_defs.h"
46 #include "dcn32_hwseq.h"
49 #include "dcn20/dcn20_optc.h"
50 #include "dmub_subvp_state.h"
51 #include "dce/dmub_hw_lock_mgr.h"
52 #include "dc_link_dp.h"
53 #include "dmub/inc/dmub_subvp_state.h"
55 #define DC_LOGGER_INIT(logger)
66 #define FN(reg_name, field_name) \
67 hws->shifts->field_name, hws->masks->field_name
69 void dcn32_dsc_pg_control(
70 struct dce_hwseq
*hws
,
71 unsigned int dsc_inst
,
74 uint32_t power_gate
= power_on
? 0 : 1;
75 uint32_t pwr_status
= power_on
? 0 : 2;
76 uint32_t org_ip_request_cntl
= 0;
78 if (hws
->ctx
->dc
->debug
.disable_dsc_power_gate
)
81 REG_GET(DC_IP_REQUEST_CNTL
, IP_REQUEST_EN
, &org_ip_request_cntl
);
82 if (org_ip_request_cntl
== 0)
83 REG_SET(DC_IP_REQUEST_CNTL
, 0, IP_REQUEST_EN
, 1);
87 REG_UPDATE(DOMAIN16_PG_CONFIG
,
88 DOMAIN_POWER_GATE
, power_gate
);
90 REG_WAIT(DOMAIN16_PG_STATUS
,
91 DOMAIN_PGFSM_PWR_STATUS
, pwr_status
,
95 REG_UPDATE(DOMAIN17_PG_CONFIG
,
96 DOMAIN_POWER_GATE
, power_gate
);
98 REG_WAIT(DOMAIN17_PG_STATUS
,
99 DOMAIN_PGFSM_PWR_STATUS
, pwr_status
,
103 REG_UPDATE(DOMAIN18_PG_CONFIG
,
104 DOMAIN_POWER_GATE
, power_gate
);
106 REG_WAIT(DOMAIN18_PG_STATUS
,
107 DOMAIN_PGFSM_PWR_STATUS
, pwr_status
,
111 REG_UPDATE(DOMAIN19_PG_CONFIG
,
112 DOMAIN_POWER_GATE
, power_gate
);
114 REG_WAIT(DOMAIN19_PG_STATUS
,
115 DOMAIN_PGFSM_PWR_STATUS
, pwr_status
,
123 if (org_ip_request_cntl
== 0)
124 REG_SET(DC_IP_REQUEST_CNTL
, 0, IP_REQUEST_EN
, 0);
128 void dcn32_enable_power_gating_plane(
129 struct dce_hwseq
*hws
,
132 bool force_on
= true; /* disable power gating */
138 REG_UPDATE(DOMAIN0_PG_CONFIG
, DOMAIN_POWER_FORCEON
, force_on
);
139 REG_UPDATE(DOMAIN1_PG_CONFIG
, DOMAIN_POWER_FORCEON
, force_on
);
140 REG_UPDATE(DOMAIN2_PG_CONFIG
, DOMAIN_POWER_FORCEON
, force_on
);
141 REG_UPDATE(DOMAIN3_PG_CONFIG
, DOMAIN_POWER_FORCEON
, force_on
);
144 REG_UPDATE(DOMAIN16_PG_CONFIG
, DOMAIN_POWER_FORCEON
, force_on
);
145 REG_UPDATE(DOMAIN17_PG_CONFIG
, DOMAIN_POWER_FORCEON
, force_on
);
146 REG_UPDATE(DOMAIN18_PG_CONFIG
, DOMAIN_POWER_FORCEON
, force_on
);
147 REG_UPDATE(DOMAIN19_PG_CONFIG
, DOMAIN_POWER_FORCEON
, force_on
);
150 void dcn32_hubp_pg_control(struct dce_hwseq
*hws
, unsigned int hubp_inst
, bool power_on
)
152 uint32_t power_gate
= power_on
? 0 : 1;
153 uint32_t pwr_status
= power_on
? 0 : 2;
155 if (hws
->ctx
->dc
->debug
.disable_hubp_power_gate
)
158 if (REG(DOMAIN0_PG_CONFIG
) == 0)
163 REG_SET(DOMAIN0_PG_CONFIG
, 0, DOMAIN_POWER_GATE
, power_gate
);
164 REG_WAIT(DOMAIN0_PG_STATUS
, DOMAIN_PGFSM_PWR_STATUS
, pwr_status
, 1, 1000);
167 REG_SET(DOMAIN1_PG_CONFIG
, 0, DOMAIN_POWER_GATE
, power_gate
);
168 REG_WAIT(DOMAIN1_PG_STATUS
, DOMAIN_PGFSM_PWR_STATUS
, pwr_status
, 1, 1000);
171 REG_SET(DOMAIN2_PG_CONFIG
, 0, DOMAIN_POWER_GATE
, power_gate
);
172 REG_WAIT(DOMAIN2_PG_STATUS
, DOMAIN_PGFSM_PWR_STATUS
, pwr_status
, 1, 1000);
175 REG_SET(DOMAIN3_PG_CONFIG
, 0, DOMAIN_POWER_GATE
, power_gate
);
176 REG_WAIT(DOMAIN3_PG_STATUS
, DOMAIN_PGFSM_PWR_STATUS
, pwr_status
, 1, 1000);
184 static bool dcn32_check_no_memory_request_for_cab(struct dc
*dc
)
188 /* First, check no-memory-request case */
189 for (i
= 0; i
< dc
->current_state
->stream_count
; i
++) {
190 if (dc
->current_state
->stream_status
[i
].plane_count
)
191 /* Fail eligibility on a visible stream */
195 if (i
== dc
->current_state
->stream_count
)
201 /* This function takes in the start address and surface size to be cached in CAB
202 * and calculates the total number of cache lines required to store the surface.
203 * The number of cache lines used for each surface is calculated independently of
204 * one another. For example, if there is a primary surface(1), meta surface(2), and
205 * cursor(3), this function should be called 3 times to calculate the number of cache
206 * lines used for each of those surfaces.
208 static uint32_t dcn32_cache_lines_for_surface(struct dc
*dc
, uint32_t surface_size
, uint64_t start_address
)
210 uint32_t lines_used
= 1;
211 uint32_t num_cached_bytes
= 0;
212 uint32_t remaining_size
= 0;
213 uint32_t cache_line_size
= dc
->caps
.cache_line_size
;
214 uint32_t remainder
= 0;
216 /* 1. Calculate surface size minus the number of bytes stored
217 * in the first cache line (all bytes in first cache line might
218 * not be fully used).
220 div_u64_rem(start_address
, cache_line_size
, &remainder
);
221 num_cached_bytes
= cache_line_size
- remainder
;
222 remaining_size
= surface_size
- num_cached_bytes
;
224 /* 2. Calculate number of cache lines that will be fully used with
225 * the remaining number of bytes to be stored.
227 lines_used
+= (remaining_size
/ cache_line_size
);
229 /* 3. Check if we need an extra line due to the remaining size not being
230 * a multiple of CACHE_LINE_SIZE.
232 if (remaining_size
% cache_line_size
> 0)
238 /* This function loops through every surface that needs to be cached in CAB for SS,
239 * and calculates the total number of ways required to store all surfaces (primary,
242 static uint32_t dcn32_calculate_cab_allocation(struct dc
*dc
, struct dc_state
*ctx
)
245 struct dc_stream_state
*stream
= NULL
;
246 struct dc_plane_state
*plane
= NULL
;
247 uint32_t surface_size
= 0;
248 uint32_t cursor_size
= 0;
249 uint32_t cache_lines_used
= 0;
250 uint32_t total_lines
= 0;
251 uint32_t lines_per_way
= 0;
252 uint32_t num_ways
= 0;
253 uint32_t prev_addr_low
= 0;
255 for (i
= 0; i
< ctx
->stream_count
; i
++) {
256 stream
= ctx
->streams
[i
];
258 // Don't include PSR surface in the total surface size for CAB allocation
259 if (stream
->link
->psr_settings
.psr_version
!= DC_PSR_VERSION_UNSUPPORTED
)
262 if (ctx
->stream_status
[i
].plane_count
== 0)
265 // For each stream, loop through each plane to calculate the number of cache
266 // lines required to store the surface in CAB
267 for (j
= 0; j
< ctx
->stream_status
[i
].plane_count
; j
++) {
268 plane
= ctx
->stream_status
[i
].plane_states
[j
];
270 // Calculate total surface size
271 if (prev_addr_low
!= plane
->address
.grph
.addr
.u
.low_part
) {
272 /* if plane address are different from prev FB, then userspace allocated separate FBs*/
273 surface_size
+= plane
->plane_size
.surface_pitch
*
274 plane
->plane_size
.surface_size
.height
*
275 (plane
->format
>= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616
? 8 : 4);
277 prev_addr_low
= plane
->address
.grph
.addr
.u
.low_part
;
279 /* We have the same fb for all the planes.
280 * Xorg always creates one giant fb that holds all surfaces,
281 * so allocating it once is sufficient.
285 // Convert surface size + starting address to number of cache lines required
286 // (alignment accounted for)
287 cache_lines_used
+= dcn32_cache_lines_for_surface(dc
, surface_size
,
288 plane
->address
.grph
.addr
.quad_part
);
290 if (plane
->address
.grph
.meta_addr
.quad_part
) {
292 cache_lines_used
+= dcn32_cache_lines_for_surface(dc
, surface_size
,
293 plane
->address
.grph
.meta_addr
.quad_part
);
297 // Include cursor size for CAB allocation
298 for (j
= 0; j
< dc
->res_pool
->pipe_count
; j
++) {
299 struct pipe_ctx
*pipe
= &ctx
->res_ctx
.pipe_ctx
[j
];
300 struct hubp
*hubp
= pipe
->plane_res
.hubp
;
302 if (pipe
->stream
&& pipe
->plane_state
&& hubp
)
303 /* Find the cursor plane and use the exact size instead of
304 * using the max for calculation
306 if (hubp
->curs_attr
.width
> 0) {
307 cursor_size
= hubp
->curs_attr
.width
* hubp
->curs_attr
.height
;
312 switch (stream
->cursor_attributes
.color_format
) {
313 case CURSOR_MODE_MONO
:
316 case CURSOR_MODE_COLOR_1BIT_AND
:
317 case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA
:
318 case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA
:
322 case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED
:
323 case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED
:
328 if (stream
->cursor_position
.enable
&& plane
->address
.grph
.cursor_cache_addr
.quad_part
) {
329 cache_lines_used
+= dcn32_cache_lines_for_surface(dc
, cursor_size
,
330 plane
->address
.grph
.cursor_cache_addr
.quad_part
);
334 // Convert number of cache lines required to number of ways
335 total_lines
= dc
->caps
.max_cab_allocation_bytes
/ dc
->caps
.cache_line_size
;
336 lines_per_way
= total_lines
/ dc
->caps
.cache_num_ways
;
337 num_ways
= cache_lines_used
/ lines_per_way
;
339 if (cache_lines_used
% lines_per_way
> 0)
342 for (i
= 0; i
< ctx
->stream_count
; i
++) {
343 stream
= ctx
->streams
[i
];
344 for (j
= 0; j
< ctx
->stream_status
[i
].plane_count
; j
++) {
345 plane
= ctx
->stream_status
[i
].plane_states
[j
];
347 if (stream
->cursor_position
.enable
&& plane
&&
348 !plane
->address
.grph
.cursor_cache_addr
.quad_part
&&
349 cursor_size
> 16384) {
350 /* Cursor caching is not supported since it won't be on the same line.
351 * So we need an extra line to accommodate it. With large cursors and a single 4k monitor
352 * this case triggers corruption. If we're at the edge, then dont trigger display refresh
353 * from MALL. We only need to cache cursor if its greater that 64x64 at 4 bpp.
356 /* We only expect one cursor plane */
365 bool dcn32_apply_idle_power_optimizations(struct dc
*dc
, bool enable
)
367 union dmub_rb_cmd cmd
;
370 bool stereo_in_use
= false;
371 struct dc_plane_state
*plane
= NULL
;
373 if (!dc
->ctx
->dmub_srv
)
377 if (dc
->current_state
) {
379 /* 1. Check no memory request case for CAB.
380 * If no memory request case, send CAB_ACTION NO_DF_REQ DMUB message
382 if (dcn32_check_no_memory_request_for_cab(dc
)) {
383 /* Enable no-memory-requests case */
384 memset(&cmd
, 0, sizeof(cmd
));
385 cmd
.cab
.header
.type
= DMUB_CMD__CAB_FOR_SS
;
386 cmd
.cab
.header
.sub_type
= DMUB_CMD__CAB_NO_DCN_REQ
;
387 cmd
.cab
.header
.payload_bytes
= sizeof(cmd
.cab
) - sizeof(cmd
.cab
.header
);
389 dc_dmub_srv_cmd_queue(dc
->ctx
->dmub_srv
, &cmd
);
390 dc_dmub_srv_cmd_execute(dc
->ctx
->dmub_srv
);
395 /* 2. Check if all surfaces can fit in CAB.
396 * If surfaces can fit into CAB, send CAB_ACTION_ALLOW DMUB message
397 * and configure HUBP's to fetch from MALL
399 ways
= dcn32_calculate_cab_allocation(dc
, dc
->current_state
);
401 /* MALL not supported with Stereo3D. If any plane is using stereo,
402 * don't try to enter MALL.
404 for (i
= 0; i
< dc
->current_state
->stream_count
; i
++) {
405 for (j
= 0; j
< dc
->current_state
->stream_status
[i
].plane_count
; j
++) {
406 plane
= dc
->current_state
->stream_status
[i
].plane_states
[j
];
408 if (plane
->address
.type
== PLN_ADDR_TYPE_GRPH_STEREO
) {
409 stereo_in_use
= true;
416 if (ways
<= dc
->caps
.cache_num_ways
&& !stereo_in_use
) {
417 memset(&cmd
, 0, sizeof(cmd
));
418 cmd
.cab
.header
.type
= DMUB_CMD__CAB_FOR_SS
;
419 cmd
.cab
.header
.sub_type
= DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB
;
420 cmd
.cab
.header
.payload_bytes
= sizeof(cmd
.cab
) - sizeof(cmd
.cab
.header
);
421 cmd
.cab
.cab_alloc_ways
= ways
;
423 dc_dmub_srv_cmd_queue(dc
->ctx
->dmub_srv
, &cmd
);
424 dc_dmub_srv_cmd_execute(dc
->ctx
->dmub_srv
);
434 memset(&cmd
, 0, sizeof(cmd
));
435 cmd
.cab
.header
.type
= DMUB_CMD__CAB_FOR_SS
;
436 cmd
.cab
.header
.sub_type
= DMUB_CMD__CAB_NO_IDLE_OPTIMIZATION
;
437 cmd
.cab
.header
.payload_bytes
=
438 sizeof(cmd
.cab
) - sizeof(cmd
.cab
.header
);
440 dc_dmub_srv_cmd_queue(dc
->ctx
->dmub_srv
, &cmd
);
441 dc_dmub_srv_cmd_execute(dc
->ctx
->dmub_srv
);
442 dc_dmub_srv_wait_idle(dc
->ctx
->dmub_srv
);
447 /* Send DMCUB message with SubVP pipe info
448 * - For each pipe in context, populate payload with required SubVP information
449 * if the pipe is using SubVP for MCLK switch
450 * - This function must be called while the DMUB HW lock is acquired by driver
452 void dcn32_commit_subvp_config(struct dc
*dc
, struct dc_state
*context
)
456 bool enable_subvp = false;
458 if (!dc->ctx || !dc->ctx->dmub_srv)
461 for (i = 0; i < dc->res_pool->pipe_count; i++) {
462 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
464 if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.paired_stream &&
465 pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) {
466 // There is at least 1 SubVP pipe, so enable SubVP
471 dc_dmub_setup_subvp_dmub_command(dc, context, enable_subvp);
475 /* Sub-Viewport DMUB lock needs to be acquired by driver whenever SubVP is active and:
476 * 1. Any full update for any SubVP main pipe
477 * 2. Any immediate flip for any SubVP pipe
478 * 3. Any flip for DRR pipe
479 * 4. If SubVP was previously in use (i.e. in old context)
481 void dcn32_subvp_pipe_control_lock(struct dc
*dc
,
482 struct dc_state
*context
,
484 bool should_lock_all_pipes
,
485 struct pipe_ctx
*top_pipe_to_program
,
489 bool subvp_immediate_flip
= false;
490 bool subvp_in_use
= false;
491 struct pipe_ctx
*pipe
;
493 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
494 pipe
= &context
->res_ctx
.pipe_ctx
[i
];
496 if (pipe
->stream
&& pipe
->plane_state
&& pipe
->stream
->mall_stream_config
.type
== SUBVP_MAIN
) {
502 if (top_pipe_to_program
&& top_pipe_to_program
->stream
&& top_pipe_to_program
->plane_state
) {
503 if (top_pipe_to_program
->stream
->mall_stream_config
.type
== SUBVP_MAIN
&&
504 top_pipe_to_program
->plane_state
->flip_immediate
)
505 subvp_immediate_flip
= true;
508 // Don't need to lock for DRR VSYNC flips -- FW will wait for DRR pending update cleared.
509 if ((subvp_in_use
&& (should_lock_all_pipes
|| subvp_immediate_flip
)) || (!subvp_in_use
&& subvp_prev_use
)) {
510 union dmub_inbox0_cmd_lock_hw hw_lock_cmd
= { 0 };
513 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
514 pipe
= &context
->res_ctx
.pipe_ctx
[i
];
515 if (pipe
->stream
&& pipe
->plane_state
&& pipe
->stream
->mall_stream_config
.type
== SUBVP_MAIN
&&
516 should_lock_all_pipes
)
517 pipe
->stream_res
.tg
->funcs
->wait_for_state(pipe
->stream_res
.tg
, CRTC_STATE_VBLANK
);
521 hw_lock_cmd
.bits
.command_code
= DMUB_INBOX0_CMD__HW_LOCK
;
522 hw_lock_cmd
.bits
.hw_lock_client
= HW_LOCK_CLIENT_DRIVER
;
523 hw_lock_cmd
.bits
.lock
= lock
;
524 hw_lock_cmd
.bits
.should_release
= !lock
;
525 dmub_hw_lock_mgr_inbox0_cmd(dc
->ctx
->dmub_srv
, hw_lock_cmd
);
530 static bool dcn32_set_mpc_shaper_3dlut(
531 struct pipe_ctx
*pipe_ctx
, const struct dc_stream_state
*stream
)
533 struct dpp
*dpp_base
= pipe_ctx
->plane_res
.dpp
;
534 int mpcc_id
= pipe_ctx
->plane_res
.hubp
->inst
;
535 struct mpc
*mpc
= pipe_ctx
->stream_res
.opp
->ctx
->dc
->res_pool
->mpc
;
538 const struct pwl_params
*shaper_lut
= NULL
;
539 //get the shaper lut params
540 if (stream
->func_shaper
) {
541 if (stream
->func_shaper
->type
== TF_TYPE_HWPWL
)
542 shaper_lut
= &stream
->func_shaper
->pwl
;
543 else if (stream
->func_shaper
->type
== TF_TYPE_DISTRIBUTED_POINTS
) {
544 cm_helper_translate_curve_to_hw_format(
546 &dpp_base
->shaper_params
, true);
547 shaper_lut
= &dpp_base
->shaper_params
;
551 if (stream
->lut3d_func
&&
552 stream
->lut3d_func
->state
.bits
.initialized
== 1) {
554 result
= mpc
->funcs
->program_3dlut(mpc
,
555 &stream
->lut3d_func
->lut_3d
,
558 result
= mpc
->funcs
->program_shaper(mpc
,
566 bool dcn32_set_mcm_luts(
567 struct pipe_ctx
*pipe_ctx
, const struct dc_plane_state
*plane_state
)
569 struct dpp
*dpp_base
= pipe_ctx
->plane_res
.dpp
;
570 int mpcc_id
= pipe_ctx
->plane_res
.hubp
->inst
;
571 struct mpc
*mpc
= pipe_ctx
->stream_res
.opp
->ctx
->dc
->res_pool
->mpc
;
573 struct pwl_params
*lut_params
= NULL
;
576 if (plane_state
->blend_tf
) {
577 if (plane_state
->blend_tf
->type
== TF_TYPE_HWPWL
)
578 lut_params
= &plane_state
->blend_tf
->pwl
;
579 else if (plane_state
->blend_tf
->type
== TF_TYPE_DISTRIBUTED_POINTS
) {
580 cm_helper_translate_curve_to_hw_format(
581 plane_state
->blend_tf
,
582 &dpp_base
->regamma_params
, false);
583 lut_params
= &dpp_base
->regamma_params
;
586 result
= mpc
->funcs
->program_1dlut(mpc
, lut_params
, mpcc_id
);
589 if (plane_state
->in_shaper_func
) {
590 if (plane_state
->in_shaper_func
->type
== TF_TYPE_HWPWL
)
591 lut_params
= &plane_state
->in_shaper_func
->pwl
;
592 else if (plane_state
->in_shaper_func
->type
== TF_TYPE_DISTRIBUTED_POINTS
) {
593 // TODO: dpp_base replace
595 cm_helper_translate_curve_to_hw_format(
596 plane_state
->in_shaper_func
,
597 &dpp_base
->shaper_params
, true);
598 lut_params
= &dpp_base
->shaper_params
;
602 result
= mpc
->funcs
->program_shaper(mpc
, lut_params
, mpcc_id
);
605 if (plane_state
->lut3d_func
&& plane_state
->lut3d_func
->state
.bits
.initialized
== 1)
606 result
= mpc
->funcs
->program_3dlut(mpc
, &plane_state
->lut3d_func
->lut_3d
, mpcc_id
);
608 result
= mpc
->funcs
->program_3dlut(mpc
, NULL
, mpcc_id
);
613 bool dcn32_set_input_transfer_func(struct dc
*dc
,
614 struct pipe_ctx
*pipe_ctx
,
615 const struct dc_plane_state
*plane_state
)
617 struct dce_hwseq
*hws
= dc
->hwseq
;
618 struct mpc
*mpc
= dc
->res_pool
->mpc
;
619 struct dpp
*dpp_base
= pipe_ctx
->plane_res
.dpp
;
621 enum dc_transfer_func_predefined tf
;
623 struct pwl_params
*params
= NULL
;
625 if (mpc
== NULL
|| plane_state
== NULL
)
628 tf
= TRANSFER_FUNCTION_UNITY
;
630 if (plane_state
->in_transfer_func
&&
631 plane_state
->in_transfer_func
->type
== TF_TYPE_PREDEFINED
)
632 tf
= plane_state
->in_transfer_func
->tf
;
634 dpp_base
->funcs
->dpp_set_pre_degam(dpp_base
, tf
);
636 if (plane_state
->in_transfer_func
) {
637 if (plane_state
->in_transfer_func
->type
== TF_TYPE_HWPWL
)
638 params
= &plane_state
->in_transfer_func
->pwl
;
639 else if (plane_state
->in_transfer_func
->type
== TF_TYPE_DISTRIBUTED_POINTS
&&
640 cm3_helper_translate_curve_to_hw_format(plane_state
->in_transfer_func
,
641 &dpp_base
->degamma_params
, false))
642 params
= &dpp_base
->degamma_params
;
645 result
= dpp_base
->funcs
->dpp_program_gamcor_lut(dpp_base
, params
);
648 pipe_ctx
->stream_res
.opp
&&
649 pipe_ctx
->stream_res
.opp
->ctx
&&
650 hws
->funcs
.set_mcm_luts
)
651 result
= hws
->funcs
.set_mcm_luts(pipe_ctx
, plane_state
);
656 bool dcn32_set_output_transfer_func(struct dc
*dc
,
657 struct pipe_ctx
*pipe_ctx
,
658 const struct dc_stream_state
*stream
)
660 int mpcc_id
= pipe_ctx
->plane_res
.hubp
->inst
;
661 struct mpc
*mpc
= pipe_ctx
->stream_res
.opp
->ctx
->dc
->res_pool
->mpc
;
662 struct pwl_params
*params
= NULL
;
665 /* program OGAM or 3DLUT only for the top pipe*/
666 if (pipe_ctx
->top_pipe
== NULL
) {
667 /*program shaper and 3dlut in MPC*/
668 ret
= dcn32_set_mpc_shaper_3dlut(pipe_ctx
, stream
);
669 if (ret
== false && mpc
->funcs
->set_output_gamma
&& stream
->out_transfer_func
) {
670 if (stream
->out_transfer_func
->type
== TF_TYPE_HWPWL
)
671 params
= &stream
->out_transfer_func
->pwl
;
672 else if (pipe_ctx
->stream
->out_transfer_func
->type
==
673 TF_TYPE_DISTRIBUTED_POINTS
&&
674 cm3_helper_translate_curve_to_hw_format(
675 stream
->out_transfer_func
,
676 &mpc
->blender_params
, false))
677 params
= &mpc
->blender_params
;
678 /* there are no ROM LUTs in OUTGAM */
679 if (stream
->out_transfer_func
->type
== TF_TYPE_PREDEFINED
)
684 mpc
->funcs
->set_output_gamma(mpc
, mpcc_id
, params
);
688 /* Program P-State force value according to if pipe is using SubVP or not:
689 * 1. Reset P-State force on all pipes first
690 * 2. For each main pipe, force P-State disallow (P-State allow moderated by DMUB)
692 void dcn32_subvp_update_force_pstate(struct dc
*dc
, struct dc_state
*context
)
696 /* Unforce p-state for each pipe
698 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
699 struct pipe_ctx
*pipe
= &context
->res_ctx
.pipe_ctx
[i
];
700 struct hubp
*hubp
= pipe
->plane_res
.hubp
;
702 if (hubp
&& hubp
->funcs
->hubp_update_force_pstate_disallow
)
703 hubp
->funcs
->hubp_update_force_pstate_disallow(hubp
, false);
704 if (pipe
->stream
&& pipe
->stream
->mall_stream_config
.type
== SUBVP_MAIN
)
711 /* Loop through each pipe -- for each subvp main pipe force p-state allow equal to false.
713 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
714 struct pipe_ctx
*pipe
= &context
->res_ctx
.pipe_ctx
[i
];
716 // For SubVP + DRR, also force disallow on the DRR pipe
717 // (We will force allow in the DMUB sequence -- some DRR timings by default won't allow P-State so we have
718 // to force once the vblank is stretched).
719 if (pipe
->stream
&& pipe
->plane_state
&& (pipe
->stream
->mall_stream_config
.type
== SUBVP_MAIN
||
720 (pipe
->stream
->mall_stream_config
.type
== SUBVP_NONE
&& pipe
->stream
->ignore_msa_timing_param
))) {
721 struct hubp
*hubp
= pipe
->plane_res
.hubp
;
723 if (hubp
&& hubp
->funcs
->hubp_update_force_pstate_disallow
)
724 hubp
->funcs
->hubp_update_force_pstate_disallow(hubp
, true);
729 /* Update MALL_SEL register based on if pipe / plane
730 * is a phantom pipe, main pipe, and if using MALL
733 void dcn32_update_mall_sel(struct dc
*dc
, struct dc_state
*context
)
736 unsigned int num_ways
= dcn32_calculate_cab_allocation(dc
, context
);
737 bool cache_cursor
= false;
739 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
740 struct pipe_ctx
*pipe
= &context
->res_ctx
.pipe_ctx
[i
];
741 struct hubp
*hubp
= pipe
->plane_res
.hubp
;
743 if (pipe
->stream
&& pipe
->plane_state
&& hubp
&& hubp
->funcs
->hubp_update_mall_sel
) {
744 if (hubp
->curs_attr
.width
* hubp
->curs_attr
.height
* 4 > 16384)
747 if (pipe
->stream
->mall_stream_config
.type
== SUBVP_PHANTOM
) {
748 hubp
->funcs
->hubp_update_mall_sel(hubp
, 1, false);
750 // MALL not supported with Stereo3D
751 hubp
->funcs
->hubp_update_mall_sel(hubp
,
752 num_ways
<= dc
->caps
.cache_num_ways
&&
753 pipe
->stream
->link
->psr_settings
.psr_version
== DC_PSR_VERSION_UNSUPPORTED
&&
754 pipe
->plane_state
->address
.type
!= PLN_ADDR_TYPE_GRPH_STEREO
? 2 : 0,
761 /* Program the sub-viewport pipe configuration after the main / phantom pipes
762 * have been programmed in hardware.
763 * 1. Update force P-State for all the main pipes (disallow P-state)
764 * 2. Update MALL_SEL register
765 * 3. Program FORCE_ONE_ROW_FOR_FRAME for main subvp pipes
767 void dcn32_program_mall_pipe_config(struct dc
*dc
, struct dc_state
*context
)
770 struct dce_hwseq
*hws
= dc
->hwseq
;
772 // Don't force p-state disallow -- can't block dummy p-state
774 // Update MALL_SEL register for each pipe
775 if (hws
&& hws
->funcs
.update_mall_sel
)
776 hws
->funcs
.update_mall_sel(dc
, context
);
778 // Program FORCE_ONE_ROW_FOR_FRAME and CURSOR_REQ_MODE for main subvp pipes
779 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
780 struct pipe_ctx
*pipe
= &context
->res_ctx
.pipe_ctx
[i
];
781 struct hubp
*hubp
= pipe
->plane_res
.hubp
;
783 if (pipe
->stream
&& hubp
&& hubp
->funcs
->hubp_prepare_subvp_buffering
) {
784 /* TODO - remove setting CURSOR_REQ_MODE to 0 for legacy cases
785 * - need to investigate single pipe MPO + SubVP case to
786 * see if CURSOR_REQ_MODE will be back to 1 for SubVP
787 * when it should be 0 for MPO
789 if (pipe
->stream
->mall_stream_config
.type
== SUBVP_MAIN
) {
790 hubp
->funcs
->hubp_prepare_subvp_buffering(hubp
, true);
796 void dcn32_init_hw(struct dc
*dc
)
798 struct abm
**abms
= dc
->res_pool
->multiple_abms
;
799 struct dce_hwseq
*hws
= dc
->hwseq
;
800 struct dc_bios
*dcb
= dc
->ctx
->dc_bios
;
801 struct resource_pool
*res_pool
= dc
->res_pool
;
804 uint32_t backlight
= MAX_BACKLIGHT_LEVEL
;
806 if (dc
->clk_mgr
&& dc
->clk_mgr
->funcs
->init_clocks
)
807 dc
->clk_mgr
->funcs
->init_clocks(dc
->clk_mgr
);
809 // Initialize the dccg
810 if (res_pool
->dccg
->funcs
->dccg_init
)
811 res_pool
->dccg
->funcs
->dccg_init(res_pool
->dccg
);
813 if (!dcb
->funcs
->is_accelerated_mode(dcb
)) {
814 hws
->funcs
.bios_golden_init(dc
);
815 hws
->funcs
.disable_vga(dc
->hwseq
);
818 // Set default OPTC memory power states
819 if (dc
->debug
.enable_mem_low_power
.bits
.optc
) {
820 // Shutdown when unassigned and light sleep in VBLANK
821 REG_SET_2(ODM_MEM_PWR_CTRL3
, 0, ODM_MEM_UNASSIGNED_PWR_MODE
, 3, ODM_MEM_VBLANK_PWR_MODE
, 1);
824 if (dc
->debug
.enable_mem_low_power
.bits
.vga
) {
825 // Power down VGA memory
826 REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL
, VGA_MEM_PWR_FORCE
, 1);
829 if (dc
->ctx
->dc_bios
->fw_info_valid
) {
830 res_pool
->ref_clocks
.xtalin_clock_inKhz
=
831 dc
->ctx
->dc_bios
->fw_info
.pll_info
.crystal_frequency
;
833 if (res_pool
->dccg
&& res_pool
->hubbub
) {
834 (res_pool
->dccg
->funcs
->get_dccg_ref_freq
)(res_pool
->dccg
,
835 dc
->ctx
->dc_bios
->fw_info
.pll_info
.crystal_frequency
,
836 &res_pool
->ref_clocks
.dccg_ref_clock_inKhz
);
838 (res_pool
->hubbub
->funcs
->get_dchub_ref_freq
)(res_pool
->hubbub
,
839 res_pool
->ref_clocks
.dccg_ref_clock_inKhz
,
840 &res_pool
->ref_clocks
.dchub_ref_clock_inKhz
);
842 // Not all ASICs have DCCG sw component
843 res_pool
->ref_clocks
.dccg_ref_clock_inKhz
=
844 res_pool
->ref_clocks
.xtalin_clock_inKhz
;
845 res_pool
->ref_clocks
.dchub_ref_clock_inKhz
=
846 res_pool
->ref_clocks
.xtalin_clock_inKhz
;
849 ASSERT_CRITICAL(false);
851 for (i
= 0; i
< dc
->link_count
; i
++) {
852 /* Power up AND update implementation according to the
853 * required signal (which may be different from the
854 * default signal on connector).
856 struct dc_link
*link
= dc
->links
[i
];
858 link
->link_enc
->funcs
->hw_init(link
->link_enc
);
860 /* Check for enabled DIG to identify enabled display */
861 if (link
->link_enc
->funcs
->is_dig_enabled
&&
862 link
->link_enc
->funcs
->is_dig_enabled(link
->link_enc
)) {
863 link
->link_status
.link_active
= true;
864 if (link
->link_enc
->funcs
->fec_is_active
&&
865 link
->link_enc
->funcs
->fec_is_active(link
->link_enc
))
866 link
->fec_state
= dc_link_fec_enabled
;
870 /* Power gate DSCs */
871 for (i
= 0; i
< res_pool
->res_cap
->num_dsc
; i
++)
872 if (hws
->funcs
.dsc_pg_control
!= NULL
)
873 hws
->funcs
.dsc_pg_control(hws
, res_pool
->dscs
[i
]->inst
, false);
875 /* we want to turn off all dp displays before doing detection */
876 dc_link_blank_all_dp_displays(dc
);
878 /* If taking control over from VBIOS, we may want to optimize our first
879 * mode set, so we need to skip powering down pipes until we know which
880 * pipes we want to use.
881 * Otherwise, if taking control is not possible, we need to power
884 if (dcb
->funcs
->is_accelerated_mode(dcb
) || !dc
->config
.seamless_boot_edp_requested
) {
885 hws
->funcs
.init_pipes(dc
, dc
->current_state
);
886 if (dc
->res_pool
->hubbub
->funcs
->allow_self_refresh_control
)
887 dc
->res_pool
->hubbub
->funcs
->allow_self_refresh_control(dc
->res_pool
->hubbub
,
888 !dc
->res_pool
->hubbub
->ctx
->dc
->debug
.disable_stutter
);
891 /* In headless boot cases, DIG may be turned
892 * on which causes HW/SW discrepancies.
893 * To avoid this, power down hardware on boot
894 * if DIG is turned on and seamless boot not enabled
896 if (!dc
->config
.seamless_boot_edp_requested
) {
897 struct dc_link
*edp_links
[MAX_NUM_EDP
];
898 struct dc_link
*edp_link
;
900 get_edp_links(dc
, edp_links
, &edp_num
);
902 for (i
= 0; i
< edp_num
; i
++) {
903 edp_link
= edp_links
[i
];
904 if (edp_link
->link_enc
->funcs
->is_dig_enabled
&&
905 edp_link
->link_enc
->funcs
->is_dig_enabled(edp_link
->link_enc
) &&
906 dc
->hwss
.edp_backlight_control
&&
907 dc
->hwss
.power_down
&&
908 dc
->hwss
.edp_power_control
) {
909 dc
->hwss
.edp_backlight_control(edp_link
, false);
910 dc
->hwss
.power_down(dc
);
911 dc
->hwss
.edp_power_control(edp_link
, false);
915 for (i
= 0; i
< dc
->link_count
; i
++) {
916 struct dc_link
*link
= dc
->links
[i
];
918 if (link
->link_enc
->funcs
->is_dig_enabled
&&
919 link
->link_enc
->funcs
->is_dig_enabled(link
->link_enc
) &&
920 dc
->hwss
.power_down
) {
921 dc
->hwss
.power_down(dc
);
929 for (i
= 0; i
< res_pool
->audio_count
; i
++) {
930 struct audio
*audio
= res_pool
->audios
[i
];
932 audio
->funcs
->hw_init(audio
);
935 for (i
= 0; i
< dc
->link_count
; i
++) {
936 struct dc_link
*link
= dc
->links
[i
];
938 if (link
->panel_cntl
)
939 backlight
= link
->panel_cntl
->funcs
->hw_init(link
->panel_cntl
);
942 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
943 if (abms
[i
] != NULL
&& abms
[i
]->funcs
!= NULL
)
944 abms
[i
]->funcs
->abm_init(abms
[i
], backlight
);
947 /* power AFMT HDMI memory TODO: may move to dis/en output save power*/
948 REG_WRITE(DIO_MEM_PWR_CTRL
, 0);
950 if (!dc
->debug
.disable_clock_gate
) {
951 /* enable all DCN clock gating */
952 REG_WRITE(DCCG_GATE_DISABLE_CNTL
, 0);
954 REG_WRITE(DCCG_GATE_DISABLE_CNTL2
, 0);
956 REG_UPDATE(DCFCLK_CNTL
, DCFCLK_GATE_DIS
, 0);
958 if (hws
->funcs
.enable_power_gating_plane
)
959 hws
->funcs
.enable_power_gating_plane(dc
->hwseq
, true);
961 if (!dcb
->funcs
->is_accelerated_mode(dcb
) && dc
->res_pool
->hubbub
->funcs
->init_watermarks
)
962 dc
->res_pool
->hubbub
->funcs
->init_watermarks(dc
->res_pool
->hubbub
);
964 if (dc
->clk_mgr
->funcs
->notify_wm_ranges
)
965 dc
->clk_mgr
->funcs
->notify_wm_ranges(dc
->clk_mgr
);
967 if (dc
->clk_mgr
->funcs
->set_hard_max_memclk
)
968 dc
->clk_mgr
->funcs
->set_hard_max_memclk(dc
->clk_mgr
);
970 if (dc
->res_pool
->hubbub
->funcs
->force_pstate_change_control
)
971 dc
->res_pool
->hubbub
->funcs
->force_pstate_change_control(
972 dc
->res_pool
->hubbub
, false, false);
974 if (dc
->res_pool
->hubbub
->funcs
->init_crb
)
975 dc
->res_pool
->hubbub
->funcs
->init_crb(dc
->res_pool
->hubbub
);
977 // Get DMCUB capabilities
978 if (dc
->ctx
->dmub_srv
) {
979 dc_dmub_srv_query_caps_cmd(dc
->ctx
->dmub_srv
->dmub
);
980 dc
->caps
.dmub_caps
.psr
= dc
->ctx
->dmub_srv
->dmub
->feature_caps
.psr
;
984 static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state
*stream
,
987 bool hblank_halved
= optc2_is_two_pixels_per_containter(&stream
->timing
);
991 hblank_halved
= true;
993 flow_ctrl_cnt
= stream
->timing
.h_total
- stream
->timing
.h_addressable
-
994 stream
->timing
.h_border_left
-
995 stream
->timing
.h_border_right
;
1000 /* ODM combine 4:1 case */
1004 return flow_ctrl_cnt
;
1007 static void update_dsc_on_stream(struct pipe_ctx
*pipe_ctx
, bool enable
)
1009 struct display_stream_compressor
*dsc
= pipe_ctx
->stream_res
.dsc
;
1010 struct dc_stream_state
*stream
= pipe_ctx
->stream
;
1011 struct pipe_ctx
*odm_pipe
;
1015 for (odm_pipe
= pipe_ctx
->next_odm_pipe
; odm_pipe
; odm_pipe
= odm_pipe
->next_odm_pipe
)
1019 struct dsc_config dsc_cfg
;
1020 struct dsc_optc_config dsc_optc_cfg
;
1021 enum optc_dsc_mode optc_dsc_mode
;
1023 /* Enable DSC hw block */
1024 dsc_cfg
.pic_width
= (stream
->timing
.h_addressable
+ stream
->timing
.h_border_left
+ stream
->timing
.h_border_right
) / opp_cnt
;
1025 dsc_cfg
.pic_height
= stream
->timing
.v_addressable
+ stream
->timing
.v_border_top
+ stream
->timing
.v_border_bottom
;
1026 dsc_cfg
.pixel_encoding
= stream
->timing
.pixel_encoding
;
1027 dsc_cfg
.color_depth
= stream
->timing
.display_color_depth
;
1028 dsc_cfg
.is_odm
= pipe_ctx
->next_odm_pipe
? true : false;
1029 dsc_cfg
.dc_dsc_cfg
= stream
->timing
.dsc_cfg
;
1030 ASSERT(dsc_cfg
.dc_dsc_cfg
.num_slices_h
% opp_cnt
== 0);
1031 dsc_cfg
.dc_dsc_cfg
.num_slices_h
/= opp_cnt
;
1033 dsc
->funcs
->dsc_set_config(dsc
, &dsc_cfg
, &dsc_optc_cfg
);
1034 dsc
->funcs
->dsc_enable(dsc
, pipe_ctx
->stream_res
.opp
->inst
);
1035 for (odm_pipe
= pipe_ctx
->next_odm_pipe
; odm_pipe
; odm_pipe
= odm_pipe
->next_odm_pipe
) {
1036 struct display_stream_compressor
*odm_dsc
= odm_pipe
->stream_res
.dsc
;
1039 odm_dsc
->funcs
->dsc_set_config(odm_dsc
, &dsc_cfg
, &dsc_optc_cfg
);
1040 odm_dsc
->funcs
->dsc_enable(odm_dsc
, odm_pipe
->stream_res
.opp
->inst
);
1042 dsc_cfg
.dc_dsc_cfg
.num_slices_h
*= opp_cnt
;
1043 dsc_cfg
.pic_width
*= opp_cnt
;
1045 optc_dsc_mode
= dsc_optc_cfg
.is_pixel_format_444
? OPTC_DSC_ENABLED_444
: OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED
;
1047 /* Enable DSC in OPTC */
1048 DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx
->stream_res
.tg
->inst
);
1049 pipe_ctx
->stream_res
.tg
->funcs
->set_dsc_config(pipe_ctx
->stream_res
.tg
,
1051 dsc_optc_cfg
.bytes_per_pixel
,
1052 dsc_optc_cfg
.slice_width
);
1054 /* disable DSC in OPTC */
1055 pipe_ctx
->stream_res
.tg
->funcs
->set_dsc_config(
1056 pipe_ctx
->stream_res
.tg
,
1057 OPTC_DSC_DISABLED
, 0, 0);
1059 /* disable DSC block */
1060 dsc
->funcs
->dsc_disable(pipe_ctx
->stream_res
.dsc
);
1061 for (odm_pipe
= pipe_ctx
->next_odm_pipe
; odm_pipe
; odm_pipe
= odm_pipe
->next_odm_pipe
) {
1062 ASSERT(odm_pipe
->stream_res
.dsc
);
1063 odm_pipe
->stream_res
.dsc
->funcs
->dsc_disable(odm_pipe
->stream_res
.dsc
);
1069 * Given any pipe_ctx, return the total ODM combine factor, and optionally return
1070 * the OPPids which are used
1072 static unsigned int get_odm_config(struct pipe_ctx
*pipe_ctx
, unsigned int *opp_instances
)
1074 unsigned int opp_count
= 1;
1075 struct pipe_ctx
*odm_pipe
;
1077 /* First get to the top pipe */
1078 for (odm_pipe
= pipe_ctx
; odm_pipe
->prev_odm_pipe
; odm_pipe
= odm_pipe
->prev_odm_pipe
)
1081 /* First pipe is always used */
1083 opp_instances
[0] = odm_pipe
->stream_res
.opp
->inst
;
1085 /* Find and count odm pipes, if any */
1086 for (odm_pipe
= odm_pipe
->next_odm_pipe
; odm_pipe
; odm_pipe
= odm_pipe
->next_odm_pipe
) {
1088 opp_instances
[opp_count
] = odm_pipe
->stream_res
.opp
->inst
;
1095 void dcn32_update_odm(struct dc
*dc
, struct dc_state
*context
, struct pipe_ctx
*pipe_ctx
)
1097 struct pipe_ctx
*odm_pipe
;
1099 int opp_inst
[MAX_PIPES
] = {0};
1100 bool rate_control_2x_pclk
= (pipe_ctx
->stream
->timing
.flags
.INTERLACE
|| optc2_is_two_pixels_per_containter(&pipe_ctx
->stream
->timing
));
1101 struct mpc_dwb_flow_control flow_control
;
1102 struct mpc
*mpc
= dc
->res_pool
->mpc
;
1105 opp_cnt
= get_odm_config(pipe_ctx
, opp_inst
);
1108 pipe_ctx
->stream_res
.tg
->funcs
->set_odm_combine(
1109 pipe_ctx
->stream_res
.tg
,
1111 &pipe_ctx
->stream
->timing
);
1113 pipe_ctx
->stream_res
.tg
->funcs
->set_odm_bypass(
1114 pipe_ctx
->stream_res
.tg
, &pipe_ctx
->stream
->timing
);
1116 rate_control_2x_pclk
= rate_control_2x_pclk
|| opp_cnt
> 1;
1117 flow_control
.flow_ctrl_mode
= 0;
1118 flow_control
.flow_ctrl_cnt0
= 0x80;
1119 flow_control
.flow_ctrl_cnt1
= calc_mpc_flow_ctrl_cnt(pipe_ctx
->stream
, opp_cnt
);
1120 if (mpc
->funcs
->set_out_rate_control
) {
1121 for (i
= 0; i
< opp_cnt
; ++i
) {
1122 mpc
->funcs
->set_out_rate_control(
1125 rate_control_2x_pclk
,
1130 for (odm_pipe
= pipe_ctx
->next_odm_pipe
; odm_pipe
; odm_pipe
= odm_pipe
->next_odm_pipe
) {
1131 odm_pipe
->stream_res
.opp
->funcs
->opp_pipe_clock_control(
1132 odm_pipe
->stream_res
.opp
,
1136 // Don't program pixel clock after link is already enabled
1137 /* if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
1138 pipe_ctx->clock_source,
1139 &pipe_ctx->stream_res.pix_clk_params,
1140 &pipe_ctx->pll_settings)) {
1141 BREAK_TO_DEBUGGER();
1144 if (pipe_ctx
->stream_res
.dsc
)
1145 update_dsc_on_stream(pipe_ctx
, pipe_ctx
->stream
->timing
.flags
.DSC
);
1148 unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx
*pipe_ctx
, unsigned int *k1_div
, unsigned int *k2_div
)
1150 struct dc_stream_state
*stream
= pipe_ctx
->stream
;
1151 unsigned int odm_combine_factor
= 0;
1152 struct dc
*dc
= pipe_ctx
->stream
->ctx
->dc
;
1153 bool two_pix_per_container
= false;
1155 // For phantom pipes, use the same programming as the main pipes
1156 if (pipe_ctx
->stream
->mall_stream_config
.type
== SUBVP_PHANTOM
) {
1157 stream
= pipe_ctx
->stream
->mall_stream_config
.paired_stream
;
1159 two_pix_per_container
= optc2_is_two_pixels_per_containter(&stream
->timing
);
1160 odm_combine_factor
= get_odm_config(pipe_ctx
, NULL
);
1162 if (is_dp_128b_132b_signal(pipe_ctx
)) {
1163 *k2_div
= PIXEL_RATE_DIV_BY_1
;
1164 } else if (dc_is_hdmi_tmds_signal(pipe_ctx
->stream
->signal
) || dc_is_dvi_signal(pipe_ctx
->stream
->signal
)) {
1165 *k1_div
= PIXEL_RATE_DIV_BY_1
;
1166 if (stream
->timing
.pixel_encoding
== PIXEL_ENCODING_YCBCR420
)
1167 *k2_div
= PIXEL_RATE_DIV_BY_2
;
1169 *k2_div
= PIXEL_RATE_DIV_BY_4
;
1170 } else if (dc_is_dp_signal(pipe_ctx
->stream
->signal
)) {
1171 if (two_pix_per_container
) {
1172 *k1_div
= PIXEL_RATE_DIV_BY_1
;
1173 *k2_div
= PIXEL_RATE_DIV_BY_2
;
1175 *k1_div
= PIXEL_RATE_DIV_BY_1
;
1176 *k2_div
= PIXEL_RATE_DIV_BY_4
;
1177 if ((odm_combine_factor
== 2) || dc
->debug
.enable_dp_dig_pixel_rate_div_policy
)
1178 *k2_div
= PIXEL_RATE_DIV_BY_2
;
1182 if ((*k1_div
== PIXEL_RATE_DIV_NA
) && (*k2_div
== PIXEL_RATE_DIV_NA
))
1185 return odm_combine_factor
;
1188 void dcn32_set_pixels_per_cycle(struct pipe_ctx
*pipe_ctx
)
1190 uint32_t pix_per_cycle
= 1;
1191 uint32_t odm_combine_factor
= 1;
1193 if (!pipe_ctx
|| !pipe_ctx
->stream
|| !pipe_ctx
->stream_res
.stream_enc
)
1196 odm_combine_factor
= get_odm_config(pipe_ctx
, NULL
);
1197 if (optc2_is_two_pixels_per_containter(&pipe_ctx
->stream
->timing
) || odm_combine_factor
> 1
1198 || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx
))
1201 if (pipe_ctx
->stream_res
.stream_enc
->funcs
->set_input_mode
)
1202 pipe_ctx
->stream_res
.stream_enc
->funcs
->set_input_mode(pipe_ctx
->stream_res
.stream_enc
,
1206 void dcn32_unblank_stream(struct pipe_ctx
*pipe_ctx
,
1207 struct dc_link_settings
*link_settings
)
1209 struct encoder_unblank_param params
= {0};
1210 struct dc_stream_state
*stream
= pipe_ctx
->stream
;
1211 struct dc_link
*link
= stream
->link
;
1212 struct dce_hwseq
*hws
= link
->dc
->hwseq
;
1213 struct pipe_ctx
*odm_pipe
;
1214 struct dc
*dc
= pipe_ctx
->stream
->ctx
->dc
;
1215 uint32_t pix_per_cycle
= 1;
1218 for (odm_pipe
= pipe_ctx
->next_odm_pipe
; odm_pipe
; odm_pipe
= odm_pipe
->next_odm_pipe
)
1221 /* only 3 items below are used by unblank */
1222 params
.timing
= pipe_ctx
->stream
->timing
;
1224 params
.link_settings
.link_rate
= link_settings
->link_rate
;
1226 if (is_dp_128b_132b_signal(pipe_ctx
)) {
1227 /* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
1228 pipe_ctx
->stream_res
.hpo_dp_stream_enc
->funcs
->dp_unblank(
1229 pipe_ctx
->stream_res
.hpo_dp_stream_enc
,
1230 pipe_ctx
->stream_res
.tg
->inst
);
1231 } else if (dc_is_dp_signal(pipe_ctx
->stream
->signal
)) {
1232 if (optc2_is_two_pixels_per_containter(&stream
->timing
) || params
.opp_cnt
> 1
1233 || dc
->debug
.enable_dp_dig_pixel_rate_div_policy
) {
1234 params
.timing
.pix_clk_100hz
/= 2;
1237 pipe_ctx
->stream_res
.stream_enc
->funcs
->dp_set_odm_combine(
1238 pipe_ctx
->stream_res
.stream_enc
, pix_per_cycle
> 1);
1239 pipe_ctx
->stream_res
.stream_enc
->funcs
->dp_unblank(link
, pipe_ctx
->stream_res
.stream_enc
, ¶ms
);
1242 if (link
->local_sink
&& link
->local_sink
->sink_signal
== SIGNAL_TYPE_EDP
)
1243 hws
->funcs
.edp_backlight_control(link
, true);
1246 bool dcn32_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx
*pipe_ctx
)
1248 struct dc
*dc
= pipe_ctx
->stream
->ctx
->dc
;
1250 if (dc_is_dp_signal(pipe_ctx
->stream
->signal
) && !is_dp_128b_132b_signal(pipe_ctx
) &&
1251 dc
->debug
.enable_dp_dig_pixel_rate_div_policy
)