2 * Copyright 2016 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
27 #include "dm_services.h"
28 #include "dm_helpers.h"
29 #include "core_types.h"
32 #include "dce/dce_hwseq.h"
33 #include "dcn30/dcn30_cm_common.h"
34 #include "reg_helper.h"
38 #include "timing_generator.h"
43 #include "dc_dmub_srv.h"
44 #include "link_hwss.h"
45 #include "dpcd_defs.h"
46 #include "dcn32_hwseq.h"
49 #include "dcn20/dcn20_optc.h"
50 #include "dce/dmub_hw_lock_mgr.h"
51 #include "dcn32/dcn32_resource.h"
53 #include "../dcn20/dcn20_hwseq.h"
55 #define DC_LOGGER_INIT(logger)
66 #define FN(reg_name, field_name) \
67 hws->shifts->field_name, hws->masks->field_name
69 void dcn32_dsc_pg_control(
70 struct dce_hwseq
*hws
,
71 unsigned int dsc_inst
,
74 uint32_t power_gate
= power_on
? 0 : 1;
75 uint32_t pwr_status
= power_on
? 0 : 2;
76 uint32_t org_ip_request_cntl
= 0;
78 if (hws
->ctx
->dc
->debug
.disable_dsc_power_gate
)
81 if (!hws
->ctx
->dc
->debug
.enable_double_buffered_dsc_pg_support
)
84 REG_GET(DC_IP_REQUEST_CNTL
, IP_REQUEST_EN
, &org_ip_request_cntl
);
85 if (org_ip_request_cntl
== 0)
86 REG_SET(DC_IP_REQUEST_CNTL
, 0, IP_REQUEST_EN
, 1);
90 REG_UPDATE(DOMAIN16_PG_CONFIG
,
91 DOMAIN_POWER_GATE
, power_gate
);
93 REG_WAIT(DOMAIN16_PG_STATUS
,
94 DOMAIN_PGFSM_PWR_STATUS
, pwr_status
,
98 REG_UPDATE(DOMAIN17_PG_CONFIG
,
99 DOMAIN_POWER_GATE
, power_gate
);
101 REG_WAIT(DOMAIN17_PG_STATUS
,
102 DOMAIN_PGFSM_PWR_STATUS
, pwr_status
,
106 REG_UPDATE(DOMAIN18_PG_CONFIG
,
107 DOMAIN_POWER_GATE
, power_gate
);
109 REG_WAIT(DOMAIN18_PG_STATUS
,
110 DOMAIN_PGFSM_PWR_STATUS
, pwr_status
,
114 REG_UPDATE(DOMAIN19_PG_CONFIG
,
115 DOMAIN_POWER_GATE
, power_gate
);
117 REG_WAIT(DOMAIN19_PG_STATUS
,
118 DOMAIN_PGFSM_PWR_STATUS
, pwr_status
,
126 if (org_ip_request_cntl
== 0)
127 REG_SET(DC_IP_REQUEST_CNTL
, 0, IP_REQUEST_EN
, 0);
131 void dcn32_enable_power_gating_plane(
132 struct dce_hwseq
*hws
,
135 bool force_on
= true; /* disable power gating */
136 uint32_t org_ip_request_cntl
= 0;
141 REG_GET(DC_IP_REQUEST_CNTL
, IP_REQUEST_EN
, &org_ip_request_cntl
);
142 if (org_ip_request_cntl
== 0)
143 REG_SET(DC_IP_REQUEST_CNTL
, 0, IP_REQUEST_EN
, 1);
146 REG_UPDATE(DOMAIN0_PG_CONFIG
, DOMAIN_POWER_FORCEON
, force_on
);
147 REG_UPDATE(DOMAIN1_PG_CONFIG
, DOMAIN_POWER_FORCEON
, force_on
);
148 REG_UPDATE(DOMAIN2_PG_CONFIG
, DOMAIN_POWER_FORCEON
, force_on
);
149 REG_UPDATE(DOMAIN3_PG_CONFIG
, DOMAIN_POWER_FORCEON
, force_on
);
152 REG_UPDATE(DOMAIN16_PG_CONFIG
, DOMAIN_POWER_FORCEON
, force_on
);
153 REG_UPDATE(DOMAIN17_PG_CONFIG
, DOMAIN_POWER_FORCEON
, force_on
);
154 REG_UPDATE(DOMAIN18_PG_CONFIG
, DOMAIN_POWER_FORCEON
, force_on
);
155 REG_UPDATE(DOMAIN19_PG_CONFIG
, DOMAIN_POWER_FORCEON
, force_on
);
157 if (org_ip_request_cntl
== 0)
158 REG_SET(DC_IP_REQUEST_CNTL
, 0, IP_REQUEST_EN
, 0);
161 void dcn32_hubp_pg_control(struct dce_hwseq
*hws
, unsigned int hubp_inst
, bool power_on
)
163 uint32_t power_gate
= power_on
? 0 : 1;
164 uint32_t pwr_status
= power_on
? 0 : 2;
166 if (hws
->ctx
->dc
->debug
.disable_hubp_power_gate
)
169 if (REG(DOMAIN0_PG_CONFIG
) == 0)
174 REG_SET(DOMAIN0_PG_CONFIG
, 0, DOMAIN_POWER_GATE
, power_gate
);
175 REG_WAIT(DOMAIN0_PG_STATUS
, DOMAIN_PGFSM_PWR_STATUS
, pwr_status
, 1, 1000);
178 REG_SET(DOMAIN1_PG_CONFIG
, 0, DOMAIN_POWER_GATE
, power_gate
);
179 REG_WAIT(DOMAIN1_PG_STATUS
, DOMAIN_PGFSM_PWR_STATUS
, pwr_status
, 1, 1000);
182 REG_SET(DOMAIN2_PG_CONFIG
, 0, DOMAIN_POWER_GATE
, power_gate
);
183 REG_WAIT(DOMAIN2_PG_STATUS
, DOMAIN_PGFSM_PWR_STATUS
, pwr_status
, 1, 1000);
186 REG_SET(DOMAIN3_PG_CONFIG
, 0, DOMAIN_POWER_GATE
, power_gate
);
187 REG_WAIT(DOMAIN3_PG_STATUS
, DOMAIN_PGFSM_PWR_STATUS
, pwr_status
, 1, 1000);
195 static bool dcn32_check_no_memory_request_for_cab(struct dc
*dc
)
199 /* First, check no-memory-request case */
200 for (i
= 0; i
< dc
->current_state
->stream_count
; i
++) {
201 if ((dc
->current_state
->stream_status
[i
].plane_count
) &&
202 (dc
->current_state
->streams
[i
]->link
->psr_settings
.psr_version
== DC_PSR_VERSION_UNSUPPORTED
))
203 /* Fail eligibility on a visible stream */
207 if (i
== dc
->current_state
->stream_count
)
214 /* This function loops through every surface that needs to be cached in CAB for SS,
215 * and calculates the total number of ways required to store all surfaces (primary,
218 static uint32_t dcn32_calculate_cab_allocation(struct dc
*dc
, struct dc_state
*ctx
)
221 uint32_t num_ways
= 0;
222 uint32_t mall_ss_size_bytes
= 0;
224 mall_ss_size_bytes
= ctx
->bw_ctx
.bw
.dcn
.mall_ss_size_bytes
;
225 // TODO add additional logic for PSR active stream exclusion optimization
226 // mall_ss_psr_active_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes;
228 // Include cursor size for CAB allocation
229 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
230 struct pipe_ctx
*pipe
= &ctx
->res_ctx
.pipe_ctx
[i
];
232 if (!pipe
->stream
|| !pipe
->plane_state
)
235 mall_ss_size_bytes
+= dcn32_helper_calculate_mall_bytes_for_cursor(dc
, pipe
, false);
238 // Convert number of cache lines required to number of ways
239 if (dc
->debug
.force_mall_ss_num_ways
> 0) {
240 num_ways
= dc
->debug
.force_mall_ss_num_ways
;
242 num_ways
= dcn32_helper_mall_bytes_to_ways(dc
, mall_ss_size_bytes
);
248 bool dcn32_apply_idle_power_optimizations(struct dc
*dc
, bool enable
)
250 union dmub_rb_cmd cmd
;
254 bool mall_ss_unsupported
= false;
255 struct dc_plane_state
*plane
= NULL
;
257 if (!dc
->ctx
->dmub_srv
)
260 for (i
= 0; i
< dc
->current_state
->stream_count
; i
++) {
261 /* MALL SS messaging is not supported with PSR at this time */
262 if (dc
->current_state
->streams
[i
] != NULL
&&
263 dc
->current_state
->streams
[i
]->link
->psr_settings
.psr_version
!= DC_PSR_VERSION_UNSUPPORTED
)
268 if (dc
->current_state
) {
270 /* 1. Check no memory request case for CAB.
271 * If no memory request case, send CAB_ACTION NO_DF_REQ DMUB message
273 if (dcn32_check_no_memory_request_for_cab(dc
)) {
274 /* Enable no-memory-requests case */
275 memset(&cmd
, 0, sizeof(cmd
));
276 cmd
.cab
.header
.type
= DMUB_CMD__CAB_FOR_SS
;
277 cmd
.cab
.header
.sub_type
= DMUB_CMD__CAB_NO_DCN_REQ
;
278 cmd
.cab
.header
.payload_bytes
= sizeof(cmd
.cab
) - sizeof(cmd
.cab
.header
);
280 dm_execute_dmub_cmd(dc
->ctx
, &cmd
, DM_DMUB_WAIT_TYPE_NO_WAIT
);
285 /* 2. Check if all surfaces can fit in CAB.
286 * If surfaces can fit into CAB, send CAB_ACTION_ALLOW DMUB message
287 * and configure HUBP's to fetch from MALL
289 ways
= dcn32_calculate_cab_allocation(dc
, dc
->current_state
);
291 /* MALL not supported with Stereo3D or TMZ surface. If any plane is using stereo,
292 * or TMZ surface, don't try to enter MALL.
294 for (i
= 0; i
< dc
->current_state
->stream_count
; i
++) {
295 for (j
= 0; j
< dc
->current_state
->stream_status
[i
].plane_count
; j
++) {
296 plane
= dc
->current_state
->stream_status
[i
].plane_states
[j
];
298 if (plane
->address
.type
== PLN_ADDR_TYPE_GRPH_STEREO
||
299 plane
->address
.tmz_surface
) {
300 mall_ss_unsupported
= true;
304 if (mall_ss_unsupported
)
307 if (ways
<= dc
->caps
.cache_num_ways
&& !mall_ss_unsupported
) {
308 memset(&cmd
, 0, sizeof(cmd
));
309 cmd
.cab
.header
.type
= DMUB_CMD__CAB_FOR_SS
;
310 cmd
.cab
.header
.sub_type
= DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB
;
311 cmd
.cab
.header
.payload_bytes
= sizeof(cmd
.cab
) - sizeof(cmd
.cab
.header
);
312 cmd
.cab
.cab_alloc_ways
= (uint8_t)ways
;
314 dm_execute_dmub_cmd(dc
->ctx
, &cmd
, DM_DMUB_WAIT_TYPE_NO_WAIT
);
324 memset(&cmd
, 0, sizeof(cmd
));
325 cmd
.cab
.header
.type
= DMUB_CMD__CAB_FOR_SS
;
326 cmd
.cab
.header
.sub_type
= DMUB_CMD__CAB_NO_IDLE_OPTIMIZATION
;
327 cmd
.cab
.header
.payload_bytes
=
328 sizeof(cmd
.cab
) - sizeof(cmd
.cab
.header
);
330 dm_execute_dmub_cmd(dc
->ctx
, &cmd
, DM_DMUB_WAIT_TYPE_WAIT
);
335 /* Send DMCUB message with SubVP pipe info
336 * - For each pipe in context, populate payload with required SubVP information
337 * if the pipe is using SubVP for MCLK switch
338 * - This function must be called while the DMUB HW lock is acquired by driver
340 void dcn32_commit_subvp_config(struct dc
*dc
, struct dc_state
*context
)
343 bool enable_subvp
= false;
345 if (!dc
->ctx
|| !dc
->ctx
->dmub_srv
)
348 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
349 struct pipe_ctx
*pipe_ctx
= &context
->res_ctx
.pipe_ctx
[i
];
351 if (pipe_ctx
->stream
&& pipe_ctx
->stream
->mall_stream_config
.paired_stream
&&
352 pipe_ctx
->stream
->mall_stream_config
.type
== SUBVP_MAIN
) {
353 // There is at least 1 SubVP pipe, so enable SubVP
358 dc_dmub_setup_subvp_dmub_command(dc
, context
, enable_subvp
);
361 /* Sub-Viewport DMUB lock needs to be acquired by driver whenever SubVP is active and:
362 * 1. Any full update for any SubVP main pipe
363 * 2. Any immediate flip for any SubVP pipe
364 * 3. Any flip for DRR pipe
365 * 4. If SubVP was previously in use (i.e. in old context)
367 void dcn32_subvp_pipe_control_lock(struct dc
*dc
,
368 struct dc_state
*context
,
370 bool should_lock_all_pipes
,
371 struct pipe_ctx
*top_pipe_to_program
,
375 bool subvp_immediate_flip
= false;
376 bool subvp_in_use
= false;
377 struct pipe_ctx
*pipe
;
379 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
380 pipe
= &context
->res_ctx
.pipe_ctx
[i
];
382 if (pipe
->stream
&& pipe
->plane_state
&& pipe
->stream
->mall_stream_config
.type
== SUBVP_MAIN
) {
388 if (top_pipe_to_program
&& top_pipe_to_program
->stream
&& top_pipe_to_program
->plane_state
) {
389 if (top_pipe_to_program
->stream
->mall_stream_config
.type
== SUBVP_MAIN
&&
390 top_pipe_to_program
->plane_state
->flip_immediate
)
391 subvp_immediate_flip
= true;
394 // Don't need to lock for DRR VSYNC flips -- FW will wait for DRR pending update cleared.
395 if ((subvp_in_use
&& (should_lock_all_pipes
|| subvp_immediate_flip
)) || (!subvp_in_use
&& subvp_prev_use
)) {
396 union dmub_inbox0_cmd_lock_hw hw_lock_cmd
= { 0 };
399 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
400 pipe
= &context
->res_ctx
.pipe_ctx
[i
];
401 if (pipe
->stream
&& pipe
->plane_state
&& pipe
->stream
->mall_stream_config
.type
== SUBVP_MAIN
&&
402 should_lock_all_pipes
)
403 pipe
->stream_res
.tg
->funcs
->wait_for_state(pipe
->stream_res
.tg
, CRTC_STATE_VBLANK
);
407 hw_lock_cmd
.bits
.command_code
= DMUB_INBOX0_CMD__HW_LOCK
;
408 hw_lock_cmd
.bits
.hw_lock_client
= HW_LOCK_CLIENT_DRIVER
;
409 hw_lock_cmd
.bits
.lock
= lock
;
410 hw_lock_cmd
.bits
.should_release
= !lock
;
411 dmub_hw_lock_mgr_inbox0_cmd(dc
->ctx
->dmub_srv
, hw_lock_cmd
);
415 void dcn32_subvp_pipe_control_lock_fast(union block_sequence_params
*params
)
417 struct dc
*dc
= params
->subvp_pipe_control_lock_fast_params
.dc
;
418 bool lock
= params
->subvp_pipe_control_lock_fast_params
.lock
;
419 struct pipe_ctx
*pipe_ctx
= params
->subvp_pipe_control_lock_fast_params
.pipe_ctx
;
420 bool subvp_immediate_flip
= false;
422 if (pipe_ctx
&& pipe_ctx
->stream
&& pipe_ctx
->plane_state
) {
423 if (pipe_ctx
->stream
->mall_stream_config
.type
== SUBVP_MAIN
&&
424 pipe_ctx
->plane_state
->flip_immediate
)
425 subvp_immediate_flip
= true;
428 // Don't need to lock for DRR VSYNC flips -- FW will wait for DRR pending update cleared.
429 if (subvp_immediate_flip
) {
430 union dmub_inbox0_cmd_lock_hw hw_lock_cmd
= { 0 };
432 hw_lock_cmd
.bits
.command_code
= DMUB_INBOX0_CMD__HW_LOCK
;
433 hw_lock_cmd
.bits
.hw_lock_client
= HW_LOCK_CLIENT_DRIVER
;
434 hw_lock_cmd
.bits
.lock
= lock
;
435 hw_lock_cmd
.bits
.should_release
= !lock
;
436 dmub_hw_lock_mgr_inbox0_cmd(dc
->ctx
->dmub_srv
, hw_lock_cmd
);
440 bool dcn32_set_mpc_shaper_3dlut(
441 struct pipe_ctx
*pipe_ctx
, const struct dc_stream_state
*stream
)
443 struct dpp
*dpp_base
= pipe_ctx
->plane_res
.dpp
;
444 int mpcc_id
= pipe_ctx
->plane_res
.hubp
->inst
;
445 struct mpc
*mpc
= pipe_ctx
->stream_res
.opp
->ctx
->dc
->res_pool
->mpc
;
448 const struct pwl_params
*shaper_lut
= NULL
;
449 //get the shaper lut params
450 if (stream
->func_shaper
) {
451 if (stream
->func_shaper
->type
== TF_TYPE_HWPWL
)
452 shaper_lut
= &stream
->func_shaper
->pwl
;
453 else if (stream
->func_shaper
->type
== TF_TYPE_DISTRIBUTED_POINTS
) {
454 cm_helper_translate_curve_to_hw_format(stream
->ctx
,
456 &dpp_base
->shaper_params
, true);
457 shaper_lut
= &dpp_base
->shaper_params
;
461 if (stream
->lut3d_func
&&
462 stream
->lut3d_func
->state
.bits
.initialized
== 1) {
464 result
= mpc
->funcs
->program_3dlut(mpc
,
465 &stream
->lut3d_func
->lut_3d
,
468 result
= mpc
->funcs
->program_shaper(mpc
,
476 bool dcn32_set_mcm_luts(
477 struct pipe_ctx
*pipe_ctx
, const struct dc_plane_state
*plane_state
)
479 struct dpp
*dpp_base
= pipe_ctx
->plane_res
.dpp
;
480 int mpcc_id
= pipe_ctx
->plane_res
.hubp
->inst
;
481 struct mpc
*mpc
= pipe_ctx
->stream_res
.opp
->ctx
->dc
->res_pool
->mpc
;
483 struct pwl_params
*lut_params
= NULL
;
486 if (plane_state
->blend_tf
) {
487 if (plane_state
->blend_tf
->type
== TF_TYPE_HWPWL
)
488 lut_params
= &plane_state
->blend_tf
->pwl
;
489 else if (plane_state
->blend_tf
->type
== TF_TYPE_DISTRIBUTED_POINTS
) {
490 cm3_helper_translate_curve_to_hw_format(plane_state
->blend_tf
,
491 &dpp_base
->regamma_params
, false);
492 lut_params
= &dpp_base
->regamma_params
;
495 result
= mpc
->funcs
->program_1dlut(mpc
, lut_params
, mpcc_id
);
499 if (plane_state
->in_shaper_func
) {
500 if (plane_state
->in_shaper_func
->type
== TF_TYPE_HWPWL
)
501 lut_params
= &plane_state
->in_shaper_func
->pwl
;
502 else if (plane_state
->in_shaper_func
->type
== TF_TYPE_DISTRIBUTED_POINTS
) {
503 // TODO: dpp_base replace
505 cm3_helper_translate_curve_to_hw_format(plane_state
->in_shaper_func
,
506 &dpp_base
->shaper_params
, true);
507 lut_params
= &dpp_base
->shaper_params
;
511 result
= mpc
->funcs
->program_shaper(mpc
, lut_params
, mpcc_id
);
514 if (plane_state
->lut3d_func
&& plane_state
->lut3d_func
->state
.bits
.initialized
== 1)
515 result
= mpc
->funcs
->program_3dlut(mpc
, &plane_state
->lut3d_func
->lut_3d
, mpcc_id
);
517 result
= mpc
->funcs
->program_3dlut(mpc
, NULL
, mpcc_id
);
522 bool dcn32_set_input_transfer_func(struct dc
*dc
,
523 struct pipe_ctx
*pipe_ctx
,
524 const struct dc_plane_state
*plane_state
)
526 struct dce_hwseq
*hws
= dc
->hwseq
;
527 struct mpc
*mpc
= dc
->res_pool
->mpc
;
528 struct dpp
*dpp_base
= pipe_ctx
->plane_res
.dpp
;
530 enum dc_transfer_func_predefined tf
;
532 struct pwl_params
*params
= NULL
;
534 if (mpc
== NULL
|| plane_state
== NULL
)
537 tf
= TRANSFER_FUNCTION_UNITY
;
539 if (plane_state
->in_transfer_func
&&
540 plane_state
->in_transfer_func
->type
== TF_TYPE_PREDEFINED
)
541 tf
= plane_state
->in_transfer_func
->tf
;
543 dpp_base
->funcs
->dpp_set_pre_degam(dpp_base
, tf
);
545 if (plane_state
->in_transfer_func
) {
546 if (plane_state
->in_transfer_func
->type
== TF_TYPE_HWPWL
)
547 params
= &plane_state
->in_transfer_func
->pwl
;
548 else if (plane_state
->in_transfer_func
->type
== TF_TYPE_DISTRIBUTED_POINTS
&&
549 cm3_helper_translate_curve_to_hw_format(plane_state
->in_transfer_func
,
550 &dpp_base
->degamma_params
, false))
551 params
= &dpp_base
->degamma_params
;
554 dpp_base
->funcs
->dpp_program_gamcor_lut(dpp_base
, params
);
556 if (pipe_ctx
->stream_res
.opp
&&
557 pipe_ctx
->stream_res
.opp
->ctx
&&
558 hws
->funcs
.set_mcm_luts
)
559 result
= hws
->funcs
.set_mcm_luts(pipe_ctx
, plane_state
);
564 bool dcn32_set_output_transfer_func(struct dc
*dc
,
565 struct pipe_ctx
*pipe_ctx
,
566 const struct dc_stream_state
*stream
)
568 int mpcc_id
= pipe_ctx
->plane_res
.hubp
->inst
;
569 struct mpc
*mpc
= pipe_ctx
->stream_res
.opp
->ctx
->dc
->res_pool
->mpc
;
570 struct pwl_params
*params
= NULL
;
573 /* program OGAM or 3DLUT only for the top pipe*/
574 if (resource_is_pipe_type(pipe_ctx
, OPP_HEAD
)) {
575 /*program shaper and 3dlut in MPC*/
576 ret
= dcn32_set_mpc_shaper_3dlut(pipe_ctx
, stream
);
577 if (ret
== false && mpc
->funcs
->set_output_gamma
&& stream
->out_transfer_func
) {
578 if (stream
->out_transfer_func
->type
== TF_TYPE_HWPWL
)
579 params
= &stream
->out_transfer_func
->pwl
;
580 else if (pipe_ctx
->stream
->out_transfer_func
->type
==
581 TF_TYPE_DISTRIBUTED_POINTS
&&
582 cm3_helper_translate_curve_to_hw_format(
583 stream
->out_transfer_func
,
584 &mpc
->blender_params
, false))
585 params
= &mpc
->blender_params
;
586 /* there are no ROM LUTs in OUTGAM */
587 if (stream
->out_transfer_func
->type
== TF_TYPE_PREDEFINED
)
592 mpc
->funcs
->set_output_gamma(mpc
, mpcc_id
, params
);
596 /* Program P-State force value according to if pipe is using SubVP / FPO or not:
597 * 1. Reset P-State force on all pipes first
598 * 2. For each main pipe, force P-State disallow (P-State allow moderated by DMUB)
600 void dcn32_update_force_pstate(struct dc
*dc
, struct dc_state
*context
)
604 /* Unforce p-state for each pipe if it is not FPO or SubVP.
605 * For FPO and SubVP, if it's already forced disallow, leave
608 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
609 struct pipe_ctx
*pipe
= &context
->res_ctx
.pipe_ctx
[i
];
610 struct hubp
*hubp
= pipe
->plane_res
.hubp
;
612 if (!pipe
->stream
|| !(pipe
->stream
->mall_stream_config
.type
== SUBVP_MAIN
||
613 pipe
->stream
->fpo_in_use
)) {
614 if (hubp
&& hubp
->funcs
->hubp_update_force_pstate_disallow
)
615 hubp
->funcs
->hubp_update_force_pstate_disallow(hubp
, false);
616 if (hubp
&& hubp
->funcs
->hubp_update_force_cursor_pstate_disallow
)
617 hubp
->funcs
->hubp_update_force_cursor_pstate_disallow(hubp
, false);
621 /* Loop through each pipe -- for each subvp main pipe force p-state allow equal to false.
623 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
624 struct pipe_ctx
*pipe
= &context
->res_ctx
.pipe_ctx
[i
];
625 struct hubp
*hubp
= pipe
->plane_res
.hubp
;
627 if (pipe
->stream
&& (pipe
->stream
->mall_stream_config
.type
== SUBVP_MAIN
||
628 pipe
->stream
->fpo_in_use
)) {
629 if (hubp
&& hubp
->funcs
->hubp_update_force_pstate_disallow
)
630 hubp
->funcs
->hubp_update_force_pstate_disallow(hubp
, true);
631 if (hubp
&& hubp
->funcs
->hubp_update_force_cursor_pstate_disallow
)
632 hubp
->funcs
->hubp_update_force_cursor_pstate_disallow(hubp
, true);
637 /* Update MALL_SEL register based on if pipe / plane
638 * is a phantom pipe, main pipe, and if using MALL
641 void dcn32_update_mall_sel(struct dc
*dc
, struct dc_state
*context
)
644 unsigned int num_ways
= dcn32_calculate_cab_allocation(dc
, context
);
645 bool cache_cursor
= false;
647 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
648 struct pipe_ctx
*pipe
= &context
->res_ctx
.pipe_ctx
[i
];
649 struct hubp
*hubp
= pipe
->plane_res
.hubp
;
651 if (pipe
->stream
&& pipe
->plane_state
&& hubp
&& hubp
->funcs
->hubp_update_mall_sel
) {
652 int cursor_size
= hubp
->curs_attr
.pitch
* hubp
->curs_attr
.height
;
654 switch (hubp
->curs_attr
.color_format
) {
655 case CURSOR_MODE_MONO
:
658 case CURSOR_MODE_COLOR_1BIT_AND
:
659 case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA
:
660 case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA
:
664 case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED
:
665 case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED
:
671 if (cursor_size
> 16384)
674 if (pipe
->stream
->mall_stream_config
.type
== SUBVP_PHANTOM
) {
675 hubp
->funcs
->hubp_update_mall_sel(hubp
, 1, false);
677 // MALL not supported with Stereo3D
678 hubp
->funcs
->hubp_update_mall_sel(hubp
,
679 num_ways
<= dc
->caps
.cache_num_ways
&&
680 pipe
->stream
->link
->psr_settings
.psr_version
== DC_PSR_VERSION_UNSUPPORTED
&&
681 pipe
->plane_state
->address
.type
!= PLN_ADDR_TYPE_GRPH_STEREO
&&
682 !pipe
->plane_state
->address
.tmz_surface
? 2 : 0,
689 /* Program the sub-viewport pipe configuration after the main / phantom pipes
690 * have been programmed in hardware.
691 * 1. Update force P-State for all the main pipes (disallow P-state)
692 * 2. Update MALL_SEL register
693 * 3. Program FORCE_ONE_ROW_FOR_FRAME for main subvp pipes
695 void dcn32_program_mall_pipe_config(struct dc
*dc
, struct dc_state
*context
)
698 struct dce_hwseq
*hws
= dc
->hwseq
;
700 // Don't force p-state disallow -- can't block dummy p-state
702 // Update MALL_SEL register for each pipe
703 if (hws
&& hws
->funcs
.update_mall_sel
)
704 hws
->funcs
.update_mall_sel(dc
, context
);
706 // Program FORCE_ONE_ROW_FOR_FRAME and CURSOR_REQ_MODE for main subvp pipes
707 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
708 struct pipe_ctx
*pipe
= &context
->res_ctx
.pipe_ctx
[i
];
709 struct hubp
*hubp
= pipe
->plane_res
.hubp
;
711 if (pipe
->stream
&& hubp
&& hubp
->funcs
->hubp_prepare_subvp_buffering
) {
712 /* TODO - remove setting CURSOR_REQ_MODE to 0 for legacy cases
713 * - need to investigate single pipe MPO + SubVP case to
714 * see if CURSOR_REQ_MODE will be back to 1 for SubVP
715 * when it should be 0 for MPO
717 if (pipe
->stream
->mall_stream_config
.type
== SUBVP_MAIN
) {
718 hubp
->funcs
->hubp_prepare_subvp_buffering(hubp
, true);
724 static void dcn32_initialize_min_clocks(struct dc
*dc
)
726 struct dc_clocks
*clocks
= &dc
->current_state
->bw_ctx
.bw
.dcn
.clk
;
728 clocks
->dcfclk_deep_sleep_khz
= DCN3_2_DCFCLK_DS_INIT_KHZ
;
729 clocks
->dcfclk_khz
= dc
->clk_mgr
->bw_params
->clk_table
.entries
[0].dcfclk_mhz
* 1000;
730 clocks
->socclk_khz
= dc
->clk_mgr
->bw_params
->clk_table
.entries
[0].socclk_mhz
* 1000;
731 clocks
->dramclk_khz
= dc
->clk_mgr
->bw_params
->clk_table
.entries
[0].memclk_mhz
* 1000;
732 clocks
->dppclk_khz
= dc
->clk_mgr
->bw_params
->clk_table
.entries
[0].dppclk_mhz
* 1000;
733 clocks
->ref_dtbclk_khz
= dc
->clk_mgr
->bw_params
->clk_table
.entries
[0].dtbclk_mhz
* 1000;
734 clocks
->fclk_p_state_change_support
= true;
735 clocks
->p_state_change_support
= true;
736 if (dc
->debug
.disable_boot_optimizations
) {
737 clocks
->dispclk_khz
= dc
->clk_mgr
->bw_params
->clk_table
.entries
[0].dispclk_mhz
* 1000;
739 /* Even though DPG_EN = 1 for the connected display, it still requires the
740 * correct timing so we cannot set DISPCLK to min freq or it could cause
741 * audio corruption. Read current DISPCLK from DENTIST and request the same
742 * freq to ensure that the timing is valid and unchanged.
744 clocks
->dispclk_khz
= dc
->clk_mgr
->funcs
->get_dispclk_from_dentist(dc
->clk_mgr
);
747 dc
->clk_mgr
->funcs
->update_clocks(
753 void dcn32_init_hw(struct dc
*dc
)
755 struct abm
**abms
= dc
->res_pool
->multiple_abms
;
756 struct dce_hwseq
*hws
= dc
->hwseq
;
757 struct dc_bios
*dcb
= dc
->ctx
->dc_bios
;
758 struct resource_pool
*res_pool
= dc
->res_pool
;
761 uint32_t backlight
= MAX_BACKLIGHT_LEVEL
;
763 if (dc
->clk_mgr
&& dc
->clk_mgr
->funcs
->init_clocks
)
764 dc
->clk_mgr
->funcs
->init_clocks(dc
->clk_mgr
);
766 // Initialize the dccg
767 if (res_pool
->dccg
->funcs
->dccg_init
)
768 res_pool
->dccg
->funcs
->dccg_init(res_pool
->dccg
);
770 if (!dcb
->funcs
->is_accelerated_mode(dcb
)) {
771 hws
->funcs
.bios_golden_init(dc
);
772 hws
->funcs
.disable_vga(dc
->hwseq
);
775 // Set default OPTC memory power states
776 if (dc
->debug
.enable_mem_low_power
.bits
.optc
) {
777 // Shutdown when unassigned and light sleep in VBLANK
778 REG_SET_2(ODM_MEM_PWR_CTRL3
, 0, ODM_MEM_UNASSIGNED_PWR_MODE
, 3, ODM_MEM_VBLANK_PWR_MODE
, 1);
781 if (dc
->debug
.enable_mem_low_power
.bits
.vga
) {
782 // Power down VGA memory
783 REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL
, VGA_MEM_PWR_FORCE
, 1);
786 if (dc
->ctx
->dc_bios
->fw_info_valid
) {
787 res_pool
->ref_clocks
.xtalin_clock_inKhz
=
788 dc
->ctx
->dc_bios
->fw_info
.pll_info
.crystal_frequency
;
790 if (res_pool
->dccg
&& res_pool
->hubbub
) {
791 (res_pool
->dccg
->funcs
->get_dccg_ref_freq
)(res_pool
->dccg
,
792 dc
->ctx
->dc_bios
->fw_info
.pll_info
.crystal_frequency
,
793 &res_pool
->ref_clocks
.dccg_ref_clock_inKhz
);
795 (res_pool
->hubbub
->funcs
->get_dchub_ref_freq
)(res_pool
->hubbub
,
796 res_pool
->ref_clocks
.dccg_ref_clock_inKhz
,
797 &res_pool
->ref_clocks
.dchub_ref_clock_inKhz
);
799 // Not all ASICs have DCCG sw component
800 res_pool
->ref_clocks
.dccg_ref_clock_inKhz
=
801 res_pool
->ref_clocks
.xtalin_clock_inKhz
;
802 res_pool
->ref_clocks
.dchub_ref_clock_inKhz
=
803 res_pool
->ref_clocks
.xtalin_clock_inKhz
;
806 ASSERT_CRITICAL(false);
808 for (i
= 0; i
< dc
->link_count
; i
++) {
809 /* Power up AND update implementation according to the
810 * required signal (which may be different from the
811 * default signal on connector).
813 struct dc_link
*link
= dc
->links
[i
];
815 link
->link_enc
->funcs
->hw_init(link
->link_enc
);
817 /* Check for enabled DIG to identify enabled display */
818 if (link
->link_enc
->funcs
->is_dig_enabled
&&
819 link
->link_enc
->funcs
->is_dig_enabled(link
->link_enc
)) {
820 link
->link_status
.link_active
= true;
821 link
->phy_state
.symclk_state
= SYMCLK_ON_TX_ON
;
822 if (link
->link_enc
->funcs
->fec_is_active
&&
823 link
->link_enc
->funcs
->fec_is_active(link
->link_enc
))
824 link
->fec_state
= dc_link_fec_enabled
;
828 /* enable_power_gating_plane before dsc_pg_control because
829 * FORCEON = 1 with hw default value on bootup, resume from s3
831 if (hws
->funcs
.enable_power_gating_plane
)
832 hws
->funcs
.enable_power_gating_plane(dc
->hwseq
, true);
834 /* we want to turn off all dp displays before doing detection */
835 dc
->link_srv
->blank_all_dp_displays(dc
);
837 /* If taking control over from VBIOS, we may want to optimize our first
838 * mode set, so we need to skip powering down pipes until we know which
839 * pipes we want to use.
840 * Otherwise, if taking control is not possible, we need to power
843 if (dcb
->funcs
->is_accelerated_mode(dcb
) || !dc
->config
.seamless_boot_edp_requested
) {
844 /* Disable boot optimizations means power down everything including PHY, DIG,
845 * and OTG (i.e. the boot is not optimized because we do a full power down).
847 if (dc
->hwss
.enable_accelerated_mode
&& dc
->debug
.disable_boot_optimizations
)
848 dc
->hwss
.enable_accelerated_mode(dc
, dc
->current_state
);
850 hws
->funcs
.init_pipes(dc
, dc
->current_state
);
852 if (dc
->res_pool
->hubbub
->funcs
->allow_self_refresh_control
)
853 dc
->res_pool
->hubbub
->funcs
->allow_self_refresh_control(dc
->res_pool
->hubbub
,
854 !dc
->res_pool
->hubbub
->ctx
->dc
->debug
.disable_stutter
);
856 dcn32_initialize_min_clocks(dc
);
858 /* On HW init, allow idle optimizations after pipes have been turned off.
860 * In certain D3 cases (i.e. BOCO / BOMACO) it's possible that hardware state
861 * is reset (i.e. not in idle at the time hw init is called), but software state
862 * still has idle_optimizations = true, so we must disable idle optimizations first
863 * (i.e. set false), then re-enable (set true).
865 dc_allow_idle_optimizations(dc
, false);
866 dc_allow_idle_optimizations(dc
, true);
869 /* In headless boot cases, DIG may be turned
870 * on which causes HW/SW discrepancies.
871 * To avoid this, power down hardware on boot
872 * if DIG is turned on and seamless boot not enabled
874 if (!dc
->config
.seamless_boot_edp_requested
) {
875 struct dc_link
*edp_links
[MAX_NUM_EDP
];
876 struct dc_link
*edp_link
;
878 dc_get_edp_links(dc
, edp_links
, &edp_num
);
880 for (i
= 0; i
< edp_num
; i
++) {
881 edp_link
= edp_links
[i
];
882 if (edp_link
->link_enc
->funcs
->is_dig_enabled
&&
883 edp_link
->link_enc
->funcs
->is_dig_enabled(edp_link
->link_enc
) &&
884 dc
->hwss
.edp_backlight_control
&&
885 dc
->hwss
.power_down
&&
886 dc
->hwss
.edp_power_control
) {
887 dc
->hwss
.edp_backlight_control(edp_link
, false);
888 dc
->hwss
.power_down(dc
);
889 dc
->hwss
.edp_power_control(edp_link
, false);
893 for (i
= 0; i
< dc
->link_count
; i
++) {
894 struct dc_link
*link
= dc
->links
[i
];
896 if (link
->link_enc
->funcs
->is_dig_enabled
&&
897 link
->link_enc
->funcs
->is_dig_enabled(link
->link_enc
) &&
898 dc
->hwss
.power_down
) {
899 dc
->hwss
.power_down(dc
);
907 for (i
= 0; i
< res_pool
->audio_count
; i
++) {
908 struct audio
*audio
= res_pool
->audios
[i
];
910 audio
->funcs
->hw_init(audio
);
913 for (i
= 0; i
< dc
->link_count
; i
++) {
914 struct dc_link
*link
= dc
->links
[i
];
916 if (link
->panel_cntl
)
917 backlight
= link
->panel_cntl
->funcs
->hw_init(link
->panel_cntl
);
920 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
921 if (abms
[i
] != NULL
&& abms
[i
]->funcs
!= NULL
)
922 abms
[i
]->funcs
->abm_init(abms
[i
], backlight
);
925 /* power AFMT HDMI memory TODO: may move to dis/en output save power*/
926 REG_WRITE(DIO_MEM_PWR_CTRL
, 0);
928 if (!dc
->debug
.disable_clock_gate
) {
929 /* enable all DCN clock gating */
930 REG_WRITE(DCCG_GATE_DISABLE_CNTL
, 0);
932 REG_WRITE(DCCG_GATE_DISABLE_CNTL2
, 0);
934 REG_UPDATE(DCFCLK_CNTL
, DCFCLK_GATE_DIS
, 0);
937 if (!dcb
->funcs
->is_accelerated_mode(dcb
) && dc
->res_pool
->hubbub
->funcs
->init_watermarks
)
938 dc
->res_pool
->hubbub
->funcs
->init_watermarks(dc
->res_pool
->hubbub
);
940 if (dc
->clk_mgr
->funcs
->notify_wm_ranges
)
941 dc
->clk_mgr
->funcs
->notify_wm_ranges(dc
->clk_mgr
);
943 if (dc
->clk_mgr
->funcs
->set_hard_max_memclk
&& !dc
->clk_mgr
->dc_mode_softmax_enabled
)
944 dc
->clk_mgr
->funcs
->set_hard_max_memclk(dc
->clk_mgr
);
946 if (dc
->res_pool
->hubbub
->funcs
->force_pstate_change_control
)
947 dc
->res_pool
->hubbub
->funcs
->force_pstate_change_control(
948 dc
->res_pool
->hubbub
, false, false);
950 if (dc
->res_pool
->hubbub
->funcs
->init_crb
)
951 dc
->res_pool
->hubbub
->funcs
->init_crb(dc
->res_pool
->hubbub
);
953 if (dc
->res_pool
->hubbub
->funcs
->set_request_limit
&& dc
->config
.sdpif_request_limit_words_per_umc
> 0)
954 dc
->res_pool
->hubbub
->funcs
->set_request_limit(dc
->res_pool
->hubbub
, dc
->ctx
->dc_bios
->vram_info
.num_chans
, dc
->config
.sdpif_request_limit_words_per_umc
);
956 // Get DMCUB capabilities
957 if (dc
->ctx
->dmub_srv
) {
958 dc_dmub_srv_query_caps_cmd(dc
->ctx
->dmub_srv
);
959 dc
->caps
.dmub_caps
.psr
= dc
->ctx
->dmub_srv
->dmub
->feature_caps
.psr
;
960 dc
->caps
.dmub_caps
.subvp_psr
= dc
->ctx
->dmub_srv
->dmub
->feature_caps
.subvp_psr_support
;
961 dc
->caps
.dmub_caps
.gecc_enable
= dc
->ctx
->dmub_srv
->dmub
->feature_caps
.gecc_enable
;
962 dc
->caps
.dmub_caps
.mclk_sw
= dc
->ctx
->dmub_srv
->dmub
->feature_caps
.fw_assisted_mclk_switch
;
966 static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state
*stream
,
969 bool hblank_halved
= optc2_is_two_pixels_per_containter(&stream
->timing
);
973 hblank_halved
= true;
975 flow_ctrl_cnt
= stream
->timing
.h_total
- stream
->timing
.h_addressable
-
976 stream
->timing
.h_border_left
-
977 stream
->timing
.h_border_right
;
982 /* ODM combine 4:1 case */
986 return flow_ctrl_cnt
;
/*
 * Enable or disable DSC for @pipe_ctx's stream across its whole ODM chain.
 *
 * Enable path: build a dsc_config from the stream timing (picture size
 * split across opp_cnt ODM segments, num_slices_h divided per segment),
 * program and enable the DSC block on the top pipe and on every
 * next_odm_pipe, then restore the full-width values and select the DSC
 * mode in the OPTC (timing generator).
 *
 * Disable path: clear the OPTC DSC config (OPTC_DSC_DISABLED) and disable
 * the DSC block of the top pipe and of each ODM pipe.
 *
 * NOTE(review): the enable/disable branch structure (the "if (enable)"
 * and matching else, plus several braces and the opp_cnt counting loop
 * body) has been elided by this extract — verify against the full file.
 */
989 static void update_dsc_on_stream(struct pipe_ctx
*pipe_ctx
, bool enable
)
991 struct display_stream_compressor
*dsc
= pipe_ctx
->stream_res
.dsc
;
992 struct dc_stream_state
*stream
= pipe_ctx
->stream
;
993 struct pipe_ctx
*odm_pipe
;
/* Walk the ODM chain (presumably to count OPPs — the loop body is elided here). */
997 for (odm_pipe
= pipe_ctx
->next_odm_pipe
; odm_pipe
; odm_pipe
= odm_pipe
->next_odm_pipe
)
1001 struct dsc_config dsc_cfg
;
1002 struct dsc_optc_config dsc_optc_cfg
;
1003 enum optc_dsc_mode optc_dsc_mode
;
1005 /* Enable DSC hw block */
/* Each ODM segment compresses 1/opp_cnt of the total picture width. */
1006 dsc_cfg
.pic_width
= (stream
->timing
.h_addressable
+ stream
->timing
.h_border_left
+ stream
->timing
.h_border_right
) / opp_cnt
;
1007 dsc_cfg
.pic_height
= stream
->timing
.v_addressable
+ stream
->timing
.v_border_top
+ stream
->timing
.v_border_bottom
;
1008 dsc_cfg
.pixel_encoding
= stream
->timing
.pixel_encoding
;
1009 dsc_cfg
.color_depth
= stream
->timing
.display_color_depth
;
1010 dsc_cfg
.is_odm
= pipe_ctx
->next_odm_pipe
? true : false;
1011 dsc_cfg
.dc_dsc_cfg
= stream
->timing
.dsc_cfg
;
/* Horizontal slices must divide evenly across the ODM segments. */
1012 ASSERT(dsc_cfg
.dc_dsc_cfg
.num_slices_h
% opp_cnt
== 0);
1013 dsc_cfg
.dc_dsc_cfg
.num_slices_h
/= opp_cnt
;
1015 dsc
->funcs
->dsc_set_config(dsc
, &dsc_cfg
, &dsc_optc_cfg
);
1016 dsc
->funcs
->dsc_enable(dsc
, pipe_ctx
->stream_res
.opp
->inst
);
/* Program and enable the DSC block of every secondary ODM pipe. */
1017 for (odm_pipe
= pipe_ctx
->next_odm_pipe
; odm_pipe
; odm_pipe
= odm_pipe
->next_odm_pipe
) {
1018 struct display_stream_compressor
*odm_dsc
= odm_pipe
->stream_res
.dsc
;
1021 odm_dsc
->funcs
->dsc_set_config(odm_dsc
, &dsc_cfg
, &dsc_optc_cfg
);
1022 odm_dsc
->funcs
->dsc_enable(odm_dsc
, odm_pipe
->stream_res
.opp
->inst
);
/* Restore full-width values before programming the OPTC. */
1024 dsc_cfg
.dc_dsc_cfg
.num_slices_h
*= opp_cnt
;
1025 dsc_cfg
.pic_width
*= opp_cnt
;
1027 optc_dsc_mode
= dsc_optc_cfg
.is_pixel_format_444
? OPTC_DSC_ENABLED_444
: OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED
;
1029 /* Enable DSC in OPTC */
1030 DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx
->stream_res
.tg
->inst
);
1031 pipe_ctx
->stream_res
.tg
->funcs
->set_dsc_config(pipe_ctx
->stream_res
.tg
,
1033 dsc_optc_cfg
.bytes_per_pixel
,
1034 dsc_optc_cfg
.slice_width
);
1036 /* disable DSC in OPTC */
1037 pipe_ctx
->stream_res
.tg
->funcs
->set_dsc_config(
1038 pipe_ctx
->stream_res
.tg
,
1039 OPTC_DSC_DISABLED
, 0, 0);
1041 /* disable DSC block */
1042 dsc
->funcs
->dsc_disable(pipe_ctx
->stream_res
.dsc
);
1043 for (odm_pipe
= pipe_ctx
->next_odm_pipe
; odm_pipe
; odm_pipe
= odm_pipe
->next_odm_pipe
) {
1044 ASSERT(odm_pipe
->stream_res
.dsc
);
1045 odm_pipe
->stream_res
.dsc
->funcs
->dsc_disable(odm_pipe
->stream_res
.dsc
);
1051 * Given any pipe_ctx, return the total ODM combine factor, and optionally return
1052 * the OPPids which are used
/*
 * Return the ODM combine factor (number of OPPs) for the ODM chain that
 * contains @pipe_ctx. If @opp_instances is non-NULL it is filled with the
 * OPP instance ids in chain order; index 0 is always the top pipe's OPP.
 *
 * NOTE(review): the NULL-guard around opp_instances[] writes and the
 * opp_count increments appear elided in this extract — the real file
 * should only dereference opp_instances when it is non-NULL.
 */
1054 static unsigned int get_odm_config(struct pipe_ctx
*pipe_ctx
, unsigned int *opp_instances
)
1056 unsigned int opp_count
= 1;
1057 struct pipe_ctx
*odm_pipe
;
1059 /* First get to the top pipe */
1060 for (odm_pipe
= pipe_ctx
; odm_pipe
->prev_odm_pipe
; odm_pipe
= odm_pipe
->prev_odm_pipe
)
1063 /* First pipe is always used */
1065 opp_instances
[0] = odm_pipe
->stream_res
.opp
->inst
;
1067 /* Find and count odm pipes, if any */
1068 for (odm_pipe
= odm_pipe
->next_odm_pipe
; odm_pipe
; odm_pipe
= odm_pipe
->next_odm_pipe
) {
1070 opp_instances
[opp_count
] = odm_pipe
->stream_res
.opp
->inst
;
/*
 * Reprogram ODM combine for @pipe_ctx: set ODM combine (or bypass) on the
 * timing generator, configure MPC output flow control for each OPP, gate
 * the OPP pipe clocks of secondary ODM pipes, and update DSC to match the
 * new ODM layout — disconnecting DSC from any pipe that is no longer part
 * of the ODM chain.
 *
 * NOTE(review): declarations (i, opp_cnt), the combine/bypass branch
 * structure and several call arguments are elided in this extract.
 */
1077 void dcn32_update_odm(struct dc
*dc
, struct dc_state
*context
, struct pipe_ctx
*pipe_ctx
)
1079 struct pipe_ctx
*odm_pipe
;
1081 int opp_inst
[MAX_PIPES
] = {0};
/* 2x pixel-rate clock needed for interlace or two-pixels-per-container timings. */
1082 bool rate_control_2x_pclk
= (pipe_ctx
->stream
->timing
.flags
.INTERLACE
|| optc2_is_two_pixels_per_containter(&pipe_ctx
->stream
->timing
));
1083 struct mpc_dwb_flow_control flow_control
;
1084 struct mpc
*mpc
= dc
->res_pool
->mpc
;
1087 opp_cnt
= get_odm_config(pipe_ctx
, opp_inst
);
1090 pipe_ctx
->stream_res
.tg
->funcs
->set_odm_combine(
1091 pipe_ctx
->stream_res
.tg
,
1093 &pipe_ctx
->stream
->timing
);
1095 pipe_ctx
->stream_res
.tg
->funcs
->set_odm_bypass(
1096 pipe_ctx
->stream_res
.tg
, &pipe_ctx
->stream
->timing
);
/* ODM combine also requires the doubled pixel-rate clock. */
1098 rate_control_2x_pclk
= rate_control_2x_pclk
|| opp_cnt
> 1;
1099 flow_control
.flow_ctrl_mode
= 0;
1100 flow_control
.flow_ctrl_cnt0
= 0x80;
1101 flow_control
.flow_ctrl_cnt1
= calc_mpc_flow_ctrl_cnt(pipe_ctx
->stream
, opp_cnt
);
1102 if (mpc
->funcs
->set_out_rate_control
) {
1103 for (i
= 0; i
< opp_cnt
; ++i
) {
1104 mpc
->funcs
->set_out_rate_control(
1107 rate_control_2x_pclk
,
/* Gate/ungate the OPP pipe clock of each secondary ODM pipe. */
1112 for (odm_pipe
= pipe_ctx
->next_odm_pipe
; odm_pipe
; odm_pipe
= odm_pipe
->next_odm_pipe
) {
1113 odm_pipe
->stream_res
.opp
->funcs
->opp_pipe_clock_control(
1114 odm_pipe
->stream_res
.opp
,
1118 if (pipe_ctx
->stream_res
.dsc
) {
1119 struct pipe_ctx
*current_pipe_ctx
= &dc
->current_state
->res_ctx
.pipe_ctx
[pipe_ctx
->pipe_idx
];
1121 update_dsc_on_stream(pipe_ctx
, pipe_ctx
->stream
->timing
.flags
.DSC
);
1123 /* Check if no longer using pipe for ODM, then need to disconnect DSC for that pipe */
1124 if (!pipe_ctx
->next_odm_pipe
&& current_pipe_ctx
->next_odm_pipe
&&
1125 current_pipe_ctx
->next_odm_pipe
->stream_res
.dsc
) {
1126 struct display_stream_compressor
*dsc
= current_pipe_ctx
->next_odm_pipe
->stream_res
.dsc
;
1127 /* disconnect DSC block from stream */
1128 dsc
->funcs
->dsc_disconnect(dsc
);
/*
 * Select the DCCG pixel-rate divider values *k1_div / *k2_div for
 * @pipe_ctx based on signal type, pixel encoding and ODM configuration,
 * and return the ODM combine factor.
 *
 * - 128b/132b DP: k1 = k2 = /1.
 * - HDMI TMDS / DVI: k1 = /1; k2 = /2 for YCbCr 4:2:0, else /4.
 * - DP / virtual: k1 = /1; k2 = /2 for two-pixels-per-container, else /4,
 *   tightened to /2 for ODM 2:1 or when the DIG pixel-rate-div policy applies.
 *
 * NOTE(review): the trailing ASSERT/error handling after the
 * PIXEL_RATE_DIV_NA check is elided in this extract.
 */
1133 unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx
*pipe_ctx
, unsigned int *k1_div
, unsigned int *k2_div
)
1135 struct dc_stream_state
*stream
= pipe_ctx
->stream
;
1136 unsigned int odm_combine_factor
= 0;
1137 bool two_pix_per_container
= false;
1139 two_pix_per_container
= optc2_is_two_pixels_per_containter(&stream
->timing
);
1140 odm_combine_factor
= get_odm_config(pipe_ctx
, NULL
);
1142 if (stream
->ctx
->dc
->link_srv
->dp_is_128b_132b_signal(pipe_ctx
)) {
1143 *k1_div
= PIXEL_RATE_DIV_BY_1
;
1144 *k2_div
= PIXEL_RATE_DIV_BY_1
;
1145 } else if (dc_is_hdmi_tmds_signal(stream
->signal
) || dc_is_dvi_signal(stream
->signal
)) {
1146 *k1_div
= PIXEL_RATE_DIV_BY_1
;
1147 if (stream
->timing
.pixel_encoding
== PIXEL_ENCODING_YCBCR420
)
1148 *k2_div
= PIXEL_RATE_DIV_BY_2
;
1150 *k2_div
= PIXEL_RATE_DIV_BY_4
;
1151 } else if (dc_is_dp_signal(stream
->signal
) || dc_is_virtual_signal(stream
->signal
)) {
1152 if (two_pix_per_container
) {
1153 *k1_div
= PIXEL_RATE_DIV_BY_1
;
1154 *k2_div
= PIXEL_RATE_DIV_BY_2
;
1156 *k1_div
= PIXEL_RATE_DIV_BY_1
;
1157 *k2_div
= PIXEL_RATE_DIV_BY_4
;
1158 if ((odm_combine_factor
== 2) || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx
))
1159 *k2_div
= PIXEL_RATE_DIV_BY_2
;
/* Both dividers still NA means no branch above matched — error path elided here. */
1163 if ((*k1_div
== PIXEL_RATE_DIV_NA
) && (*k2_div
== PIXEL_RATE_DIV_NA
))
1166 return odm_combine_factor
;
/*
 * Program the stream encoder's input pixel mode for @pipe_ctx: two pixels
 * per cycle when the timing is two-pixels-per-container, ODM combine is
 * active, or the DP DIG pixel-rate-div policy applies; otherwise one.
 * No-ops when the pipe, its stream, or its stream encoder is missing.
 *
 * NOTE(review): the early "return" and the pix_per_cycle assignment in the
 * divider branch are elided in this extract.
 */
1169 void dcn32_set_pixels_per_cycle(struct pipe_ctx
*pipe_ctx
)
1171 uint32_t pix_per_cycle
= 1;
1172 uint32_t odm_combine_factor
= 1;
/* Guard: nothing to program without a stream and a stream encoder. */
1174 if (!pipe_ctx
|| !pipe_ctx
->stream
|| !pipe_ctx
->stream_res
.stream_enc
)
1177 odm_combine_factor
= get_odm_config(pipe_ctx
, NULL
);
1178 if (optc2_is_two_pixels_per_containter(&pipe_ctx
->stream
->timing
) || odm_combine_factor
> 1
1179 || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx
))
1182 if (pipe_ctx
->stream_res
.stream_enc
->funcs
->set_input_mode
)
1183 pipe_ctx
->stream_res
.stream_enc
->funcs
->set_input_mode(pipe_ctx
->stream_res
.stream_enc
,
/*
 * Resynchronize the DIO FIFO with the DCCG. OTG-master pipes whose stream
 * is dpms-off or virtual (excluding SubVP phantom pipes) are temporarily
 * disabled and their sync context reset, the DCCG DIO FIFO resync is
 * triggered, then every OTG disabled here is re-enabled.
 *
 * NOTE(review): the loop-index declaration and the "continue" after the
 * OTG_MASTER check are elided in this extract.
 */
1187 void dcn32_resync_fifo_dccg_dio(struct dce_hwseq
*hws
, struct dc
*dc
, struct dc_state
*context
)
1190 struct pipe_ctx
*pipe
= NULL
;
/* Remember which OTGs we turned off so only those get re-enabled below. */
1191 bool otg_disabled
[MAX_PIPES
] = {false};
1193 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
1194 pipe
= &dc
->current_state
->res_ctx
.pipe_ctx
[i
];
1196 if (!resource_is_pipe_type(pipe
, OTG_MASTER
))
1199 if ((pipe
->stream
->dpms_off
|| dc_is_virtual_signal(pipe
->stream
->signal
))
1200 && pipe
->stream
->mall_stream_config
.type
!= SUBVP_PHANTOM
) {
1201 pipe
->stream_res
.tg
->funcs
->disable_crtc(pipe
->stream_res
.tg
);
1202 reset_sync_context_for_pipe(dc
, context
, i
);
1203 otg_disabled
[i
] = true;
1207 hws
->ctx
->dc
->res_pool
->dccg
->funcs
->trigger_dio_fifo_resync(hws
->ctx
->dc
->res_pool
->dccg
);
/* Re-enable exactly the OTGs disabled above. */
1209 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
1210 pipe
= &dc
->current_state
->res_ctx
.pipe_ctx
[i
];
1212 if (otg_disabled
[i
])
1213 pipe
->stream_res
.tg
->funcs
->enable_crtc(pipe
->stream_res
.tg
);
/*
 * Unblank the stream on @pipe_ctx after link training. Builds the
 * encoder unblank parameters (timing + link rate), then unblanks via the
 * HPO DP stream encoder for 128b/132b links, or the regular DP stream
 * encoder otherwise — halving pix_clk and enabling encoder-side ODM
 * combine when two pixels per container / ODM / the DIG pixel-rate-div
 * policy applies. Finally turns the eDP backlight on for eDP sinks.
 *
 * NOTE(review): opp_cnt counting in the first ODM loop and the
 * pix_per_cycle assignment are elided in this extract. Also "¶ms" on
 * the dp_unblank call is mojibake for "&params" (HTML-entity damage) —
 * fix the encoding in the real file.
 */
1217 void dcn32_unblank_stream(struct pipe_ctx
*pipe_ctx
,
1218 struct dc_link_settings
*link_settings
)
1220 struct encoder_unblank_param params
= {0};
1221 struct dc_stream_state
*stream
= pipe_ctx
->stream
;
1222 struct dc_link
*link
= stream
->link
;
1223 struct dce_hwseq
*hws
= link
->dc
->hwseq
;
1224 struct pipe_ctx
*odm_pipe
;
1225 uint32_t pix_per_cycle
= 1;
1228 for (odm_pipe
= pipe_ctx
->next_odm_pipe
; odm_pipe
; odm_pipe
= odm_pipe
->next_odm_pipe
)
1231 /* only 3 items below are used by unblank */
1232 params
.timing
= pipe_ctx
->stream
->timing
;
1234 params
.link_settings
.link_rate
= link_settings
->link_rate
;
1236 if (link
->dc
->link_srv
->dp_is_128b_132b_signal(pipe_ctx
)) {
1237 /* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
1238 pipe_ctx
->stream_res
.hpo_dp_stream_enc
->funcs
->dp_unblank(
1239 pipe_ctx
->stream_res
.hpo_dp_stream_enc
,
1240 pipe_ctx
->stream_res
.tg
->inst
);
1241 } else if (dc_is_dp_signal(pipe_ctx
->stream
->signal
)) {
1242 if (optc2_is_two_pixels_per_containter(&stream
->timing
) || params
.opp_cnt
> 1
1243 || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx
)) {
/* Encoder sees half the pixel clock when processing 2 pixels per cycle. */
1244 params
.timing
.pix_clk_100hz
/= 2;
1247 pipe_ctx
->stream_res
.stream_enc
->funcs
->dp_set_odm_combine(
1248 pipe_ctx
->stream_res
.stream_enc
, pix_per_cycle
> 1);
1249 pipe_ctx
->stream_res
.stream_enc
->funcs
->dp_unblank(link
, pipe_ctx
->stream_res
.stream_enc
, ¶ms
);
1252 if (link
->local_sink
&& link
->local_sink
->sink_signal
== SIGNAL_TYPE_EDP
)
1253 hws
->funcs
.edp_backlight_control(link
, true);
/*
 * Return true when the DP DIG pixel-rate divider policy applies to
 * @pipe_ctx: 8b/10b DP signal, horizontal timing divisible by 2, and the
 * enable_dp_dig_pixel_rate_div_policy debug option set.
 *
 * NOTE(review): the "return false"/"return true" statements and closing
 * brace are elided in this extract.
 */
1256 bool dcn32_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx
*pipe_ctx
)
1258 struct dc
*dc
= pipe_ctx
->stream
->ctx
->dc
;
/* Odd horizontal timings cannot be split into 2 pixels per cycle. */
1260 if (!is_h_timing_divisible_by_2(pipe_ctx
->stream
))
1263 if (dc_is_dp_signal(pipe_ctx
->stream
->signal
) && !dc
->link_srv
->dp_is_128b_132b_signal(pipe_ctx
) &&
1264 dc
->debug
.enable_dp_dig_pixel_rate_div_policy
)
/*
 * Workaround: keep SYMCLK alive for the OTG after the PHY TX is powered
 * off. If any OTG still references this link's SYMCLK, re-program the
 * pixel clock on every OPP-head pipe driven by @link and mark the PHY
 * state SYMCLK_ON_TX_OFF. See the detailed rationale below.
 */
1269 static void apply_symclk_on_tx_off_wa(struct dc_link
*link
)
1271 /* There are use cases where SYMCLK is referenced by OTG. For instance
1272 * for TMDS signal, OTG relies on SYMCLK even if TX video output is off.
1273 * However current link interface will power off PHY when disabling link
1274 * output. This will turn off SYMCLK generated by PHY. The workaround is
1275 * to identify such case where SYMCLK is still in use by OTG when we
1276 * power off PHY. When this is detected, we will temporarily power PHY
1277 * back on and move PHY's SYMCLK state to SYMCLK_ON_TX_OFF by calling
1278 * program_pix_clk interface. When OTG is disabled, we will then power
1279 * off PHY by calling disable link output again.
1281 * In future dcn generations, we plan to rework transmitter control
1282 * interface so that we could have an option to set SYMCLK ON TX OFF
1283 * state in one step without this workaround
1286 struct dc
*dc
= link
->ctx
->dc
;
1287 struct pipe_ctx
*pipe_ctx
= NULL
;
/* Only needed while some OTG still holds a reference to this link's SYMCLK. */
1290 if (link
->phy_state
.symclk_ref_cnts
.otg
> 0) {
1291 for (i
= 0; i
< MAX_PIPES
; i
++) {
1292 pipe_ctx
= &dc
->current_state
->res_ctx
.pipe_ctx
[i
];
1293 if (resource_is_pipe_type(pipe_ctx
, OPP_HEAD
) && pipe_ctx
->stream
->link
== link
) {
1294 pipe_ctx
->clock_source
->funcs
->program_pix_clk(
1295 pipe_ctx
->clock_source
,
1296 &pipe_ctx
->stream_res
.pix_clk_params
,
1297 dc
->link_srv
->dp_get_encoding_format(
1298 &pipe_ctx
->link_config
.dp_link_settings
),
1299 &pipe_ctx
->pll_settings
);
1300 link
->phy_state
.symclk_state
= SYMCLK_ON_TX_OFF
;
/*
 * Disable the link output for @link. For eDP (with backlight control and
 * without skip_implict_edp_power_control) the backlight is turned off
 * first and panel power cut afterwards; otherwise the DMCU PHY lock is
 * taken around the PHY disable. After the PHY is off, the SYMCLK state is
 * marked SYMCLK_OFF_TX_OFF, the source-sequence trace is recorded, and
 * the SYMCLK-on-TX-off workaround is applied (see
 * apply_symclk_on_tx_off_wa).
 */
1307 void dcn32_disable_link_output(struct dc_link
*link
,
1308 const struct link_resource
*link_res
,
1309 enum signal_type signal
)
1311 struct dc
*dc
= link
->ctx
->dc
;
1312 const struct link_hwss
*link_hwss
= get_link_hwss(link
, link_res
);
1313 struct dmcu
*dmcu
= dc
->res_pool
->dmcu
;
/* eDP: kill the backlight before the link goes down; else lock the PHY via DMCU. */
1315 if (signal
== SIGNAL_TYPE_EDP
&&
1316 link
->dc
->hwss
.edp_backlight_control
&&
1317 !link
->skip_implict_edp_power_control
)
1318 link
->dc
->hwss
.edp_backlight_control(link
, false);
1319 else if (dmcu
!= NULL
&& dmcu
->funcs
->lock_phy
)
1320 dmcu
->funcs
->lock_phy(dmcu
);
1322 link_hwss
->disable_link_output(link
, link_res
, signal
);
1323 link
->phy_state
.symclk_state
= SYMCLK_OFF_TX_OFF
;
/* eDP: cut panel power after output is disabled; else release the PHY lock. */
1325 if (signal
== SIGNAL_TYPE_EDP
&&
1326 link
->dc
->hwss
.edp_backlight_control
&&
1327 !link
->skip_implict_edp_power_control
)
1328 link
->dc
->hwss
.edp_power_control(link
, false);
1329 else if (dmcu
!= NULL
&& dmcu
->funcs
->lock_phy
)
1330 dmcu
->funcs
->unlock_phy(dmcu
);
1332 dc
->link_srv
->dp_trace_source_sequence(link
, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY
);
1334 apply_symclk_on_tx_off_wa(link
);
1337 /* For SubVP the main pipe can have a viewport position change
1338 * without a full update. In this case we must also update the
1339 * viewport positions for the phantom pipe accordingly.
/*
 * Mirror a SubVP main pipe's viewport position change onto its paired
 * phantom pipe. Finds the SUBVP_MAIN pipe whose paired_stream is
 * @phantom_pipe's stream; if that main pipe's plane has a pending
 * position change, the src/clip/dst rect coordinates are copied to the
 * phantom plane, the phantom's position_change flag is set, and its
 * scaling parameters are rebuilt.
 *
 * NOTE(review): only clip_rect.x is copied here (no clip_rect.y visible)
 * — possibly a dropped line in this extract; confirm against the full file.
 */
1341 void dcn32_update_phantom_vp_position(struct dc
*dc
,
1342 struct dc_state
*context
,
1343 struct pipe_ctx
*phantom_pipe
)
1346 struct dc_plane_state
*phantom_plane
= phantom_pipe
->plane_state
;
1348 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
1349 struct pipe_ctx
*pipe
= &context
->res_ctx
.pipe_ctx
[i
];
/* Match the SubVP main pipe paired with this phantom stream. */
1351 if (pipe
->stream
&& pipe
->stream
->mall_stream_config
.type
== SUBVP_MAIN
&&
1352 pipe
->stream
->mall_stream_config
.paired_stream
== phantom_pipe
->stream
) {
1353 if (pipe
->plane_state
&& pipe
->plane_state
->update_flags
.bits
.position_change
) {
1355 phantom_plane
->src_rect
.x
= pipe
->plane_state
->src_rect
.x
;
1356 phantom_plane
->src_rect
.y
= pipe
->plane_state
->src_rect
.y
;
1357 phantom_plane
->clip_rect
.x
= pipe
->plane_state
->clip_rect
.x
;
1358 phantom_plane
->dst_rect
.x
= pipe
->plane_state
->dst_rect
.x
;
1359 phantom_plane
->dst_rect
.y
= pipe
->plane_state
->dst_rect
.y
;
1361 phantom_pipe
->plane_state
->update_flags
.bits
.position_change
= 1;
1362 resource_build_scaling_params(phantom_pipe
);
1369 /* Treat the phantom pipe as if it needs to be fully enabled.
1370 * If the pipe was previously in use but not phantom, it would
1371 * have been disabled earlier in the sequence so we need to run
1372 * the full enable sequence.
/*
 * Mark a SubVP phantom pipe for full reprogramming: clear all update
 * flags, then for a DPP pipe set every per-plane flag (enable, mpcc,
 * dppclk, hubp, gamut remap, scaler, viewport, det_size) and, if it is
 * also the OTG master, the odm and global_sync flags.
 */
1374 void dcn32_apply_update_flags_for_phantom(struct pipe_ctx
*phantom_pipe
)
1376 phantom_pipe
->update_flags
.raw
= 0;
1377 if (phantom_pipe
->stream
&& phantom_pipe
->stream
->mall_stream_config
.type
== SUBVP_PHANTOM
) {
1378 if (resource_is_pipe_type(phantom_pipe
, DPP_PIPE
)) {
1379 phantom_pipe
->update_flags
.bits
.enable
= 1;
1380 phantom_pipe
->update_flags
.bits
.mpcc
= 1;
1381 phantom_pipe
->update_flags
.bits
.dppclk
= 1;
1382 phantom_pipe
->update_flags
.bits
.hubp_interdependent
= 1;
1383 phantom_pipe
->update_flags
.bits
.hubp_rq_dlg_ttu
= 1;
1384 phantom_pipe
->update_flags
.bits
.gamut_remap
= 1;
1385 phantom_pipe
->update_flags
.bits
.scaler
= 1;
1386 phantom_pipe
->update_flags
.bits
.viewport
= 1;
1387 phantom_pipe
->update_flags
.bits
.det_size
= 1;
/* OTG-master phantom additionally needs ODM and global sync reprogrammed. */
1388 if (resource_is_pipe_type(phantom_pipe
, OTG_MASTER
)) {
1389 phantom_pipe
->update_flags
.bits
.odm
= 1;
1390 phantom_pipe
->update_flags
.bits
.global_sync
= 1;
/*
 * Return true when DSC instance @dsc_inst is powered on (ungated).
 * Reads DOMAIN_PGFSM_PWR_STATUS from the power-gating domain register
 * for the instance (DOMAIN16..19 for DSC 0..3); a status of 0 means the
 * domain is powered.
 *
 * NOTE(review): the switch(dsc_inst)/case labels around the REG_GETs are
 * elided in this extract — only one REG_GET executes per call.
 */
1396 bool dcn32_dsc_pg_status(
1397 struct dce_hwseq
*hws
,
1398 unsigned int dsc_inst
)
1400 uint32_t pwr_status
= 0;
1404 REG_GET(DOMAIN16_PG_STATUS
,
1405 DOMAIN_PGFSM_PWR_STATUS
, &pwr_status
);
1409 REG_GET(DOMAIN17_PG_STATUS
,
1410 DOMAIN_PGFSM_PWR_STATUS
, &pwr_status
);
1413 REG_GET(DOMAIN18_PG_STATUS
,
1414 DOMAIN_PGFSM_PWR_STATUS
, &pwr_status
);
1417 REG_GET(DOMAIN19_PG_STATUS
,
1418 DOMAIN_PGFSM_PWR_STATUS
, &pwr_status
);
/* Unknown DSC instance — should be unreachable. */
1421 BREAK_TO_DEBUGGER();
1425 return pwr_status
== 0;
/*
 * Update DSC power gating to match @context: power on any DSC instance
 * that the new context acquires but is currently gated; when
 * @safe_to_disable, power off ungated instances the context no longer
 * uses. Called once before and once after applying a new context so
 * ungating happens early and gating only when safe.
 */
1428 void dcn32_update_dsc_pg(struct dc
*dc
,
1429 struct dc_state
*context
,
1430 bool safe_to_disable
)
1432 struct dce_hwseq
*hws
= dc
->hwseq
;
1435 for (i
= 0; i
< dc
->res_pool
->res_cap
->num_dsc
; i
++) {
1436 struct display_stream_compressor
*dsc
= dc
->res_pool
->dscs
[i
];
1437 bool is_dsc_ungated
= hws
->funcs
.dsc_pg_status(hws
, dsc
->inst
);
1439 if (context
->res_ctx
.is_dsc_acquired
[i
]) {
/* Needed by the new context but gated: power it on. */
1440 if (!is_dsc_ungated
) {
1441 hws
->funcs
.dsc_pg_control(hws
, dsc
->inst
, true);
/* Unused and currently powered: gate it, but only when safe. */
1443 } else if (safe_to_disable
) {
1444 if (is_dsc_ungated
) {
1445 hws
->funcs
.dsc_pg_control(hws
, dsc
->inst
, false);
/*
 * Program SubVP phantom streams for @context. First pass: for every pipe
 * turning from an active non-phantom into a phantom, wait on the old
 * pipe's OTG (through VBLANK then VACTIVE) so the pending double-buffer
 * update completes before any phantom programming. Second pass: if the
 * new context contains a phantom pipe, apply the context to hardware so
 * the phantom timings take effect.
 *
 * NOTE(review): the loop-index declaration, the first wait's state
 * argument and a "break" after apply_ctx_to_hw appear elided in this
 * extract.
 */
1451 void dcn32_enable_phantom_streams(struct dc
*dc
, struct dc_state
*context
)
1455 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
1456 struct pipe_ctx
*pipe
= &context
->res_ctx
.pipe_ctx
[i
];
1457 struct pipe_ctx
*old_pipe
= &dc
->current_state
->res_ctx
.pipe_ctx
[i
];
1459 /* If an active, non-phantom pipe is being transitioned into a phantom
1460 * pipe, wait for the double buffer update to complete first before we do
1461 * ANY phantom pipe programming.
1463 if (pipe
->stream
&& pipe
->stream
->mall_stream_config
.type
== SUBVP_PHANTOM
&&
1464 old_pipe
->stream
&& old_pipe
->stream
->mall_stream_config
.type
!= SUBVP_PHANTOM
) {
1465 old_pipe
->stream_res
.tg
->funcs
->wait_for_state(
1466 old_pipe
->stream_res
.tg
,
1468 old_pipe
->stream_res
.tg
->funcs
->wait_for_state(
1469 old_pipe
->stream_res
.tg
,
1470 CRTC_STATE_VACTIVE
);
1473 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
1474 struct pipe_ctx
*new_pipe
= &context
->res_ctx
.pipe_ctx
[i
];
1476 if (new_pipe
->stream
&& new_pipe
->stream
->mall_stream_config
.type
== SUBVP_PHANTOM
) {
1477 // If old context or new context has phantom pipes, apply
1478 // the phantom timings now. We can't change the phantom
1479 // pipe configuration safely without driver acquiring
1480 // the DMCUB lock first.
1481 dc
->hwss
.apply_ctx_to_hw(dc
, context
);
1487 /* Blank pixel data during initialization */
/*
 * Blank the pixel data behind timing generator @tg during hardware init.
 * Reads the OTG active size and the OPTC's OPP source(s), looks up the
 * matching OPP(s) in the resource pool, and programs each OPP's display
 * pattern generator with a solid black color (half width per OPP when
 * two OPPs feed the OTG), then waits for the blank to complete.
 *
 * NOTE(review): the dc parameter line, loop-index declaration, early
 * returns for out-of-range OPP ids, and several pattern-generator
 * arguments (black_color, width/height) are elided in this extract.
 */
1488 void dcn32_init_blank(
1490 struct timing_generator
*tg
)
1492 struct dce_hwseq
*hws
= dc
->hwseq
;
1493 enum dc_color_space color_space
;
1494 struct tg_color black_color
= {0};
1495 struct output_pixel_processor
*opp
= NULL
;
1496 struct output_pixel_processor
*bottom_opp
= NULL
;
1497 uint32_t num_opps
, opp_id_src0
, opp_id_src1
;
1498 uint32_t otg_active_width
, otg_active_height
;
1501 /* program opp dpg blank color */
1502 color_space
= COLOR_SPACE_SRGB
;
1503 color_space_to_black_color(dc
, color_space
, &black_color
);
1505 /* get the OTG active size */
1506 tg
->funcs
->get_otg_active_size(tg
,
1508 &otg_active_height
);
1510 /* get the OPTC source */
1511 tg
->funcs
->get_optc_source(tg
, &num_opps
, &opp_id_src0
, &opp_id_src1
);
/* Bail out on an OPP id beyond the pool (error path elided here). */
1513 if (opp_id_src0
>= dc
->res_pool
->res_cap
->num_opp
) {
1518 for (i
= 0; i
< dc
->res_pool
->res_cap
->num_opp
; i
++) {
1519 if (dc
->res_pool
->opps
[i
] != NULL
&& dc
->res_pool
->opps
[i
]->inst
== opp_id_src0
) {
1520 opp
= dc
->res_pool
->opps
[i
];
/* Two OPPs (ODM 2:1): each OPP blanks half the OTG width. */
1525 if (num_opps
== 2) {
1526 otg_active_width
= otg_active_width
/ 2;
1528 if (opp_id_src1
>= dc
->res_pool
->res_cap
->num_opp
) {
1532 for (i
= 0; i
< dc
->res_pool
->res_cap
->num_opp
; i
++) {
1533 if (dc
->res_pool
->opps
[i
] != NULL
&& dc
->res_pool
->opps
[i
]->inst
== opp_id_src1
) {
1534 bottom_opp
= dc
->res_pool
->opps
[i
];
1540 if (opp
&& opp
->funcs
->opp_set_disp_pattern_generator
)
1541 opp
->funcs
->opp_set_disp_pattern_generator(
1543 CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR
,
1544 CONTROLLER_DP_COLOR_SPACE_UDEFINED
,
1545 COLOR_DEPTH_UNDEFINED
,
1551 if (num_opps
== 2) {
1552 if (bottom_opp
&& bottom_opp
->funcs
->opp_set_disp_pattern_generator
) {
1553 bottom_opp
->funcs
->opp_set_disp_pattern_generator(
1555 CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR
,
1556 CONTROLLER_DP_COLOR_SPACE_UDEFINED
,
1557 COLOR_DEPTH_UNDEFINED
,
1562 hws
->funcs
.wait_for_blank_complete(bottom_opp
);
1567 hws
->funcs
.wait_for_blank_complete(opp
);
/*
 * Blank a SubVP phantom pipe's output: program a solid black pattern of
 * the given size on the OPP feeding timing generator @tg, and wait for
 * the blank to complete if the TG is running.
 *
 * NOTE(review): the width/height parameter lines, loop-index
 * declaration, and several pattern-generator arguments (black_color,
 * active width/height) are elided in this extract.
 */
1570 void dcn32_blank_phantom(struct dc
*dc
,
1571 struct timing_generator
*tg
,
1575 struct dce_hwseq
*hws
= dc
->hwseq
;
1576 enum dc_color_space color_space
;
1577 struct tg_color black_color
= {0};
1578 struct output_pixel_processor
*opp
= NULL
;
1579 uint32_t num_opps
, opp_id_src0
, opp_id_src1
;
1580 uint32_t otg_active_width
, otg_active_height
;
1583 /* program opp dpg blank color */
1584 color_space
= COLOR_SPACE_SRGB
;
1585 color_space_to_black_color(dc
, color_space
, &black_color
);
/* Caller supplies the phantom's active size instead of reading the OTG. */
1587 otg_active_width
= width
;
1588 otg_active_height
= height
;
1590 /* get the OPTC source */
1591 tg
->funcs
->get_optc_source(tg
, &num_opps
, &opp_id_src0
, &opp_id_src1
);
1592 ASSERT(opp_id_src0
< dc
->res_pool
->res_cap
->num_opp
);
1594 for (i
= 0; i
< dc
->res_pool
->res_cap
->num_opp
; i
++) {
1595 if (dc
->res_pool
->opps
[i
] != NULL
&& dc
->res_pool
->opps
[i
]->inst
== opp_id_src0
) {
1596 opp
= dc
->res_pool
->opps
[i
];
1601 if (opp
&& opp
->funcs
->opp_set_disp_pattern_generator
)
1602 opp
->funcs
->opp_set_disp_pattern_generator(
1604 CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR
,
1605 CONTROLLER_DP_COLOR_SPACE_UDEFINED
,
1606 COLOR_DEPTH_UNDEFINED
,
/* Only wait for the blank if the timing generator is actually running. */
1612 if (tg
->funcs
->is_tg_enabled(tg
))
1613 hws
->funcs
.wait_for_blank_complete(opp
);
/*
 * Decide whether the pipe-topology change from @cur_ctx to @new_ctx can
 * be applied without visual glitches. Per pipe index, the transition is
 * seamless when: either side is a FREE_PIPE; both are OTG masters on the
 * same stream; both are OPP heads sharing the same timing generator; or
 * both are DPP pipes sharing the same OPP. Any other per-pipe transition
 * clears is_seamless.
 *
 * NOTE(review): the loop-index declaration, "continue" statements inside
 * the seamless cases, and the final return are elided in this extract.
 */
1616 bool dcn32_is_pipe_topology_transition_seamless(struct dc
*dc
,
1617 const struct dc_state
*cur_ctx
,
1618 const struct dc_state
*new_ctx
)
1621 const struct pipe_ctx
*cur_pipe
, *new_pipe
;
1622 bool is_seamless
= true;
1624 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
1625 cur_pipe
= &cur_ctx
->res_ctx
.pipe_ctx
[i
];
1626 new_pipe
= &new_ctx
->res_ctx
.pipe_ctx
[i
];
1628 if (resource_is_pipe_type(cur_pipe
, FREE_PIPE
) ||
1629 resource_is_pipe_type(new_pipe
, FREE_PIPE
))
1630 /* adding or removing free pipes is always seamless */
1632 else if (resource_is_pipe_type(cur_pipe
, OTG_MASTER
)) {
1633 if (resource_is_pipe_type(new_pipe
, OTG_MASTER
))
1634 if (cur_pipe
->stream
->stream_id
== new_pipe
->stream
->stream_id
)
1635 /* OTG master with the same stream is seamless */
1637 } else if (resource_is_pipe_type(cur_pipe
, OPP_HEAD
)) {
1638 if (resource_is_pipe_type(new_pipe
, OPP_HEAD
)) {
1639 if (cur_pipe
->stream_res
.tg
== new_pipe
->stream_res
.tg
)
1641 * OPP heads sharing the same timing
1642 * generator is seamless
1646 } else if (resource_is_pipe_type(cur_pipe
, DPP_PIPE
)) {
1647 if (resource_is_pipe_type(new_pipe
, DPP_PIPE
)) {
1648 if (cur_pipe
->stream_res
.opp
== new_pipe
->stream_res
.opp
)
1650 * DPP pipes sharing the same OPP head is
1658 * This pipe's transition doesn't fall under any seamless
1661 is_seamless
= false;
1668 void dcn32_prepare_bandwidth(struct dc
*dc
,
1669 struct dc_state
*context
)
1671 bool p_state_change_support
= context
->bw_ctx
.bw
.dcn
.clk
.p_state_change_support
;
1672 /* Any transition into an FPO config should disable MCLK switching first to avoid
1673 * driver and FW P-State synchronization issues.
1675 if (context
->bw_ctx
.bw
.dcn
.clk
.fw_based_mclk_switching
|| dc
->clk_mgr
->clks
.fw_based_mclk_switching
) {
1676 dc
->optimized_required
= true;
1677 context
->bw_ctx
.bw
.dcn
.clk
.p_state_change_support
= false;
1680 if (dc
->clk_mgr
->dc_mode_softmax_enabled
)
1681 if (dc
->clk_mgr
->clks
.dramclk_khz
<= dc
->clk_mgr
->bw_params
->dc_mode_softmax_memclk
* 1000 &&
1682 context
->bw_ctx
.bw
.dcn
.clk
.dramclk_khz
> dc
->clk_mgr
->bw_params
->dc_mode_softmax_memclk
* 1000)
1683 dc
->clk_mgr
->funcs
->set_max_memclk(dc
->clk_mgr
, dc
->clk_mgr
->bw_params
->clk_table
.entries
[dc
->clk_mgr
->bw_params
->clk_table
.num_entries
- 1].memclk_mhz
);
1685 dcn20_prepare_bandwidth(dc
, context
);
1687 if (!context
->bw_ctx
.bw
.dcn
.clk
.fw_based_mclk_switching
)
1688 dc_dmub_srv_p_state_delegate(dc
, false, context
);
1690 if (context
->bw_ctx
.bw
.dcn
.clk
.fw_based_mclk_switching
|| dc
->clk_mgr
->clks
.fw_based_mclk_switching
) {
1691 /* After disabling P-State, restore the original value to ensure we get the correct P-State
1692 * on the next optimize.
1694 context
->bw_ctx
.bw
.dcn
.clk
.p_state_change_support
= p_state_change_support
;