2 * Copyright 2016 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
31 #include "custom_float.h"
32 #include "dcn10_hw_sequencer.h"
33 #include "dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
37 #include "dcn10_optc.h"
38 #include "dcn10_dpp.h"
39 #include "dcn10_mpc.h"
40 #include "timing_generator.h"
44 #include "reg_helper.h"
45 #include "dcn10_hubp.h"
46 #include "dcn10_hubbub.h"
47 #include "dcn10_cm_common.h"
50 #include "link_hwss.h"
51 #include "dpcd_defs.h"
53 #include "dce/dmub_psr.h"
54 #include "dc_dmub_srv.h"
55 #include "dce/dmub_hw_lock_mgr.h"
57 #include "dce/dmub_outbox.h"
60 #define DC_LOGGER_INIT(logger)
68 #define FN(reg_name, field_name) \
69 hws->shifts->field_name, hws->masks->field_name
71 /*print is 17 wide, first two characters are spaces*/
72 #define DTN_INFO_MICRO_SEC(ref_cycle) \
73 print_microsec(dc_ctx, log_ctx, ref_cycle)
75 #define GAMMA_HW_POINTS_NUM 256
77 #define PGFSM_POWER_ON 0
78 #define PGFSM_POWER_OFF 2
80 static void print_microsec(struct dc_context
*dc_ctx
,
81 struct dc_log_buffer_ctx
*log_ctx
,
84 const uint32_t ref_clk_mhz
= dc_ctx
->dc
->res_pool
->ref_clocks
.dchub_ref_clock_inKhz
/ 1000;
85 static const unsigned int frac
= 1000;
86 uint32_t us_x10
= (ref_cycle
* frac
) / ref_clk_mhz
;
88 DTN_INFO(" %11d.%03d",
93 void dcn10_lock_all_pipes(struct dc
*dc
,
94 struct dc_state
*context
,
97 struct pipe_ctx
*pipe_ctx
;
98 struct pipe_ctx
*old_pipe_ctx
;
99 struct timing_generator
*tg
;
102 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
103 old_pipe_ctx
= &dc
->current_state
->res_ctx
.pipe_ctx
[i
];
104 pipe_ctx
= &context
->res_ctx
.pipe_ctx
[i
];
105 tg
= pipe_ctx
->stream_res
.tg
;
108 * Only lock the top pipe's tg to prevent redundant
109 * (un)locking. Also skip if pipe is disabled.
111 if (pipe_ctx
->top_pipe
||
113 (!pipe_ctx
->plane_state
&& !old_pipe_ctx
->plane_state
) ||
114 !tg
->funcs
->is_tg_enabled(tg
))
118 dc
->hwss
.pipe_control_lock(dc
, pipe_ctx
, true);
120 dc
->hwss
.pipe_control_lock(dc
, pipe_ctx
, false);
124 static void log_mpc_crc(struct dc
*dc
,
125 struct dc_log_buffer_ctx
*log_ctx
)
127 struct dc_context
*dc_ctx
= dc
->ctx
;
128 struct dce_hwseq
*hws
= dc
->hwseq
;
130 if (REG(MPC_CRC_RESULT_GB
))
131 DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
132 REG_READ(MPC_CRC_RESULT_GB
), REG_READ(MPC_CRC_RESULT_C
), REG_READ(MPC_CRC_RESULT_AR
));
133 if (REG(DPP_TOP0_DPP_CRC_VAL_B_A
))
134 DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
135 REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A
), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G
));
138 static void dcn10_log_hubbub_state(struct dc
*dc
,
139 struct dc_log_buffer_ctx
*log_ctx
)
141 struct dc_context
*dc_ctx
= dc
->ctx
;
142 struct dcn_hubbub_wm wm
;
145 memset(&wm
, 0, sizeof(struct dcn_hubbub_wm
));
146 dc
->res_pool
->hubbub
->funcs
->wm_read_state(dc
->res_pool
->hubbub
, &wm
);
148 DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent"
149 " sr_enter sr_exit dram_clk_change\n");
151 for (i
= 0; i
< 4; i
++) {
152 struct dcn_hubbub_wm_set
*s
;
155 DTN_INFO("WM_Set[%d]:", s
->wm_set
);
156 DTN_INFO_MICRO_SEC(s
->data_urgent
);
157 DTN_INFO_MICRO_SEC(s
->pte_meta_urgent
);
158 DTN_INFO_MICRO_SEC(s
->sr_enter
);
159 DTN_INFO_MICRO_SEC(s
->sr_exit
);
160 DTN_INFO_MICRO_SEC(s
->dram_clk_change
);
167 static void dcn10_log_hubp_states(struct dc
*dc
, void *log_ctx
)
169 struct dc_context
*dc_ctx
= dc
->ctx
;
170 struct resource_pool
*pool
= dc
->res_pool
;
174 "HUBP: format addr_hi width height rot mir sw_mode dcc_en blank_en clock_en ttu_dis underflow min_ttu_vblank qos_low_wm qos_high_wm\n");
175 for (i
= 0; i
< pool
->pipe_count
; i
++) {
176 struct hubp
*hubp
= pool
->hubps
[i
];
177 struct dcn_hubp_state
*s
= &(TO_DCN10_HUBP(hubp
)->state
);
179 hubp
->funcs
->hubp_read_state(hubp
);
182 DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh",
195 s
->underflow_status
);
196 DTN_INFO_MICRO_SEC(s
->min_ttu_vblank
);
197 DTN_INFO_MICRO_SEC(s
->qos_level_low_wm
);
198 DTN_INFO_MICRO_SEC(s
->qos_level_high_wm
);
203 DTN_INFO("\n=========RQ========\n");
204 DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
205 " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
206 " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
207 for (i
= 0; i
< pool
->pipe_count
; i
++) {
208 struct dcn_hubp_state
*s
= &(TO_DCN10_HUBP(pool
->hubps
[i
])->state
);
209 struct _vcs_dpi_display_rq_regs_st
*rq_regs
= &s
->rq_regs
;
212 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
213 pool
->hubps
[i
]->inst
, rq_regs
->drq_expansion_mode
, rq_regs
->prq_expansion_mode
, rq_regs
->mrq_expansion_mode
,
214 rq_regs
->crq_expansion_mode
, rq_regs
->plane1_base_address
, rq_regs
->rq_regs_l
.chunk_size
,
215 rq_regs
->rq_regs_l
.min_chunk_size
, rq_regs
->rq_regs_l
.meta_chunk_size
,
216 rq_regs
->rq_regs_l
.min_meta_chunk_size
, rq_regs
->rq_regs_l
.dpte_group_size
,
217 rq_regs
->rq_regs_l
.mpte_group_size
, rq_regs
->rq_regs_l
.swath_height
,
218 rq_regs
->rq_regs_l
.pte_row_height_linear
, rq_regs
->rq_regs_c
.chunk_size
, rq_regs
->rq_regs_c
.min_chunk_size
,
219 rq_regs
->rq_regs_c
.meta_chunk_size
, rq_regs
->rq_regs_c
.min_meta_chunk_size
,
220 rq_regs
->rq_regs_c
.dpte_group_size
, rq_regs
->rq_regs_c
.mpte_group_size
,
221 rq_regs
->rq_regs_c
.swath_height
, rq_regs
->rq_regs_c
.pte_row_height_linear
);
224 DTN_INFO("========DLG========\n");
225 DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s "
226 " dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq"
227 " vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll"
228 " rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
229 " mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
230 " rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
231 " x_rp_dlay x_rr_sfl\n");
232 for (i
= 0; i
< pool
->pipe_count
; i
++) {
233 struct dcn_hubp_state
*s
= &(TO_DCN10_HUBP(pool
->hubps
[i
])->state
);
234 struct _vcs_dpi_display_dlg_regs_st
*dlg_regs
= &s
->dlg_attr
;
237 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
238 " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
239 " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
240 pool
->hubps
[i
]->inst
, dlg_regs
->refcyc_h_blank_end
, dlg_regs
->dlg_vblank_end
, dlg_regs
->min_dst_y_next_start
,
241 dlg_regs
->refcyc_per_htotal
, dlg_regs
->refcyc_x_after_scaler
, dlg_regs
->dst_y_after_scaler
,
242 dlg_regs
->dst_y_prefetch
, dlg_regs
->dst_y_per_vm_vblank
, dlg_regs
->dst_y_per_row_vblank
,
243 dlg_regs
->dst_y_per_vm_flip
, dlg_regs
->dst_y_per_row_flip
, dlg_regs
->ref_freq_to_pix_freq
,
244 dlg_regs
->vratio_prefetch
, dlg_regs
->vratio_prefetch_c
, dlg_regs
->refcyc_per_pte_group_vblank_l
,
245 dlg_regs
->refcyc_per_pte_group_vblank_c
, dlg_regs
->refcyc_per_meta_chunk_vblank_l
,
246 dlg_regs
->refcyc_per_meta_chunk_vblank_c
, dlg_regs
->refcyc_per_pte_group_flip_l
,
247 dlg_regs
->refcyc_per_pte_group_flip_c
, dlg_regs
->refcyc_per_meta_chunk_flip_l
,
248 dlg_regs
->refcyc_per_meta_chunk_flip_c
, dlg_regs
->dst_y_per_pte_row_nom_l
,
249 dlg_regs
->dst_y_per_pte_row_nom_c
, dlg_regs
->refcyc_per_pte_group_nom_l
,
250 dlg_regs
->refcyc_per_pte_group_nom_c
, dlg_regs
->dst_y_per_meta_row_nom_l
,
251 dlg_regs
->dst_y_per_meta_row_nom_c
, dlg_regs
->refcyc_per_meta_chunk_nom_l
,
252 dlg_regs
->refcyc_per_meta_chunk_nom_c
, dlg_regs
->refcyc_per_line_delivery_pre_l
,
253 dlg_regs
->refcyc_per_line_delivery_pre_c
, dlg_regs
->refcyc_per_line_delivery_l
,
254 dlg_regs
->refcyc_per_line_delivery_c
, dlg_regs
->chunk_hdl_adjust_cur0
, dlg_regs
->dst_y_offset_cur1
,
255 dlg_regs
->chunk_hdl_adjust_cur1
, dlg_regs
->vready_after_vcount0
, dlg_regs
->dst_y_delta_drq_limit
,
256 dlg_regs
->xfc_reg_transfer_delay
, dlg_regs
->xfc_reg_precharge_delay
,
257 dlg_regs
->xfc_reg_remote_surface_flip_latency
);
260 DTN_INFO("========TTU========\n");
261 DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c"
262 " rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l"
263 " qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n");
264 for (i
= 0; i
< pool
->pipe_count
; i
++) {
265 struct dcn_hubp_state
*s
= &(TO_DCN10_HUBP(pool
->hubps
[i
])->state
);
266 struct _vcs_dpi_display_ttu_regs_st
*ttu_regs
= &s
->ttu_attr
;
269 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
270 pool
->hubps
[i
]->inst
, ttu_regs
->qos_level_low_wm
, ttu_regs
->qos_level_high_wm
, ttu_regs
->min_ttu_vblank
,
271 ttu_regs
->qos_level_flip
, ttu_regs
->refcyc_per_req_delivery_pre_l
, ttu_regs
->refcyc_per_req_delivery_l
,
272 ttu_regs
->refcyc_per_req_delivery_pre_c
, ttu_regs
->refcyc_per_req_delivery_c
, ttu_regs
->refcyc_per_req_delivery_cur0
,
273 ttu_regs
->refcyc_per_req_delivery_pre_cur0
, ttu_regs
->refcyc_per_req_delivery_cur1
,
274 ttu_regs
->refcyc_per_req_delivery_pre_cur1
, ttu_regs
->qos_level_fixed_l
, ttu_regs
->qos_ramp_disable_l
,
275 ttu_regs
->qos_level_fixed_c
, ttu_regs
->qos_ramp_disable_c
, ttu_regs
->qos_level_fixed_cur0
,
276 ttu_regs
->qos_ramp_disable_cur0
, ttu_regs
->qos_level_fixed_cur1
, ttu_regs
->qos_ramp_disable_cur1
);
281 void dcn10_log_hw_state(struct dc
*dc
,
282 struct dc_log_buffer_ctx
*log_ctx
)
284 struct dc_context
*dc_ctx
= dc
->ctx
;
285 struct resource_pool
*pool
= dc
->res_pool
;
290 dcn10_log_hubbub_state(dc
, log_ctx
);
292 dcn10_log_hubp_states(dc
, log_ctx
);
294 DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
295 " GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 "
296 "C31 C32 C33 C34\n");
297 for (i
= 0; i
< pool
->pipe_count
; i
++) {
298 struct dpp
*dpp
= pool
->dpps
[i
];
299 struct dcn_dpp_state s
= {0};
301 dpp
->funcs
->dpp_read_state(dpp
, &s
);
306 DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s"
307 "%8x %08xh %08xh %08xh %08xh %08xh %08xh",
310 (s
.igam_lut_mode
== 0) ? "BypassFixed" :
311 ((s
.igam_lut_mode
== 1) ? "BypassFloat" :
312 ((s
.igam_lut_mode
== 2) ? "RAM" :
313 ((s
.igam_lut_mode
== 3) ? "RAM" :
315 (s
.dgam_lut_mode
== 0) ? "Bypass" :
316 ((s
.dgam_lut_mode
== 1) ? "sRGB" :
317 ((s
.dgam_lut_mode
== 2) ? "Ycc" :
318 ((s
.dgam_lut_mode
== 3) ? "RAM" :
319 ((s
.dgam_lut_mode
== 4) ? "RAM" :
321 (s
.rgam_lut_mode
== 0) ? "Bypass" :
322 ((s
.rgam_lut_mode
== 1) ? "sRGB" :
323 ((s
.rgam_lut_mode
== 2) ? "Ycc" :
324 ((s
.rgam_lut_mode
== 3) ? "RAM" :
325 ((s
.rgam_lut_mode
== 4) ? "RAM" :
328 s
.gamut_remap_c11_c12
,
329 s
.gamut_remap_c13_c14
,
330 s
.gamut_remap_c21_c22
,
331 s
.gamut_remap_c23_c24
,
332 s
.gamut_remap_c31_c32
,
333 s
.gamut_remap_c33_c34
);
338 DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
339 for (i
= 0; i
< pool
->pipe_count
; i
++) {
340 struct mpcc_state s
= {0};
342 pool
->mpc
->funcs
->read_mpcc_state(pool
->mpc
, i
, &s
);
344 DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n",
345 i
, s
.opp_id
, s
.dpp_id
, s
.bot_mpcc_id
,
346 s
.mode
, s
.alpha_mode
, s
.pre_multiplied_alpha
, s
.overlap_only
,
351 DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n");
353 for (i
= 0; i
< pool
->timing_generator_count
; i
++) {
354 struct timing_generator
*tg
= pool
->timing_generators
[i
];
355 struct dcn_otg_state s
= {0};
356 /* Read shared OTG state registers for all DCNx */
357 optc1_read_otg_state(DCN10TG_FROM_TG(tg
), &s
);
360 * For DCN2 and greater, a register on the OPP is used to
361 * determine if the CRTC is blanked instead of the OTG. So use
362 * dpg_is_blanked() if exists, otherwise fallback on otg.
364 * TODO: Implement DCN-specific read_otg_state hooks.
366 if (pool
->opps
[i
]->funcs
->dpg_is_blanked
)
367 s
.blank_enabled
= pool
->opps
[i
]->funcs
->dpg_is_blanked(pool
->opps
[i
]);
369 s
.blank_enabled
= tg
->funcs
->is_blanked(tg
);
371 //only print if OTG master is enabled
372 if ((s
.otg_enabled
& 1) == 0)
375 DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n",
393 s
.underflow_occurred_status
,
396 // Clear underflow for debug purposes
397 // We want to keep underflow sticky bit on for the longevity tests outside of test environment.
398 // This function is called only from Windows or Diags test environment, hence it's safe to clear
399 // it from here without affecting the original intent.
400 tg
->funcs
->clear_optc_underflow(tg
);
404 // dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
405 // TODO: Update golden log header to reflect this name change
406 DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
407 for (i
= 0; i
< pool
->res_cap
->num_dsc
; i
++) {
408 struct display_stream_compressor
*dsc
= pool
->dscs
[i
];
409 struct dcn_dsc_state s
= {0};
411 dsc
->funcs
->dsc_read_state(dsc
, &s
);
412 DTN_INFO("[%d]: %-9d %-12d %-10d\n",
416 s
.dsc_bits_per_pixel
);
421 DTN_INFO("S_ENC: DSC_MODE SEC_GSP7_LINE_NUM"
422 " VBID6_LINE_REFERENCE VBID6_LINE_NUM SEC_GSP7_ENABLE SEC_STREAM_ENABLE\n");
423 for (i
= 0; i
< pool
->stream_enc_count
; i
++) {
424 struct stream_encoder
*enc
= pool
->stream_enc
[i
];
425 struct enc_state s
= {0};
427 if (enc
->funcs
->enc_read_state
) {
428 enc
->funcs
->enc_read_state(enc
, &s
);
429 DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
432 s
.sec_gsp_pps_line_num
,
433 s
.vbid6_line_reference
,
435 s
.sec_gsp_pps_enable
,
436 s
.sec_stream_enable
);
442 DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS DP_LINK_TRAINING_COMPLETE\n");
443 for (i
= 0; i
< dc
->link_count
; i
++) {
444 struct link_encoder
*lenc
= dc
->links
[i
]->link_enc
;
446 struct link_enc_state s
= {0};
448 if (lenc
&& lenc
->funcs
->read_state
) {
449 lenc
->funcs
->read_state(lenc
, &s
);
450 DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
453 s
.dphy_fec_ready_shadow
,
454 s
.dphy_fec_active_status
,
455 s
.dp_link_training_complete
);
461 DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
462 "dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
463 dc
->current_state
->bw_ctx
.bw
.dcn
.clk
.dcfclk_khz
,
464 dc
->current_state
->bw_ctx
.bw
.dcn
.clk
.dcfclk_deep_sleep_khz
,
465 dc
->current_state
->bw_ctx
.bw
.dcn
.clk
.dispclk_khz
,
466 dc
->current_state
->bw_ctx
.bw
.dcn
.clk
.dppclk_khz
,
467 dc
->current_state
->bw_ctx
.bw
.dcn
.clk
.max_supported_dppclk_khz
,
468 dc
->current_state
->bw_ctx
.bw
.dcn
.clk
.fclk_khz
,
469 dc
->current_state
->bw_ctx
.bw
.dcn
.clk
.socclk_khz
);
471 log_mpc_crc(dc
, log_ctx
);
474 if (pool
->hpo_dp_stream_enc_count
> 0) {
475 DTN_INFO("DP HPO S_ENC: Enabled OTG Format Depth Vid SDP Compressed Link\n");
476 for (i
= 0; i
< pool
->hpo_dp_stream_enc_count
; i
++) {
477 struct hpo_dp_stream_encoder_state hpo_dp_se_state
= {0};
478 struct hpo_dp_stream_encoder
*hpo_dp_stream_enc
= pool
->hpo_dp_stream_enc
[i
];
480 if (hpo_dp_stream_enc
&& hpo_dp_stream_enc
->funcs
->read_state
) {
481 hpo_dp_stream_enc
->funcs
->read_state(hpo_dp_stream_enc
, &hpo_dp_se_state
);
483 DTN_INFO("[%d]: %d %d %6s %d %d %d %d %d\n",
484 hpo_dp_stream_enc
->id
- ENGINE_ID_HPO_DP_0
,
485 hpo_dp_se_state
.stream_enc_enabled
,
486 hpo_dp_se_state
.otg_inst
,
487 (hpo_dp_se_state
.pixel_encoding
== 0) ? "4:4:4" :
488 ((hpo_dp_se_state
.pixel_encoding
== 1) ? "4:2:2" :
489 (hpo_dp_se_state
.pixel_encoding
== 2) ? "4:2:0" : "Y-Only"),
490 (hpo_dp_se_state
.component_depth
== 0) ? 6 :
491 ((hpo_dp_se_state
.component_depth
== 1) ? 8 :
492 (hpo_dp_se_state
.component_depth
== 2) ? 10 : 12),
493 hpo_dp_se_state
.vid_stream_enabled
,
494 hpo_dp_se_state
.sdp_enabled
,
495 hpo_dp_se_state
.compressed_format
,
496 hpo_dp_se_state
.mapped_to_link_enc
);
503 /* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
504 if (pool
->hpo_dp_link_enc_count
) {
505 DTN_INFO("DP HPO L_ENC: Enabled Mode Lanes Stream Slots VC Rate X VC Rate Y\n");
507 for (i
= 0; i
< pool
->hpo_dp_link_enc_count
; i
++) {
508 struct hpo_dp_link_encoder
*hpo_dp_link_enc
= pool
->hpo_dp_link_enc
[i
];
509 struct hpo_dp_link_enc_state hpo_dp_le_state
= {0};
511 if (hpo_dp_link_enc
->funcs
->read_state
) {
512 hpo_dp_link_enc
->funcs
->read_state(hpo_dp_link_enc
, &hpo_dp_le_state
);
513 DTN_INFO("[%d]: %d %6s %d %d %d %d %d\n",
514 hpo_dp_link_enc
->inst
,
515 hpo_dp_le_state
.link_enc_enabled
,
516 (hpo_dp_le_state
.link_mode
== 0) ? "TPS1" :
517 (hpo_dp_le_state
.link_mode
== 1) ? "TPS2" :
518 (hpo_dp_le_state
.link_mode
== 2) ? "ACTIVE" : "TEST",
519 hpo_dp_le_state
.lane_count
,
520 hpo_dp_le_state
.stream_src
[0],
521 hpo_dp_le_state
.slot_count
[0],
522 hpo_dp_le_state
.vc_rate_x
[0],
523 hpo_dp_le_state
.vc_rate_y
[0]);
535 bool dcn10_did_underflow_occur(struct dc
*dc
, struct pipe_ctx
*pipe_ctx
)
537 struct hubp
*hubp
= pipe_ctx
->plane_res
.hubp
;
538 struct timing_generator
*tg
= pipe_ctx
->stream_res
.tg
;
540 if (tg
->funcs
->is_optc_underflow_occurred(tg
)) {
541 tg
->funcs
->clear_optc_underflow(tg
);
545 if (hubp
->funcs
->hubp_get_underflow_status(hubp
)) {
546 hubp
->funcs
->hubp_clear_underflow(hubp
);
552 void dcn10_enable_power_gating_plane(
553 struct dce_hwseq
*hws
,
556 bool force_on
= true; /* disable power gating */
562 REG_UPDATE(DOMAIN0_PG_CONFIG
, DOMAIN0_POWER_FORCEON
, force_on
);
563 REG_UPDATE(DOMAIN2_PG_CONFIG
, DOMAIN2_POWER_FORCEON
, force_on
);
564 REG_UPDATE(DOMAIN4_PG_CONFIG
, DOMAIN4_POWER_FORCEON
, force_on
);
565 REG_UPDATE(DOMAIN6_PG_CONFIG
, DOMAIN6_POWER_FORCEON
, force_on
);
568 REG_UPDATE(DOMAIN1_PG_CONFIG
, DOMAIN1_POWER_FORCEON
, force_on
);
569 REG_UPDATE(DOMAIN3_PG_CONFIG
, DOMAIN3_POWER_FORCEON
, force_on
);
570 REG_UPDATE(DOMAIN5_PG_CONFIG
, DOMAIN5_POWER_FORCEON
, force_on
);
571 REG_UPDATE(DOMAIN7_PG_CONFIG
, DOMAIN7_POWER_FORCEON
, force_on
);
574 void dcn10_disable_vga(
575 struct dce_hwseq
*hws
)
577 unsigned int in_vga1_mode
= 0;
578 unsigned int in_vga2_mode
= 0;
579 unsigned int in_vga3_mode
= 0;
580 unsigned int in_vga4_mode
= 0;
582 REG_GET(D1VGA_CONTROL
, D1VGA_MODE_ENABLE
, &in_vga1_mode
);
583 REG_GET(D2VGA_CONTROL
, D2VGA_MODE_ENABLE
, &in_vga2_mode
);
584 REG_GET(D3VGA_CONTROL
, D3VGA_MODE_ENABLE
, &in_vga3_mode
);
585 REG_GET(D4VGA_CONTROL
, D4VGA_MODE_ENABLE
, &in_vga4_mode
);
587 if (in_vga1_mode
== 0 && in_vga2_mode
== 0 &&
588 in_vga3_mode
== 0 && in_vga4_mode
== 0)
591 REG_WRITE(D1VGA_CONTROL
, 0);
592 REG_WRITE(D2VGA_CONTROL
, 0);
593 REG_WRITE(D3VGA_CONTROL
, 0);
594 REG_WRITE(D4VGA_CONTROL
, 0);
596 /* HW Engineer's Notes:
597 * During switch from vga->extended, if we set the VGA_TEST_ENABLE and
598 * then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
600 * Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
601 * VGA_TEST_ENABLE, to leave it in the same state as before.
603 REG_UPDATE(VGA_TEST_CONTROL
, VGA_TEST_ENABLE
, 1);
604 REG_UPDATE(VGA_TEST_CONTROL
, VGA_TEST_RENDER_START
, 1);
608 * dcn10_dpp_pg_control - DPP power gate control.
610 * @hws: dce_hwseq reference.
611 * @dpp_inst: DPP instance reference.
612 * @power_on: true if we want to enable power gate, false otherwise.
614 * Enable or disable power gate in the specific DPP instance.
616 void dcn10_dpp_pg_control(
617 struct dce_hwseq
*hws
,
618 unsigned int dpp_inst
,
621 uint32_t power_gate
= power_on
? 0 : 1;
622 uint32_t pwr_status
= power_on
? PGFSM_POWER_ON
: PGFSM_POWER_OFF
;
624 if (hws
->ctx
->dc
->debug
.disable_dpp_power_gate
)
626 if (REG(DOMAIN1_PG_CONFIG
) == 0)
631 REG_UPDATE(DOMAIN1_PG_CONFIG
,
632 DOMAIN1_POWER_GATE
, power_gate
);
634 REG_WAIT(DOMAIN1_PG_STATUS
,
635 DOMAIN1_PGFSM_PWR_STATUS
, pwr_status
,
639 REG_UPDATE(DOMAIN3_PG_CONFIG
,
640 DOMAIN3_POWER_GATE
, power_gate
);
642 REG_WAIT(DOMAIN3_PG_STATUS
,
643 DOMAIN3_PGFSM_PWR_STATUS
, pwr_status
,
647 REG_UPDATE(DOMAIN5_PG_CONFIG
,
648 DOMAIN5_POWER_GATE
, power_gate
);
650 REG_WAIT(DOMAIN5_PG_STATUS
,
651 DOMAIN5_PGFSM_PWR_STATUS
, pwr_status
,
655 REG_UPDATE(DOMAIN7_PG_CONFIG
,
656 DOMAIN7_POWER_GATE
, power_gate
);
658 REG_WAIT(DOMAIN7_PG_STATUS
,
659 DOMAIN7_PGFSM_PWR_STATUS
, pwr_status
,
669 * dcn10_hubp_pg_control - HUBP power gate control.
671 * @hws: dce_hwseq reference.
672 * @hubp_inst: DPP instance reference.
673 * @power_on: true if we want to enable power gate, false otherwise.
675 * Enable or disable power gate in the specific HUBP instance.
677 void dcn10_hubp_pg_control(
678 struct dce_hwseq
*hws
,
679 unsigned int hubp_inst
,
682 uint32_t power_gate
= power_on
? 0 : 1;
683 uint32_t pwr_status
= power_on
? PGFSM_POWER_ON
: PGFSM_POWER_OFF
;
685 if (hws
->ctx
->dc
->debug
.disable_hubp_power_gate
)
687 if (REG(DOMAIN0_PG_CONFIG
) == 0)
691 case 0: /* DCHUBP0 */
692 REG_UPDATE(DOMAIN0_PG_CONFIG
,
693 DOMAIN0_POWER_GATE
, power_gate
);
695 REG_WAIT(DOMAIN0_PG_STATUS
,
696 DOMAIN0_PGFSM_PWR_STATUS
, pwr_status
,
699 case 1: /* DCHUBP1 */
700 REG_UPDATE(DOMAIN2_PG_CONFIG
,
701 DOMAIN2_POWER_GATE
, power_gate
);
703 REG_WAIT(DOMAIN2_PG_STATUS
,
704 DOMAIN2_PGFSM_PWR_STATUS
, pwr_status
,
707 case 2: /* DCHUBP2 */
708 REG_UPDATE(DOMAIN4_PG_CONFIG
,
709 DOMAIN4_POWER_GATE
, power_gate
);
711 REG_WAIT(DOMAIN4_PG_STATUS
,
712 DOMAIN4_PGFSM_PWR_STATUS
, pwr_status
,
715 case 3: /* DCHUBP3 */
716 REG_UPDATE(DOMAIN6_PG_CONFIG
,
717 DOMAIN6_POWER_GATE
, power_gate
);
719 REG_WAIT(DOMAIN6_PG_STATUS
,
720 DOMAIN6_PGFSM_PWR_STATUS
, pwr_status
,
729 static void power_on_plane_resources(
730 struct dce_hwseq
*hws
,
733 DC_LOGGER_INIT(hws
->ctx
->logger
);
735 if (hws
->funcs
.dpp_root_clock_control
)
736 hws
->funcs
.dpp_root_clock_control(hws
, plane_id
, true);
738 if (REG(DC_IP_REQUEST_CNTL
)) {
739 REG_SET(DC_IP_REQUEST_CNTL
, 0,
742 if (hws
->funcs
.dpp_pg_control
)
743 hws
->funcs
.dpp_pg_control(hws
, plane_id
, true);
745 if (hws
->funcs
.hubp_pg_control
)
746 hws
->funcs
.hubp_pg_control(hws
, plane_id
, true);
748 REG_SET(DC_IP_REQUEST_CNTL
, 0,
751 "Un-gated front end for pipe %d\n", plane_id
);
755 static void undo_DEGVIDCN10_253_wa(struct dc
*dc
)
757 struct dce_hwseq
*hws
= dc
->hwseq
;
758 struct hubp
*hubp
= dc
->res_pool
->hubps
[0];
760 if (!hws
->wa_state
.DEGVIDCN10_253_applied
)
763 hubp
->funcs
->set_blank(hubp
, true);
765 REG_SET(DC_IP_REQUEST_CNTL
, 0,
768 hws
->funcs
.hubp_pg_control(hws
, 0, false);
769 REG_SET(DC_IP_REQUEST_CNTL
, 0,
772 hws
->wa_state
.DEGVIDCN10_253_applied
= false;
775 static void apply_DEGVIDCN10_253_wa(struct dc
*dc
)
777 struct dce_hwseq
*hws
= dc
->hwseq
;
778 struct hubp
*hubp
= dc
->res_pool
->hubps
[0];
781 if (dc
->debug
.disable_stutter
)
784 if (!hws
->wa
.DEGVIDCN10_253
)
787 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
788 if (!dc
->res_pool
->hubps
[i
]->power_gated
)
792 /* all pipe power gated, apply work around to enable stutter. */
794 REG_SET(DC_IP_REQUEST_CNTL
, 0,
797 hws
->funcs
.hubp_pg_control(hws
, 0, true);
798 REG_SET(DC_IP_REQUEST_CNTL
, 0,
801 hubp
->funcs
->set_hubp_blank_en(hubp
, false);
802 hws
->wa_state
.DEGVIDCN10_253_applied
= true;
805 void dcn10_bios_golden_init(struct dc
*dc
)
807 struct dce_hwseq
*hws
= dc
->hwseq
;
808 struct dc_bios
*bp
= dc
->ctx
->dc_bios
;
810 bool allow_self_fresh_force_enable
= true;
812 if (hws
->funcs
.s0i3_golden_init_wa
&& hws
->funcs
.s0i3_golden_init_wa(dc
))
815 if (dc
->res_pool
->hubbub
->funcs
->is_allow_self_refresh_enabled
)
816 allow_self_fresh_force_enable
=
817 dc
->res_pool
->hubbub
->funcs
->is_allow_self_refresh_enabled(dc
->res_pool
->hubbub
);
820 /* WA for making DF sleep when idle after resume from S0i3.
821 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
822 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
823 * before calling command table and it changed to 1 after,
824 * it should be set back to 0.
827 /* initialize dcn global */
828 bp
->funcs
->enable_disp_power_gating(bp
,
829 CONTROLLER_ID_D0
, ASIC_PIPE_INIT
);
831 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
832 /* initialize dcn per pipe */
833 bp
->funcs
->enable_disp_power_gating(bp
,
834 CONTROLLER_ID_D0
+ i
, ASIC_PIPE_DISABLE
);
837 if (dc
->res_pool
->hubbub
->funcs
->allow_self_refresh_control
)
838 if (allow_self_fresh_force_enable
== false &&
839 dc
->res_pool
->hubbub
->funcs
->is_allow_self_refresh_enabled(dc
->res_pool
->hubbub
))
840 dc
->res_pool
->hubbub
->funcs
->allow_self_refresh_control(dc
->res_pool
->hubbub
,
841 !dc
->res_pool
->hubbub
->ctx
->dc
->debug
.disable_stutter
);
845 static void false_optc_underflow_wa(
847 const struct dc_stream_state
*stream
,
848 struct timing_generator
*tg
)
853 if (!dc
->hwseq
->wa
.false_optc_underflow
)
856 underflow
= tg
->funcs
->is_optc_underflow_occurred(tg
);
858 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
859 struct pipe_ctx
*old_pipe_ctx
= &dc
->current_state
->res_ctx
.pipe_ctx
[i
];
861 if (old_pipe_ctx
->stream
!= stream
)
864 dc
->hwss
.wait_for_mpcc_disconnect(dc
, dc
->res_pool
, old_pipe_ctx
);
867 if (tg
->funcs
->set_blank_data_double_buffer
)
868 tg
->funcs
->set_blank_data_double_buffer(tg
, true);
870 if (tg
->funcs
->is_optc_underflow_occurred(tg
) && !underflow
)
871 tg
->funcs
->clear_optc_underflow(tg
);
874 static int calculate_vready_offset_for_group(struct pipe_ctx
*pipe
)
876 struct pipe_ctx
*other_pipe
;
877 int vready_offset
= pipe
->pipe_dlg_param
.vready_offset
;
879 /* Always use the largest vready_offset of all connected pipes */
880 for (other_pipe
= pipe
->bottom_pipe
; other_pipe
!= NULL
; other_pipe
= other_pipe
->bottom_pipe
) {
881 if (other_pipe
->pipe_dlg_param
.vready_offset
> vready_offset
)
882 vready_offset
= other_pipe
->pipe_dlg_param
.vready_offset
;
884 for (other_pipe
= pipe
->top_pipe
; other_pipe
!= NULL
; other_pipe
= other_pipe
->top_pipe
) {
885 if (other_pipe
->pipe_dlg_param
.vready_offset
> vready_offset
)
886 vready_offset
= other_pipe
->pipe_dlg_param
.vready_offset
;
888 for (other_pipe
= pipe
->next_odm_pipe
; other_pipe
!= NULL
; other_pipe
= other_pipe
->next_odm_pipe
) {
889 if (other_pipe
->pipe_dlg_param
.vready_offset
> vready_offset
)
890 vready_offset
= other_pipe
->pipe_dlg_param
.vready_offset
;
892 for (other_pipe
= pipe
->prev_odm_pipe
; other_pipe
!= NULL
; other_pipe
= other_pipe
->prev_odm_pipe
) {
893 if (other_pipe
->pipe_dlg_param
.vready_offset
> vready_offset
)
894 vready_offset
= other_pipe
->pipe_dlg_param
.vready_offset
;
897 return vready_offset
;
900 enum dc_status
dcn10_enable_stream_timing(
901 struct pipe_ctx
*pipe_ctx
,
902 struct dc_state
*context
,
905 struct dc_stream_state
*stream
= pipe_ctx
->stream
;
906 enum dc_color_space color_space
;
907 struct tg_color black_color
= {0};
909 /* by upper caller loop, pipe0 is parent pipe and be called first.
910 * back end is set up by for pipe0. Other children pipe share back end
911 * with pipe 0. No program is needed.
913 if (pipe_ctx
->top_pipe
!= NULL
)
916 /* TODO check if timing_changed, disable stream if timing changed */
918 /* HW program guide assume display already disable
919 * by unplug sequence. OTG assume stop.
921 pipe_ctx
->stream_res
.tg
->funcs
->enable_optc_clock(pipe_ctx
->stream_res
.tg
, true);
923 if (false == pipe_ctx
->clock_source
->funcs
->program_pix_clk(
924 pipe_ctx
->clock_source
,
925 &pipe_ctx
->stream_res
.pix_clk_params
,
926 dc
->link_srv
->dp_get_encoding_format(&pipe_ctx
->link_config
.dp_link_settings
),
927 &pipe_ctx
->pll_settings
)) {
929 return DC_ERROR_UNEXPECTED
;
932 if (dc_is_hdmi_tmds_signal(stream
->signal
)) {
933 stream
->link
->phy_state
.symclk_ref_cnts
.otg
= 1;
934 if (stream
->link
->phy_state
.symclk_state
== SYMCLK_OFF_TX_OFF
)
935 stream
->link
->phy_state
.symclk_state
= SYMCLK_ON_TX_OFF
;
937 stream
->link
->phy_state
.symclk_state
= SYMCLK_ON_TX_ON
;
940 pipe_ctx
->stream_res
.tg
->funcs
->program_timing(
941 pipe_ctx
->stream_res
.tg
,
943 calculate_vready_offset_for_group(pipe_ctx
),
944 pipe_ctx
->pipe_dlg_param
.vstartup_start
,
945 pipe_ctx
->pipe_dlg_param
.vupdate_offset
,
946 pipe_ctx
->pipe_dlg_param
.vupdate_width
,
947 pipe_ctx
->stream
->signal
,
950 #if 0 /* move to after enable_crtc */
951 /* TODO: OPP FMT, ABM. etc. should be done here. */
952 /* or FPGA now. instance 0 only. TODO: move to opp.c */
954 inst_offset
= reg_offsets
[pipe_ctx
->stream_res
.tg
->inst
].fmt
;
956 pipe_ctx
->stream_res
.opp
->funcs
->opp_program_fmt(
957 pipe_ctx
->stream_res
.opp
,
958 &stream
->bit_depth_params
,
961 /* program otg blank color */
962 color_space
= stream
->output_color_space
;
963 color_space_to_black_color(dc
, color_space
, &black_color
);
966 * The way 420 is packed, 2 channels carry Y component, 1 channel
967 * alternate between Cb and Cr, so both channels need the pixel
970 if (stream
->timing
.pixel_encoding
== PIXEL_ENCODING_YCBCR420
)
971 black_color
.color_r_cr
= black_color
.color_g_y
;
973 if (pipe_ctx
->stream_res
.tg
->funcs
->set_blank_color
)
974 pipe_ctx
->stream_res
.tg
->funcs
->set_blank_color(
975 pipe_ctx
->stream_res
.tg
,
978 if (pipe_ctx
->stream_res
.tg
->funcs
->is_blanked
&&
979 !pipe_ctx
->stream_res
.tg
->funcs
->is_blanked(pipe_ctx
->stream_res
.tg
)) {
980 pipe_ctx
->stream_res
.tg
->funcs
->set_blank(pipe_ctx
->stream_res
.tg
, true);
981 hwss_wait_for_blank_complete(pipe_ctx
->stream_res
.tg
);
982 false_optc_underflow_wa(dc
, pipe_ctx
->stream
, pipe_ctx
->stream_res
.tg
);
985 /* VTG is within DCHUB command block. DCFCLK is always on */
986 if (false == pipe_ctx
->stream_res
.tg
->funcs
->enable_crtc(pipe_ctx
->stream_res
.tg
)) {
988 return DC_ERROR_UNEXPECTED
;
991 /* TODO program crtc source select for non-virtual signal*/
992 /* TODO program FMT */
993 /* TODO setup link_enc */
994 /* TODO set stream attributes */
995 /* TODO program audio */
996 /* TODO enable stream if timing changed */
997 /* TODO unblank stream if DP */
1002 static void dcn10_reset_back_end_for_pipe(
1004 struct pipe_ctx
*pipe_ctx
,
1005 struct dc_state
*context
)
1008 struct dc_link
*link
;
1009 DC_LOGGER_INIT(dc
->ctx
->logger
);
1010 if (pipe_ctx
->stream_res
.stream_enc
== NULL
) {
1011 pipe_ctx
->stream
= NULL
;
1015 if (!IS_FPGA_MAXIMUS_DC(dc
->ctx
->dce_environment
)) {
1016 link
= pipe_ctx
->stream
->link
;
1017 /* DPMS may already disable or */
1018 /* dpms_off status is incorrect due to fastboot
1019 * feature. When system resume from S4 with second
1020 * screen only, the dpms_off would be true but
1021 * VBIOS lit up eDP, so check link status too.
1023 if (!pipe_ctx
->stream
->dpms_off
|| link
->link_status
.link_active
)
1024 dc
->link_srv
->set_dpms_off(pipe_ctx
);
1025 else if (pipe_ctx
->stream_res
.audio
)
1026 dc
->hwss
.disable_audio_stream(pipe_ctx
);
1028 if (pipe_ctx
->stream_res
.audio
) {
1029 /*disable az_endpoint*/
1030 pipe_ctx
->stream_res
.audio
->funcs
->az_disable(pipe_ctx
->stream_res
.audio
);
1033 if (dc
->caps
.dynamic_audio
== true) {
1034 /*we have to dynamic arbitrate the audio endpoints*/
1035 /*we free the resource, need reset is_audio_acquired*/
1036 update_audio_usage(&dc
->current_state
->res_ctx
, dc
->res_pool
,
1037 pipe_ctx
->stream_res
.audio
, false);
1038 pipe_ctx
->stream_res
.audio
= NULL
;
1043 /* by upper caller loop, parent pipe: pipe0, will be reset last.
1044 * back end share by all pipes and will be disable only when disable
1047 if (pipe_ctx
->top_pipe
== NULL
) {
1049 if (pipe_ctx
->stream_res
.abm
)
1050 dc
->hwss
.set_abm_immediate_disable(pipe_ctx
);
1052 pipe_ctx
->stream_res
.tg
->funcs
->disable_crtc(pipe_ctx
->stream_res
.tg
);
1054 pipe_ctx
->stream_res
.tg
->funcs
->enable_optc_clock(pipe_ctx
->stream_res
.tg
, false);
1055 if (pipe_ctx
->stream_res
.tg
->funcs
->set_drr
)
1056 pipe_ctx
->stream_res
.tg
->funcs
->set_drr(
1057 pipe_ctx
->stream_res
.tg
, NULL
);
1058 pipe_ctx
->stream
->link
->phy_state
.symclk_ref_cnts
.otg
= 0;
1061 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++)
1062 if (&dc
->current_state
->res_ctx
.pipe_ctx
[i
] == pipe_ctx
)
1065 if (i
== dc
->res_pool
->pipe_count
)
1068 pipe_ctx
->stream
= NULL
;
1069 DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
1070 pipe_ctx
->pipe_idx
, pipe_ctx
->stream_res
.tg
->inst
);
1073 static bool dcn10_hw_wa_force_recovery(struct dc
*dc
)
1077 bool need_recover
= true;
1079 if (!dc
->debug
.recovery_enabled
)
1082 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
1083 struct pipe_ctx
*pipe_ctx
=
1084 &dc
->current_state
->res_ctx
.pipe_ctx
[i
];
1085 if (pipe_ctx
!= NULL
) {
1086 hubp
= pipe_ctx
->plane_res
.hubp
;
1087 if (hubp
!= NULL
&& hubp
->funcs
->hubp_get_underflow_status
) {
1088 if (hubp
->funcs
->hubp_get_underflow_status(hubp
) != 0) {
1089 /* one pipe underflow, we will reset all the pipes*/
1090 need_recover
= true;
1098 DCHUBP_CNTL:HUBP_BLANK_EN=1
1099 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
1100 DCHUBP_CNTL:HUBP_DISABLE=1
1101 DCHUBP_CNTL:HUBP_DISABLE=0
1102 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
1103 DCSURF_PRIMARY_SURFACE_ADDRESS
1104 DCHUBP_CNTL:HUBP_BLANK_EN=0
1107 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
1108 struct pipe_ctx
*pipe_ctx
=
1109 &dc
->current_state
->res_ctx
.pipe_ctx
[i
];
1110 if (pipe_ctx
!= NULL
) {
1111 hubp
= pipe_ctx
->plane_res
.hubp
;
1112 /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
1113 if (hubp
!= NULL
&& hubp
->funcs
->set_hubp_blank_en
)
1114 hubp
->funcs
->set_hubp_blank_en(hubp
, true);
1117 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
1118 hubbub1_soft_reset(dc
->res_pool
->hubbub
, true);
1120 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
1121 struct pipe_ctx
*pipe_ctx
=
1122 &dc
->current_state
->res_ctx
.pipe_ctx
[i
];
1123 if (pipe_ctx
!= NULL
) {
1124 hubp
= pipe_ctx
->plane_res
.hubp
;
1125 /*DCHUBP_CNTL:HUBP_DISABLE=1*/
1126 if (hubp
!= NULL
&& hubp
->funcs
->hubp_disable_control
)
1127 hubp
->funcs
->hubp_disable_control(hubp
, true);
1130 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
1131 struct pipe_ctx
*pipe_ctx
=
1132 &dc
->current_state
->res_ctx
.pipe_ctx
[i
];
1133 if (pipe_ctx
!= NULL
) {
1134 hubp
= pipe_ctx
->plane_res
.hubp
;
1135 /*DCHUBP_CNTL:HUBP_DISABLE=0*/
1136 if (hubp
!= NULL
&& hubp
->funcs
->hubp_disable_control
)
1137 hubp
->funcs
->hubp_disable_control(hubp
, true);
1140 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1141 hubbub1_soft_reset(dc
->res_pool
->hubbub
, false);
1142 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
1143 struct pipe_ctx
*pipe_ctx
=
1144 &dc
->current_state
->res_ctx
.pipe_ctx
[i
];
1145 if (pipe_ctx
!= NULL
) {
1146 hubp
= pipe_ctx
->plane_res
.hubp
;
1147 /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1148 if (hubp
!= NULL
&& hubp
->funcs
->set_hubp_blank_en
)
1149 hubp
->funcs
->set_hubp_blank_en(hubp
, true);
1156 void dcn10_verify_allow_pstate_change_high(struct dc
*dc
)
1158 struct hubbub
*hubbub
= dc
->res_pool
->hubbub
;
1159 static bool should_log_hw_state
; /* prevent hw state log by default */
1161 if (!hubbub
->funcs
->verify_allow_pstate_change_high
)
1164 if (!hubbub
->funcs
->verify_allow_pstate_change_high(hubbub
)) {
1167 if (should_log_hw_state
)
1168 dcn10_log_hw_state(dc
, NULL
);
1170 TRACE_DC_PIPE_STATE(pipe_ctx
, i
, MAX_PIPES
);
1171 BREAK_TO_DEBUGGER();
1172 if (dcn10_hw_wa_force_recovery(dc
)) {
1174 if (!hubbub
->funcs
->verify_allow_pstate_change_high(hubbub
))
1175 BREAK_TO_DEBUGGER();
1180 /* trigger HW to start disconnect plane from stream on the next vsync */
1181 void dcn10_plane_atomic_disconnect(struct dc
*dc
, struct pipe_ctx
*pipe_ctx
)
1183 struct dce_hwseq
*hws
= dc
->hwseq
;
1184 struct hubp
*hubp
= pipe_ctx
->plane_res
.hubp
;
1185 int dpp_id
= pipe_ctx
->plane_res
.dpp
->inst
;
1186 struct mpc
*mpc
= dc
->res_pool
->mpc
;
1187 struct mpc_tree
*mpc_tree_params
;
1188 struct mpcc
*mpcc_to_remove
= NULL
;
1189 struct output_pixel_processor
*opp
= pipe_ctx
->stream_res
.opp
;
1191 mpc_tree_params
= &(opp
->mpc_tree_params
);
1192 mpcc_to_remove
= mpc
->funcs
->get_mpcc_for_dpp(mpc_tree_params
, dpp_id
);
1195 if (mpcc_to_remove
== NULL
)
1198 mpc
->funcs
->remove_mpcc(mpc
, mpc_tree_params
, mpcc_to_remove
);
1199 // Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
1200 // so don't wait for MPCC_IDLE in the programming sequence
1201 if (opp
!= NULL
&& !pipe_ctx
->plane_state
->is_phantom
)
1202 opp
->mpcc_disconnect_pending
[pipe_ctx
->plane_res
.mpcc_inst
] = true;
1204 dc
->optimized_required
= true;
1206 if (hubp
->funcs
->hubp_disconnect
)
1207 hubp
->funcs
->hubp_disconnect(hubp
);
1209 if (dc
->debug
.sanity_checks
)
1210 hws
->funcs
.verify_allow_pstate_change_high(dc
);
1214 * dcn10_plane_atomic_power_down - Power down plane components.
1216 * @dc: dc struct reference. used for grab hwseq.
1217 * @dpp: dpp struct reference.
1218 * @hubp: hubp struct reference.
1220 * Keep in mind that this operation requires a power gate configuration;
1221 * however, requests for switch power gate are precisely controlled to avoid
1222 * problems. For this reason, power gate request is usually disabled. This
1223 * function first needs to enable the power gate request before disabling DPP
1224 * and HUBP. Finally, it disables the power gate request again.
1226 void dcn10_plane_atomic_power_down(struct dc
*dc
,
1230 struct dce_hwseq
*hws
= dc
->hwseq
;
1231 DC_LOGGER_INIT(dc
->ctx
->logger
);
1233 if (REG(DC_IP_REQUEST_CNTL
)) {
1234 REG_SET(DC_IP_REQUEST_CNTL
, 0,
1237 if (hws
->funcs
.dpp_pg_control
)
1238 hws
->funcs
.dpp_pg_control(hws
, dpp
->inst
, false);
1240 if (hws
->funcs
.hubp_pg_control
)
1241 hws
->funcs
.hubp_pg_control(hws
, hubp
->inst
, false);
1243 dpp
->funcs
->dpp_reset(dpp
);
1245 REG_SET(DC_IP_REQUEST_CNTL
, 0,
1248 "Power gated front end %d\n", hubp
->inst
);
1251 if (hws
->funcs
.dpp_root_clock_control
)
1252 hws
->funcs
.dpp_root_clock_control(hws
, dpp
->inst
, false);
1255 /* disable HW used by plane.
1256 * note: cannot disable until disconnect is complete
1258 void dcn10_plane_atomic_disable(struct dc
*dc
, struct pipe_ctx
*pipe_ctx
)
1260 struct dce_hwseq
*hws
= dc
->hwseq
;
1261 struct hubp
*hubp
= pipe_ctx
->plane_res
.hubp
;
1262 struct dpp
*dpp
= pipe_ctx
->plane_res
.dpp
;
1263 int opp_id
= hubp
->opp_id
;
1265 dc
->hwss
.wait_for_mpcc_disconnect(dc
, dc
->res_pool
, pipe_ctx
);
1267 hubp
->funcs
->hubp_clk_cntl(hubp
, false);
1269 dpp
->funcs
->dpp_dppclk_control(dpp
, false, false);
1271 if (opp_id
!= 0xf && pipe_ctx
->stream_res
.opp
->mpc_tree_params
.opp_list
== NULL
)
1272 pipe_ctx
->stream_res
.opp
->funcs
->opp_pipe_clock_control(
1273 pipe_ctx
->stream_res
.opp
,
1276 hubp
->power_gated
= true;
1277 dc
->optimized_required
= false; /* We're powering off, no need to optimize */
1279 hws
->funcs
.plane_atomic_power_down(dc
,
1280 pipe_ctx
->plane_res
.dpp
,
1281 pipe_ctx
->plane_res
.hubp
);
1283 pipe_ctx
->stream
= NULL
;
1284 memset(&pipe_ctx
->stream_res
, 0, sizeof(pipe_ctx
->stream_res
));
1285 memset(&pipe_ctx
->plane_res
, 0, sizeof(pipe_ctx
->plane_res
));
1286 pipe_ctx
->top_pipe
= NULL
;
1287 pipe_ctx
->bottom_pipe
= NULL
;
1288 pipe_ctx
->plane_state
= NULL
;
1291 void dcn10_disable_plane(struct dc
*dc
, struct pipe_ctx
*pipe_ctx
)
1293 struct dce_hwseq
*hws
= dc
->hwseq
;
1294 DC_LOGGER_INIT(dc
->ctx
->logger
);
1296 if (!pipe_ctx
->plane_res
.hubp
|| pipe_ctx
->plane_res
.hubp
->power_gated
)
1299 hws
->funcs
.plane_atomic_disable(dc
, pipe_ctx
);
1301 apply_DEGVIDCN10_253_wa(dc
);
1303 DC_LOG_DC("Power down front end %d\n",
1304 pipe_ctx
->pipe_idx
);
1307 void dcn10_init_pipes(struct dc
*dc
, struct dc_state
*context
)
1310 struct dce_hwseq
*hws
= dc
->hwseq
;
1311 struct hubbub
*hubbub
= dc
->res_pool
->hubbub
;
1312 bool can_apply_seamless_boot
= false;
1314 for (i
= 0; i
< context
->stream_count
; i
++) {
1315 if (context
->streams
[i
]->apply_seamless_boot_optimization
) {
1316 can_apply_seamless_boot
= true;
1321 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
1322 struct timing_generator
*tg
= dc
->res_pool
->timing_generators
[i
];
1323 struct pipe_ctx
*pipe_ctx
= &context
->res_ctx
.pipe_ctx
[i
];
1325 /* There is assumption that pipe_ctx is not mapping irregularly
1326 * to non-preferred front end. If pipe_ctx->stream is not NULL,
1327 * we will use the pipe, so don't disable
1329 if (pipe_ctx
->stream
!= NULL
&& can_apply_seamless_boot
)
1332 /* Blank controller using driver code instead of
1335 if (tg
->funcs
->is_tg_enabled(tg
)) {
1336 if (hws
->funcs
.init_blank
!= NULL
) {
1337 hws
->funcs
.init_blank(dc
, tg
);
1338 tg
->funcs
->lock(tg
);
1340 tg
->funcs
->lock(tg
);
1341 tg
->funcs
->set_blank(tg
, true);
1342 hwss_wait_for_blank_complete(tg
);
1347 /* Reset det size */
1348 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
1349 struct pipe_ctx
*pipe_ctx
= &context
->res_ctx
.pipe_ctx
[i
];
1350 struct hubp
*hubp
= dc
->res_pool
->hubps
[i
];
1352 /* Do not need to reset for seamless boot */
1353 if (pipe_ctx
->stream
!= NULL
&& can_apply_seamless_boot
)
1356 if (hubbub
&& hubp
) {
1357 if (hubbub
->funcs
->program_det_size
)
1358 hubbub
->funcs
->program_det_size(hubbub
, hubp
->inst
, 0);
1362 /* num_opp will be equal to number of mpcc */
1363 for (i
= 0; i
< dc
->res_pool
->res_cap
->num_opp
; i
++) {
1364 struct pipe_ctx
*pipe_ctx
= &context
->res_ctx
.pipe_ctx
[i
];
1366 /* Cannot reset the MPC mux if seamless boot */
1367 if (pipe_ctx
->stream
!= NULL
&& can_apply_seamless_boot
)
1370 dc
->res_pool
->mpc
->funcs
->mpc_init_single_inst(
1371 dc
->res_pool
->mpc
, i
);
1374 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
1375 struct timing_generator
*tg
= dc
->res_pool
->timing_generators
[i
];
1376 struct hubp
*hubp
= dc
->res_pool
->hubps
[i
];
1377 struct dpp
*dpp
= dc
->res_pool
->dpps
[i
];
1378 struct pipe_ctx
*pipe_ctx
= &context
->res_ctx
.pipe_ctx
[i
];
1380 /* There is assumption that pipe_ctx is not mapping irregularly
1381 * to non-preferred front end. If pipe_ctx->stream is not NULL,
1382 * we will use the pipe, so don't disable
1384 if (can_apply_seamless_boot
&&
1385 pipe_ctx
->stream
!= NULL
&&
1386 pipe_ctx
->stream_res
.tg
->funcs
->is_tg_enabled(
1387 pipe_ctx
->stream_res
.tg
)) {
1388 // Enable double buffering for OTG_BLANK no matter if
1389 // seamless boot is enabled or not to suppress global sync
1390 // signals when OTG blanked. This is to prevent pipe from
1391 // requesting data while in PSR.
1392 tg
->funcs
->tg_init(tg
);
1393 hubp
->power_gated
= true;
1397 /* Disable on the current state so the new one isn't cleared. */
1398 pipe_ctx
= &dc
->current_state
->res_ctx
.pipe_ctx
[i
];
1400 dpp
->funcs
->dpp_reset(dpp
);
1402 pipe_ctx
->stream_res
.tg
= tg
;
1403 pipe_ctx
->pipe_idx
= i
;
1405 pipe_ctx
->plane_res
.hubp
= hubp
;
1406 pipe_ctx
->plane_res
.dpp
= dpp
;
1407 pipe_ctx
->plane_res
.mpcc_inst
= dpp
->inst
;
1408 hubp
->mpcc_id
= dpp
->inst
;
1409 hubp
->opp_id
= OPP_ID_INVALID
;
1410 hubp
->power_gated
= false;
1412 dc
->res_pool
->opps
[i
]->mpc_tree_params
.opp_id
= dc
->res_pool
->opps
[i
]->inst
;
1413 dc
->res_pool
->opps
[i
]->mpc_tree_params
.opp_list
= NULL
;
1414 dc
->res_pool
->opps
[i
]->mpcc_disconnect_pending
[pipe_ctx
->plane_res
.mpcc_inst
] = true;
1415 pipe_ctx
->stream_res
.opp
= dc
->res_pool
->opps
[i
];
1417 hws
->funcs
.plane_atomic_disconnect(dc
, pipe_ctx
);
1419 if (tg
->funcs
->is_tg_enabled(tg
))
1420 tg
->funcs
->unlock(tg
);
1422 dc
->hwss
.disable_plane(dc
, pipe_ctx
);
1424 pipe_ctx
->stream_res
.tg
= NULL
;
1425 pipe_ctx
->plane_res
.hubp
= NULL
;
1427 if (tg
->funcs
->is_tg_enabled(tg
)) {
1428 if (tg
->funcs
->init_odm
)
1429 tg
->funcs
->init_odm(tg
);
1432 tg
->funcs
->tg_init(tg
);
1435 /* Power gate DSCs */
1436 if (hws
->funcs
.dsc_pg_control
!= NULL
) {
1437 uint32_t num_opps
= 0;
1438 uint32_t opp_id_src0
= OPP_ID_INVALID
;
1439 uint32_t opp_id_src1
= OPP_ID_INVALID
;
1441 // Step 1: To find out which OPTC is running & OPTC DSC is ON
1442 // We can't use res_pool->res_cap->num_timing_generator to check
1443 // Because it records display pipes default setting built in driver,
1444 // not display pipes of the current chip.
1445 // Some ASICs would be fused display pipes less than the default setting.
1446 // In dcnxx_resource_construct function, driver would obatin real information.
1447 for (i
= 0; i
< dc
->res_pool
->timing_generator_count
; i
++) {
1448 uint32_t optc_dsc_state
= 0;
1449 struct timing_generator
*tg
= dc
->res_pool
->timing_generators
[i
];
1451 if (tg
->funcs
->is_tg_enabled(tg
)) {
1452 if (tg
->funcs
->get_dsc_status
)
1453 tg
->funcs
->get_dsc_status(tg
, &optc_dsc_state
);
1454 // Only one OPTC with DSC is ON, so if we got one result, we would exit this block.
1455 // non-zero value is DSC enabled
1456 if (optc_dsc_state
!= 0) {
1457 tg
->funcs
->get_optc_source(tg
, &num_opps
, &opp_id_src0
, &opp_id_src1
);
1463 // Step 2: To power down DSC but skip DSC of running OPTC
1464 for (i
= 0; i
< dc
->res_pool
->res_cap
->num_dsc
; i
++) {
1465 struct dcn_dsc_state s
= {0};
1467 dc
->res_pool
->dscs
[i
]->funcs
->dsc_read_state(dc
->res_pool
->dscs
[i
], &s
);
1469 if ((s
.dsc_opp_source
== opp_id_src0
|| s
.dsc_opp_source
== opp_id_src1
) &&
1470 s
.dsc_clock_en
&& s
.dsc_fw_en
)
1473 hws
->funcs
.dsc_pg_control(hws
, dc
->res_pool
->dscs
[i
]->inst
, false);
1478 void dcn10_init_hw(struct dc
*dc
)
1481 struct abm
*abm
= dc
->res_pool
->abm
;
1482 struct dmcu
*dmcu
= dc
->res_pool
->dmcu
;
1483 struct dce_hwseq
*hws
= dc
->hwseq
;
1484 struct dc_bios
*dcb
= dc
->ctx
->dc_bios
;
1485 struct resource_pool
*res_pool
= dc
->res_pool
;
1486 uint32_t backlight
= MAX_BACKLIGHT_LEVEL
;
1487 bool is_optimized_init_done
= false;
1489 if (dc
->clk_mgr
&& dc
->clk_mgr
->funcs
->init_clocks
)
1490 dc
->clk_mgr
->funcs
->init_clocks(dc
->clk_mgr
);
1492 /* Align bw context with hw config when system resume. */
1493 if (dc
->clk_mgr
->clks
.dispclk_khz
!= 0 && dc
->clk_mgr
->clks
.dppclk_khz
!= 0) {
1494 dc
->current_state
->bw_ctx
.bw
.dcn
.clk
.dispclk_khz
= dc
->clk_mgr
->clks
.dispclk_khz
;
1495 dc
->current_state
->bw_ctx
.bw
.dcn
.clk
.dppclk_khz
= dc
->clk_mgr
->clks
.dppclk_khz
;
1498 // Initialize the dccg
1499 if (dc
->res_pool
->dccg
&& dc
->res_pool
->dccg
->funcs
->dccg_init
)
1500 dc
->res_pool
->dccg
->funcs
->dccg_init(res_pool
->dccg
);
1502 if (IS_FPGA_MAXIMUS_DC(dc
->ctx
->dce_environment
)) {
1504 REG_WRITE(REFCLK_CNTL
, 0);
1505 REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL
, DCHUBBUB_GLOBAL_TIMER_ENABLE
, 1);
1506 REG_WRITE(DIO_MEM_PWR_CTRL
, 0);
1508 if (!dc
->debug
.disable_clock_gate
) {
1509 /* enable all DCN clock gating */
1510 REG_WRITE(DCCG_GATE_DISABLE_CNTL
, 0);
1512 REG_WRITE(DCCG_GATE_DISABLE_CNTL2
, 0);
1514 REG_UPDATE(DCFCLK_CNTL
, DCFCLK_GATE_DIS
, 0);
1517 //Enable ability to power gate / don't force power on permanently
1518 if (hws
->funcs
.enable_power_gating_plane
)
1519 hws
->funcs
.enable_power_gating_plane(hws
, true);
1524 if (!dcb
->funcs
->is_accelerated_mode(dcb
))
1525 hws
->funcs
.disable_vga(dc
->hwseq
);
1527 hws
->funcs
.bios_golden_init(dc
);
1529 if (dc
->ctx
->dc_bios
->fw_info_valid
) {
1530 res_pool
->ref_clocks
.xtalin_clock_inKhz
=
1531 dc
->ctx
->dc_bios
->fw_info
.pll_info
.crystal_frequency
;
1533 if (!IS_FPGA_MAXIMUS_DC(dc
->ctx
->dce_environment
)) {
1534 if (res_pool
->dccg
&& res_pool
->hubbub
) {
1536 (res_pool
->dccg
->funcs
->get_dccg_ref_freq
)(res_pool
->dccg
,
1537 dc
->ctx
->dc_bios
->fw_info
.pll_info
.crystal_frequency
,
1538 &res_pool
->ref_clocks
.dccg_ref_clock_inKhz
);
1540 (res_pool
->hubbub
->funcs
->get_dchub_ref_freq
)(res_pool
->hubbub
,
1541 res_pool
->ref_clocks
.dccg_ref_clock_inKhz
,
1542 &res_pool
->ref_clocks
.dchub_ref_clock_inKhz
);
1544 // Not all ASICs have DCCG sw component
1545 res_pool
->ref_clocks
.dccg_ref_clock_inKhz
=
1546 res_pool
->ref_clocks
.xtalin_clock_inKhz
;
1547 res_pool
->ref_clocks
.dchub_ref_clock_inKhz
=
1548 res_pool
->ref_clocks
.xtalin_clock_inKhz
;
1552 ASSERT_CRITICAL(false);
1554 for (i
= 0; i
< dc
->link_count
; i
++) {
1555 /* Power up AND update implementation according to the
1556 * required signal (which may be different from the
1557 * default signal on connector).
1559 struct dc_link
*link
= dc
->links
[i
];
1561 if (!is_optimized_init_done
)
1562 link
->link_enc
->funcs
->hw_init(link
->link_enc
);
1564 /* Check for enabled DIG to identify enabled display */
1565 if (link
->link_enc
->funcs
->is_dig_enabled
&&
1566 link
->link_enc
->funcs
->is_dig_enabled(link
->link_enc
)) {
1567 link
->link_status
.link_active
= true;
1568 if (link
->link_enc
->funcs
->fec_is_active
&&
1569 link
->link_enc
->funcs
->fec_is_active(link
->link_enc
))
1570 link
->fec_state
= dc_link_fec_enabled
;
1574 /* we want to turn off all dp displays before doing detection */
1575 dc
->link_srv
->blank_all_dp_displays(dc
);
1577 if (hws
->funcs
.enable_power_gating_plane
)
1578 hws
->funcs
.enable_power_gating_plane(dc
->hwseq
, true);
1580 /* If taking control over from VBIOS, we may want to optimize our first
1581 * mode set, so we need to skip powering down pipes until we know which
1582 * pipes we want to use.
1583 * Otherwise, if taking control is not possible, we need to power
1586 if (dcb
->funcs
->is_accelerated_mode(dcb
) || !dc
->config
.seamless_boot_edp_requested
) {
1587 if (!is_optimized_init_done
) {
1588 hws
->funcs
.init_pipes(dc
, dc
->current_state
);
1589 if (dc
->res_pool
->hubbub
->funcs
->allow_self_refresh_control
)
1590 dc
->res_pool
->hubbub
->funcs
->allow_self_refresh_control(dc
->res_pool
->hubbub
,
1591 !dc
->res_pool
->hubbub
->ctx
->dc
->debug
.disable_stutter
);
1595 if (!is_optimized_init_done
) {
1597 for (i
= 0; i
< res_pool
->audio_count
; i
++) {
1598 struct audio
*audio
= res_pool
->audios
[i
];
1600 audio
->funcs
->hw_init(audio
);
1603 for (i
= 0; i
< dc
->link_count
; i
++) {
1604 struct dc_link
*link
= dc
->links
[i
];
1606 if (link
->panel_cntl
)
1607 backlight
= link
->panel_cntl
->funcs
->hw_init(link
->panel_cntl
);
1611 abm
->funcs
->abm_init(abm
, backlight
);
1613 if (dmcu
!= NULL
&& !dmcu
->auto_load_dmcu
)
1614 dmcu
->funcs
->dmcu_init(dmcu
);
1617 if (abm
!= NULL
&& dmcu
!= NULL
)
1618 abm
->dmcu_is_running
= dmcu
->funcs
->is_dmcu_initialized(dmcu
);
1620 /* power AFMT HDMI memory TODO: may move to dis/en output save power*/
1621 if (!is_optimized_init_done
)
1622 REG_WRITE(DIO_MEM_PWR_CTRL
, 0);
1624 if (!dc
->debug
.disable_clock_gate
) {
1625 /* enable all DCN clock gating */
1626 REG_WRITE(DCCG_GATE_DISABLE_CNTL
, 0);
1628 REG_WRITE(DCCG_GATE_DISABLE_CNTL2
, 0);
1630 REG_UPDATE(DCFCLK_CNTL
, DCFCLK_GATE_DIS
, 0);
1633 if (dc
->clk_mgr
->funcs
->notify_wm_ranges
)
1634 dc
->clk_mgr
->funcs
->notify_wm_ranges(dc
->clk_mgr
);
1637 /* In headless boot cases, DIG may be turned
1638 * on which causes HW/SW discrepancies.
1639 * To avoid this, power down hardware on boot
1640 * if DIG is turned on
1642 void dcn10_power_down_on_boot(struct dc
*dc
)
1644 struct dc_link
*edp_links
[MAX_NUM_EDP
];
1645 struct dc_link
*edp_link
= NULL
;
1649 dc_get_edp_links(dc
, edp_links
, &edp_num
);
1651 edp_link
= edp_links
[0];
1653 if (edp_link
&& edp_link
->link_enc
->funcs
->is_dig_enabled
&&
1654 edp_link
->link_enc
->funcs
->is_dig_enabled(edp_link
->link_enc
) &&
1655 dc
->hwseq
->funcs
.edp_backlight_control
&&
1656 dc
->hwss
.power_down
&&
1657 dc
->hwss
.edp_power_control
) {
1658 dc
->hwseq
->funcs
.edp_backlight_control(edp_link
, false);
1659 dc
->hwss
.power_down(dc
);
1660 dc
->hwss
.edp_power_control(edp_link
, false);
1662 for (i
= 0; i
< dc
->link_count
; i
++) {
1663 struct dc_link
*link
= dc
->links
[i
];
1665 if (link
->link_enc
&& link
->link_enc
->funcs
->is_dig_enabled
&&
1666 link
->link_enc
->funcs
->is_dig_enabled(link
->link_enc
) &&
1667 dc
->hwss
.power_down
) {
1668 dc
->hwss
.power_down(dc
);
1676 * Call update_clocks with empty context
1677 * to send DISPLAY_OFF
1678 * Otherwise DISPLAY_OFF may not be asserted
1680 if (dc
->clk_mgr
->funcs
->set_low_power_state
)
1681 dc
->clk_mgr
->funcs
->set_low_power_state(dc
->clk_mgr
);
1684 void dcn10_reset_hw_ctx_wrap(
1686 struct dc_state
*context
)
1689 struct dce_hwseq
*hws
= dc
->hwseq
;
1692 for (i
= dc
->res_pool
->pipe_count
- 1; i
>= 0 ; i
--) {
1693 struct pipe_ctx
*pipe_ctx_old
=
1694 &dc
->current_state
->res_ctx
.pipe_ctx
[i
];
1695 struct pipe_ctx
*pipe_ctx
= &context
->res_ctx
.pipe_ctx
[i
];
1697 if (!pipe_ctx_old
->stream
)
1700 if (pipe_ctx_old
->top_pipe
)
1703 if (!pipe_ctx
->stream
||
1704 pipe_need_reprogram(pipe_ctx_old
, pipe_ctx
)) {
1705 struct clock_source
*old_clk
= pipe_ctx_old
->clock_source
;
1707 dcn10_reset_back_end_for_pipe(dc
, pipe_ctx_old
, dc
->current_state
);
1708 if (hws
->funcs
.enable_stream_gating
)
1709 hws
->funcs
.enable_stream_gating(dc
, pipe_ctx_old
);
1711 old_clk
->funcs
->cs_power_down(old_clk
);
1716 static bool patch_address_for_sbs_tb_stereo(
1717 struct pipe_ctx
*pipe_ctx
, PHYSICAL_ADDRESS_LOC
*addr
)
1719 struct dc_plane_state
*plane_state
= pipe_ctx
->plane_state
;
1720 bool sec_split
= pipe_ctx
->top_pipe
&&
1721 pipe_ctx
->top_pipe
->plane_state
== pipe_ctx
->plane_state
;
1722 if (sec_split
&& plane_state
->address
.type
== PLN_ADDR_TYPE_GRPH_STEREO
&&
1723 (pipe_ctx
->stream
->timing
.timing_3d_format
==
1724 TIMING_3D_FORMAT_SIDE_BY_SIDE
||
1725 pipe_ctx
->stream
->timing
.timing_3d_format
==
1726 TIMING_3D_FORMAT_TOP_AND_BOTTOM
)) {
1727 *addr
= plane_state
->address
.grph_stereo
.left_addr
;
1728 plane_state
->address
.grph_stereo
.left_addr
=
1729 plane_state
->address
.grph_stereo
.right_addr
;
1732 if (pipe_ctx
->stream
->view_format
!= VIEW_3D_FORMAT_NONE
&&
1733 plane_state
->address
.type
!= PLN_ADDR_TYPE_GRPH_STEREO
) {
1734 plane_state
->address
.type
= PLN_ADDR_TYPE_GRPH_STEREO
;
1735 plane_state
->address
.grph_stereo
.right_addr
=
1736 plane_state
->address
.grph_stereo
.left_addr
;
1737 plane_state
->address
.grph_stereo
.right_meta_addr
=
1738 plane_state
->address
.grph_stereo
.left_meta_addr
;
1744 void dcn10_update_plane_addr(const struct dc
*dc
, struct pipe_ctx
*pipe_ctx
)
1746 bool addr_patched
= false;
1747 PHYSICAL_ADDRESS_LOC addr
;
1748 struct dc_plane_state
*plane_state
= pipe_ctx
->plane_state
;
1750 if (plane_state
== NULL
)
1753 addr_patched
= patch_address_for_sbs_tb_stereo(pipe_ctx
, &addr
);
1755 pipe_ctx
->plane_res
.hubp
->funcs
->hubp_program_surface_flip_and_addr(
1756 pipe_ctx
->plane_res
.hubp
,
1757 &plane_state
->address
,
1758 plane_state
->flip_immediate
);
1760 plane_state
->status
.requested_address
= plane_state
->address
;
1762 if (plane_state
->flip_immediate
)
1763 plane_state
->status
.current_address
= plane_state
->address
;
1766 pipe_ctx
->plane_state
->address
.grph_stereo
.left_addr
= addr
;
1769 bool dcn10_set_input_transfer_func(struct dc
*dc
, struct pipe_ctx
*pipe_ctx
,
1770 const struct dc_plane_state
*plane_state
)
1772 struct dpp
*dpp_base
= pipe_ctx
->plane_res
.dpp
;
1773 const struct dc_transfer_func
*tf
= NULL
;
1776 if (dpp_base
== NULL
)
1779 if (plane_state
->in_transfer_func
)
1780 tf
= plane_state
->in_transfer_func
;
1782 if (plane_state
->gamma_correction
&&
1783 !dpp_base
->ctx
->dc
->debug
.always_use_regamma
1784 && !plane_state
->gamma_correction
->is_identity
1785 && dce_use_lut(plane_state
->format
))
1786 dpp_base
->funcs
->dpp_program_input_lut(dpp_base
, plane_state
->gamma_correction
);
1789 dpp_base
->funcs
->dpp_set_degamma(dpp_base
, IPP_DEGAMMA_MODE_BYPASS
);
1790 else if (tf
->type
== TF_TYPE_PREDEFINED
) {
1792 case TRANSFER_FUNCTION_SRGB
:
1793 dpp_base
->funcs
->dpp_set_degamma(dpp_base
, IPP_DEGAMMA_MODE_HW_sRGB
);
1795 case TRANSFER_FUNCTION_BT709
:
1796 dpp_base
->funcs
->dpp_set_degamma(dpp_base
, IPP_DEGAMMA_MODE_HW_xvYCC
);
1798 case TRANSFER_FUNCTION_LINEAR
:
1799 dpp_base
->funcs
->dpp_set_degamma(dpp_base
, IPP_DEGAMMA_MODE_BYPASS
);
1801 case TRANSFER_FUNCTION_PQ
:
1802 dpp_base
->funcs
->dpp_set_degamma(dpp_base
, IPP_DEGAMMA_MODE_USER_PWL
);
1803 cm_helper_translate_curve_to_degamma_hw_format(tf
, &dpp_base
->degamma_params
);
1804 dpp_base
->funcs
->dpp_program_degamma_pwl(dpp_base
, &dpp_base
->degamma_params
);
1811 } else if (tf
->type
== TF_TYPE_BYPASS
) {
1812 dpp_base
->funcs
->dpp_set_degamma(dpp_base
, IPP_DEGAMMA_MODE_BYPASS
);
1814 cm_helper_translate_curve_to_degamma_hw_format(tf
,
1815 &dpp_base
->degamma_params
);
1816 dpp_base
->funcs
->dpp_program_degamma_pwl(dpp_base
,
1817 &dpp_base
->degamma_params
);
1824 #define MAX_NUM_HW_POINTS 0x200
1826 static void log_tf(struct dc_context
*ctx
,
1827 struct dc_transfer_func
*tf
, uint32_t hw_points_num
)
1829 // DC_LOG_GAMMA is default logging of all hw points
1830 // DC_LOG_ALL_GAMMA logs all points, not only hw points
1831 // DC_LOG_ALL_TF_POINTS logs all channels of the tf
1834 DC_LOGGER_INIT(ctx
->logger
);
1835 DC_LOG_GAMMA("Gamma Correction TF");
1836 DC_LOG_ALL_GAMMA("Logging all tf points...");
1837 DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
1839 for (i
= 0; i
< hw_points_num
; i
++) {
1840 DC_LOG_GAMMA("R\t%d\t%llu", i
, tf
->tf_pts
.red
[i
].value
);
1841 DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i
, tf
->tf_pts
.green
[i
].value
);
1842 DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i
, tf
->tf_pts
.blue
[i
].value
);
1845 for (i
= hw_points_num
; i
< MAX_NUM_HW_POINTS
; i
++) {
1846 DC_LOG_ALL_GAMMA("R\t%d\t%llu", i
, tf
->tf_pts
.red
[i
].value
);
1847 DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i
, tf
->tf_pts
.green
[i
].value
);
1848 DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i
, tf
->tf_pts
.blue
[i
].value
);
1852 bool dcn10_set_output_transfer_func(struct dc
*dc
, struct pipe_ctx
*pipe_ctx
,
1853 const struct dc_stream_state
*stream
)
1855 struct dpp
*dpp
= pipe_ctx
->plane_res
.dpp
;
1860 dpp
->regamma_params
.hw_points_num
= GAMMA_HW_POINTS_NUM
;
1862 if (stream
->out_transfer_func
&&
1863 stream
->out_transfer_func
->type
== TF_TYPE_PREDEFINED
&&
1864 stream
->out_transfer_func
->tf
== TRANSFER_FUNCTION_SRGB
)
1865 dpp
->funcs
->dpp_program_regamma_pwl(dpp
, NULL
, OPP_REGAMMA_SRGB
);
1867 /* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
1870 else if (cm_helper_translate_curve_to_hw_format(
1871 stream
->out_transfer_func
,
1872 &dpp
->regamma_params
, false)) {
1873 dpp
->funcs
->dpp_program_regamma_pwl(
1875 &dpp
->regamma_params
, OPP_REGAMMA_USER
);
1877 dpp
->funcs
->dpp_program_regamma_pwl(dpp
, NULL
, OPP_REGAMMA_BYPASS
);
1879 if (stream
!= NULL
&& stream
->ctx
!= NULL
&&
1880 stream
->out_transfer_func
!= NULL
) {
1882 stream
->out_transfer_func
,
1883 dpp
->regamma_params
.hw_points_num
);
1889 void dcn10_pipe_control_lock(
1891 struct pipe_ctx
*pipe
,
1894 struct dce_hwseq
*hws
= dc
->hwseq
;
1896 /* use TG master update lock to lock everything on the TG
1897 * therefore only top pipe need to lock
1899 if (!pipe
|| pipe
->top_pipe
)
1902 if (dc
->debug
.sanity_checks
)
1903 hws
->funcs
.verify_allow_pstate_change_high(dc
);
1906 pipe
->stream_res
.tg
->funcs
->lock(pipe
->stream_res
.tg
);
1908 pipe
->stream_res
.tg
->funcs
->unlock(pipe
->stream_res
.tg
);
1910 if (dc
->debug
.sanity_checks
)
1911 hws
->funcs
.verify_allow_pstate_change_high(dc
);
1915 * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
1917 * Software keepout workaround to prevent cursor update locking from stalling
1918 * out cursor updates indefinitely or from old values from being retained in
1919 * the case where the viewport changes in the same frame as the cursor.
1921 * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
1922 * too close to VUPDATE, then stall out until VUPDATE finishes.
1924 * TODO: Optimize cursor programming to be once per frame before VUPDATE
1925 * to avoid the need for this workaround.
1927 static void delay_cursor_until_vupdate(struct dc
*dc
, struct pipe_ctx
*pipe_ctx
)
1929 struct dc_stream_state
*stream
= pipe_ctx
->stream
;
1930 struct crtc_position position
;
1931 uint32_t vupdate_start
, vupdate_end
;
1932 unsigned int lines_to_vupdate
, us_to_vupdate
, vpos
;
1933 unsigned int us_per_line
, us_vupdate
;
1935 if (!dc
->hwss
.calc_vupdate_position
|| !dc
->hwss
.get_position
)
1938 if (!pipe_ctx
->stream_res
.stream_enc
|| !pipe_ctx
->stream_res
.tg
)
1941 dc
->hwss
.calc_vupdate_position(dc
, pipe_ctx
, &vupdate_start
,
1944 dc
->hwss
.get_position(&pipe_ctx
, 1, &position
);
1945 vpos
= position
.vertical_count
;
1947 /* Avoid wraparound calculation issues */
1948 vupdate_start
+= stream
->timing
.v_total
;
1949 vupdate_end
+= stream
->timing
.v_total
;
1950 vpos
+= stream
->timing
.v_total
;
1952 if (vpos
<= vupdate_start
) {
1953 /* VPOS is in VACTIVE or back porch. */
1954 lines_to_vupdate
= vupdate_start
- vpos
;
1955 } else if (vpos
> vupdate_end
) {
1956 /* VPOS is in the front porch. */
1959 /* VPOS is in VUPDATE. */
1960 lines_to_vupdate
= 0;
1963 /* Calculate time until VUPDATE in microseconds. */
1965 stream
->timing
.h_total
* 10000u / stream
->timing
.pix_clk_100hz
;
1966 us_to_vupdate
= lines_to_vupdate
* us_per_line
;
1968 /* 70 us is a conservative estimate of cursor update time*/
1969 if (us_to_vupdate
> 70)
1972 /* Stall out until the cursor update completes. */
1973 if (vupdate_end
< vupdate_start
)
1974 vupdate_end
+= stream
->timing
.v_total
;
1975 us_vupdate
= (vupdate_end
- vupdate_start
+ 1) * us_per_line
;
1976 udelay(us_to_vupdate
+ us_vupdate
);
1979 void dcn10_cursor_lock(struct dc
*dc
, struct pipe_ctx
*pipe
, bool lock
)
1981 /* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
1982 if (!pipe
|| pipe
->top_pipe
)
1985 /* Prevent cursor lock from stalling out cursor updates. */
1987 delay_cursor_until_vupdate(dc
, pipe
);
1989 if (pipe
->stream
&& should_use_dmub_lock(pipe
->stream
->link
)) {
1990 union dmub_hw_lock_flags hw_locks
= { 0 };
1991 struct dmub_hw_lock_inst_flags inst_flags
= { 0 };
1993 hw_locks
.bits
.lock_cursor
= 1;
1994 inst_flags
.opp_inst
= pipe
->stream_res
.opp
->inst
;
1996 dmub_hw_lock_mgr_cmd(dc
->ctx
->dmub_srv
,
2001 dc
->res_pool
->mpc
->funcs
->cursor_lock(dc
->res_pool
->mpc
,
2002 pipe
->stream_res
.opp
->inst
, lock
);
/*
 * Poll the timing generator until a triggered CRTC reset is observed.
 * Returns true if the reset occurred, false on timeout or if the TG
 * pixel counter is not advancing (TG effectively hung/disabled).
 */
static bool wait_for_reset_trigger_to_occur(
	struct dc_context *dc_ctx,
	struct timing_generator *tg)
{
	bool rc = false;
	int i;

	/* To avoid endless loop we wait at most
	 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
	const uint32_t frames_to_wait_on_triggered_reset = 10;

	for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {

		if (!tg->funcs->is_counter_moving(tg)) {
			DC_ERROR("TG counter is not moving!\n");
			break;
		}

		if (tg->funcs->did_triggered_reset_occur(tg)) {
			rc = true;
			/* usually occurs at i=1 */
			DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
					i);
			break;
		}

		/* Wait for one frame. */
		tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
		tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
	}

	if (false == rc)
		DC_ERROR("GSL: Timeout on reset trigger!\n");

	return rc;
}
/*
 * Reduce the fraction *numerator / *denominator in place by repeatedly
 * dividing out small prime factors common to both.
 *
 * If @checkUint32Bounary is true, reduction stops (successfully) as soon
 * as both values fit in 32 bits, and the return value reports whether
 * that bound was reached; if false, the function always reports success.
 * Returns nonzero on success (declared uint64_t, used as a boolean).
 */
static uint64_t reduceSizeAndFraction(uint64_t *numerator,
		uint64_t *denominator,
		bool checkUint32Bounary)
{
	int i;
	bool ret = checkUint32Bounary == false;
	uint64_t max_int32 = 0xffffffff;
	uint64_t num, denom;
	static const uint16_t prime_numbers[] = {
		2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
		47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
		107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
		167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
		229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
		283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
		359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
		431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
		491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
		571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
		641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
		709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
		787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
		859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
		941, 947, 953, 967, 971, 977, 983, 991, 997};
	int count = ARRAY_SIZE(prime_numbers);

	num = *numerator;
	denom = *denominator;
	for (i = 0; i < count; i++) {
		uint32_t num_remainder, denom_remainder;
		uint64_t num_result, denom_result;
		if (checkUint32Bounary &&
			num <= max_int32 && denom <= max_int32) {
			/* Both values now fit in 32 bits — reduced enough. */
			ret = true;
			break;
		}
		/* Divide out this prime as many times as it divides both. */
		do {
			num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
			denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
			if (num_remainder == 0 && denom_remainder == 0) {
				num = num_result;
				denom = denom_result;
			}
		} while (num_remainder == 0 && denom_remainder == 0);
	}
	*numerator = num;
	*denominator = denom;
	return ret;
}
2092 static bool is_low_refresh_rate(struct pipe_ctx
*pipe
)
2094 uint32_t master_pipe_refresh_rate
=
2095 pipe
->stream
->timing
.pix_clk_100hz
* 100 /
2096 pipe
->stream
->timing
.h_total
/
2097 pipe
->stream
->timing
.v_total
;
2098 return master_pipe_refresh_rate
<= 30;
/*
 * Compute the effective pixel-clock divider for a pipe:
 *  - x2 for low (<=30 Hz) refresh when @account_low_refresh_rate is set,
 *  - x2 for YCbCr 4:2:0 packing,
 *  - multiplied by the number of ODM pipes chained to this one.
 * NOTE(review): the *= 2 / numpipes++ statements were elided in the
 * extracted source; restored from the upstream driver — confirm.
 */
static uint8_t get_clock_divider(struct pipe_ctx *pipe,
		bool account_low_refresh_rate)
{
	uint32_t clock_divider = 1;
	uint32_t numpipes = 1;

	if (account_low_refresh_rate && is_low_refresh_rate(pipe))
		clock_divider *= 2;

	if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		clock_divider *= 2;

	while (pipe->next_odm_pipe) {
		pipe = pipe->next_odm_pipe;
		numpipes++;
	}

	clock_divider *= numpipes;

	return clock_divider;
}
/*
 * Align the DP pixel-clock DTOs of a group of pipes against an embedded
 * panel's timing so their vblanks can later be synchronized.
 *
 * Returns the index of the master pipe (the embedded-signal pipe when one
 * exists), or -1 if alignment was not performed / no master was chosen.
 * Streams whose DTO phase/modulo cannot be reduced to programmable size
 * are flagged has_non_synchronizable_pclk and skipped.
 */
static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
		struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	int i, master = -1, embedded = -1;
	struct dc_crtc_timing *hw_crtc_timing;
	uint64_t phase[MAX_PIPES];
	uint64_t modulo[MAX_PIPES];
	unsigned int pclk;

	uint32_t embedded_pix_clk_100hz;
	uint16_t embedded_h_total;
	uint16_t embedded_v_total;
	uint32_t dp_ref_clk_100hz =
		dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;

	hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL);
	if (!hw_crtc_timing)
		return master;

	/* The embedded panel's h/v totals and pixel clock are packed into
	 * vblank_alignment_dto_params: bits [47:32] h_total, [62:48] v_total,
	 * [31:0] pix clk in units of 100 Hz. */
	if (dc->config.vblank_alignment_dto_params &&
		dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
		embedded_h_total =
			(dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
		embedded_v_total =
			(dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
		embedded_pix_clk_100hz =
			dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;

		/* Calculate DTO phase/modulo per pipe, relative to the
		 * embedded timing. */
		for (i = 0; i < group_size; i++) {
			grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
					grouped_pipes[i]->stream_res.tg,
					&hw_crtc_timing[i]);
			dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
				dc->res_pool->dp_clock_source,
				grouped_pipes[i]->stream_res.tg->inst,
				&pclk);
			hw_crtc_timing[i].pix_clk_100hz = pclk;
			if (dc_is_embedded_signal(
					grouped_pipes[i]->stream->signal)) {
				/* The embedded panel is the sync master. */
				embedded = i;
				master = i;
				phase[i] = embedded_pix_clk_100hz*100;
				modulo[i] = dp_ref_clk_100hz*100;
			} else {

				phase[i] = (uint64_t)embedded_pix_clk_100hz*
					hw_crtc_timing[i].h_total*
					hw_crtc_timing[i].v_total;
				phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
				modulo[i] = (uint64_t)dp_ref_clk_100hz*
					embedded_h_total*
					embedded_v_total;

				if (reduceSizeAndFraction(&phase[i],
						&modulo[i], true) == false) {
					/*
					 * this will help to stop reporting
					 * this timing synchronizable
					 */
					DC_SYNC_INFO("Failed to reduce DTO parameters\n");
					grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
				}
			}
		}

		/* Program the reduced DTOs on every synchronizable non-master
		 * pipe and read back the resulting pixel clock. */
		for (i = 0; i < group_size; i++) {
			if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
				dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst,
					phase[i], modulo[i]);
				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst, &pclk);
				grouped_pipes[i]->stream->timing.pix_clk_100hz =
					pclk*get_clock_divider(grouped_pipes[i], false);
				/* NOTE(review): fallback master selection was
				 * elided in the extracted source; restored from
				 * upstream — confirm against tree. */
				if (master == -1)
					master = i;
			}
		}

	}

	kfree(hw_crtc_timing);
	return master;
}
/*
 * Synchronize the vblanks of a group of pipes: enlarge each slave OTG's
 * DPG vertical window so it can slew, align all DP DTOs to a master, then
 * align each slave's vblank to the master's and restore DPG dimensions.
 * NOTE(review): the group_index parameter was elided in the extracted
 * source; restored from upstream — confirm against tree (it is unused).
 */
void dcn10_enable_vblanks_synchronization(
	struct dc *dc,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width, height, master;

	DC_LOGGER_INIT(dc_ctx->logger);

	/* Widen DPG on all slave pipes so their OTGs have room to shift. */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);

		if (!tg->funcs->is_tg_enabled(tg)) {
			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
			return;
		}

		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	/* Reset per-stream sync state before attempting alignment. */
	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;
		grouped_pipes[i]->stream->vblank_synchronized = false;
		grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
	}

	DC_SYNC_INFO("Aligning DP DTOs\n");

	master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);

	DC_SYNC_INFO("Synchronizing VBlanks\n");

	/* Only proceed if a master pipe was selected. */
	if (master >= 0) {
		for (i = 0; i < group_size; i++) {
			if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
				grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
					grouped_pipes[master]->stream_res.tg,
					grouped_pipes[i]->stream_res.tg,
					grouped_pipes[master]->stream->timing.pix_clk_100hz,
					grouped_pipes[i]->stream->timing.pix_clk_100hz,
					get_clock_divider(grouped_pipes[master], false),
					get_clock_divider(grouped_pipes[i], false));
			grouped_pipes[i]->stream->vblank_synchronized = true;
		}
		grouped_pipes[master]->stream->vblank_synchronized = true;
		DC_SYNC_INFO("Sync complete\n");
	}

	/* Restore the real DPG dimensions on the slave pipes. */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}
}
/*
 * Synchronize the timings of a group of pipes via the OTG reset-trigger
 * mechanism: arm every slave OTG to reset on pipe 0's trigger, wait for
 * the reset to occur, then disarm. SubVP phantom streams are skipped
 * throughout since they must not be reset with the group.
 * NOTE(review): the group_index parameter was elided in the extracted
 * source; restored from upstream — confirm against tree (it is unused).
 */
void dcn10_enable_timing_synchronization(
	struct dc *dc,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width, height;

	DC_SYNC_INFO("Setting up OTG reset trigger\n");

	/* Widen DPG on the slave pipes so their OTGs can be reset safely. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);

		if (!tg->funcs->is_tg_enabled(tg)) {
			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
			return;
		}

		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;

		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream->vblank_synchronized = false;
	}

	/* Arm each slave OTG to reset on the group leader's trigger. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
				grouped_pipes[i]->stream_res.tg,
				grouped_pipes[0]->stream_res.tg->inst);
	}

	DC_SYNC_INFO("Waiting for trigger\n");

	/* Need to get only check 1 pipe for having reset as all the others are
	 * synchronized. Look at last pipe programmed to reset.
	 */

	if (grouped_pipes[1]->stream && grouped_pipes[1]->stream->mall_stream_config.type != SUBVP_PHANTOM)
		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);

	/* Disarm the triggers now that the reset has been observed. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
				grouped_pipes[i]->stream_res.tg);
	}

	/* Restore the real DPG dimensions on the slave pipes. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}

	DC_SYNC_INFO("Sync complete\n");
}
/*
 * Enable per-frame CRTC position reset for every pipe in the group and
 * wait for each reset trigger to occur (multi-display frame sync).
 */
void dcn10_enable_per_frame_crtc_position_reset(
	struct dc *dc,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	int i;

	DC_SYNC_INFO("Setting up\n");
	for (i = 0; i < group_size; i++)
		if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
			grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
					grouped_pipes[i]->stream_res.tg,
					/* NOTE(review): the source-OTG instance
					 * argument was elided in the extracted
					 * source; restored from upstream. */
					grouped_pipes[i]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst,
					&grouped_pipes[i]->stream->triggered_crtc_reset);

	DC_SYNC_INFO("Waiting for trigger\n");

	for (i = 0; i < group_size; i++)
		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);

	DC_SYNC_INFO("Multi-display sync is complete\n");
}
/*
 * Read the MC VM system-aperture registers and convert them into the
 * byte-address form expected by the HUBP aperture programming:
 * default address is a 4K page number (<<12), low/high bounds are in
 * 256KB units (<<18).
 */
static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
		struct vm_system_aperture_param *apt,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC physical_page_number;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;

	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);

	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			LOGICAL_ADDR, &logical_addr_low);

	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			LOGICAL_ADDR, &logical_addr_high);

	apt->sys_default.quad_part =  physical_page_number.quad_part << 12;
	apt->sys_low.quad_part =  (int64_t)logical_addr_low << 18;
	apt->sys_high.quad_part =  (int64_t)logical_addr_high << 18;
}
/* Temporary read settings, future will get values from kmd directly */
/*
 * Read the GPUVM context-0 page-table registers (PTE base, start/end
 * logical page numbers, fault default address) into @vm0, then rebase the
 * PTE base address from UMA space into the DCN view using the SDPIF frame
 * buffer base/offset.
 */
static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
		struct vm_context0_param *vm0,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC fb_base;
	PHYSICAL_ADDRESS_LOC fb_offset;
	uint32_t fb_base_value;
	uint32_t fb_offset_value;

	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);

	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);

	/*
	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
	 * Therefore we need to do
	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
	 */
	fb_base.quad_part = (uint64_t)fb_base_value << 24;
	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
	vm0->pte_base.quad_part += fb_base.quad_part;
	vm0->pte_base.quad_part -= fb_offset.quad_part;
}
/*
 * Program a HUBP's GPUVM PTE settings: read the current system aperture
 * and VM context-0 configuration from the MMHUB, then push both into the
 * HUBP via its function table.
 */
static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
{
	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
	struct vm_system_aperture_param apt = {0};
	struct vm_context0_param vm0 = {0};

	mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
	mmhub_read_vm_context0_settings(hubp1, &vm0, hws);

	hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
	hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
}
/*
 * Power up and clock the resources needed by a plane: undo the
 * DEGVIDCN10-253 workaround, power on the HUBP/DPP pair, enable DCFCLK
 * to the DCHUB and the OPP pipe clock, program GPUVM PTEs if VM is
 * supported, and enable the flip interrupt for the top pipe.
 */
static void dcn10_enable_plane(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	undo_DEGVIDCN10_253_wa(dc);

	power_on_plane_resources(dc->hwseq,
		pipe_ctx->plane_res.hubp->inst);

	/* enable DCFCLK current DCHUB */
	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);

	/* make sure OPP_PIPE_CLOCK_EN = 1 */
	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);

	if (dc->config.gpu_vm_support)
		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	if (!pipe_ctx->top_pipe
		&& pipe_ctx->plane_state
		&& pipe_ctx->plane_state->flip_int_enabled
		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
			pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
}
/*
 * Program the DPP gamut-remap (CSC temperature) matrix. The stream-level
 * matrix takes precedence over the plane-level one; if neither is
 * enabled the DPP is set to bypass.
 */
void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
{
	int i = 0;
	struct dpp_grph_csc_adjustment adjust;
	memset(&adjust, 0, sizeof(adjust));
	adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;


	if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
			adjust.temperature_matrix[i] =
				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
	} else if (pipe_ctx->plane_state &&
		   pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
			adjust.temperature_matrix[i] =
				pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
	}

	pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
}
/*
 * Decide whether the rear-plane RGB-bias MPO fix is needed: the pipe must
 * carry a rear plane (layer_index > 0), the output colorspace must be
 * RGB, and the front (top-most) MPO plane must be visible (layer 0).
 */
static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
{
	if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
		if (pipe_ctx->top_pipe) {
			struct pipe_ctx *top = pipe_ctx->top_pipe;

			while (top->top_pipe)
				top = top->top_pipe; // Traverse to top pipe_ctx
			if (top->plane_state && top->plane_state->layer_index == 0)
				return true; // Front MPO plane not hidden
		}
	}
	return false;
}
/*
 * Program the output CSC with the RGB bias terms zeroed (works around
 * double-applied brightness offset when MPO blends a rear RGB plane),
 * then restore the caller's matrix so it is not permanently modified.
 * NOTE(review): the zeroing assignments were elided in the extracted
 * source; restored from upstream — confirm against tree.
 */
static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
{
	// Override rear plane RGB bias to fix MPO brightness
	uint16_t rgb_bias = matrix[3];

	matrix[3] = 0;
	matrix[7] = 0;
	matrix[11] = 0;
	pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);

	// Restore the original matrix for the caller
	matrix[3] = rgb_bias;
	matrix[7] = rgb_bias;
	matrix[11] = rgb_bias;
}
/*
 * Program the output color-space conversion for a pipe: either a custom
 * adjustment matrix from the stream (with the rear-plane MPO bias fix
 * when applicable) or the DPP's default CSC for @colorspace.
 */
void dcn10_program_output_csc(struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		enum dc_color_space colorspace,
		uint16_t *matrix,
		int opp_id)
{
	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {

			/* MPO is broken with RGB colorspaces when OCSC matrix
			 * brightness offset >= 0 on DCN1 due to OCSC before MPC
			 * Blending adds offsets from front + rear to rear plane
			 *
			 * Fix is to set RGB bias to 0 on rear plane, top plane
			 * black value pixels add offset instead of rear + front
			 */

			int16_t rgb_bias = matrix[3];
			// matrix[3/7/11] are all the same offset value

			if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
				dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
			} else {
				pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
			}
		}
	} else {
		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
	}
}
/*
 * Reprogram a DPP for the given plane state: input CSC (format, expansion
 * mode, colorspace) plus the prescale bias-and-scale registers.
 */
static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
{
	struct dc_bias_and_scale bns_params = {0};

	// program the input csc
	dpp->funcs->dpp_setup(dpp,
			plane_state->format,
			EXPANSION_MODE_ZERO,
			plane_state->input_csc_color_matrix,
			plane_state->color_space,
			NULL);

	//set scale and bias registers
	build_prescale_params(&bns_params, plane_state);
	if (dpp->funcs->dpp_program_bias_and_scale)
		dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
}
/*
 * Pick the debug visual-confirm color for a pipe (HDR / surface /
 * swizzle modes), falling back to the stream's black color, and program
 * it as the MPCC background color when the MPC supports it.
 */
void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
{
	struct mpc *mpc = dc->res_pool->mpc;

	if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
		get_hdr_visual_confirm_color(pipe_ctx, color);
	else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
		get_surface_visual_confirm_color(pipe_ctx, color);
	else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
		get_surface_tile_visual_confirm_color(pipe_ctx, color);
	else
		/* No visual confirm requested: use the stream's black color. */
		color_space_to_black_color(
				dc, pipe_ctx->stream->output_color_space, color);

	if (mpc->funcs->set_bg_color) {
		memcpy(&pipe_ctx->plane_state->visual_confirm_color, color, sizeof(struct tg_color));
		mpc->funcs->set_bg_color(mpc, color, mpcc_id);
	}
}
/*
 * (Re)build the MPCC blending configuration for a pipe. On a non-full
 * update only the blending registers are refreshed; on a full update the
 * MPCC is removed from the tree (if connected) and re-inserted.
 */
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct mpcc_blnd_cfg blnd_cfg = {0};
	/* Per-pixel alpha only matters when something blends underneath. */
	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
	int mpcc_id;
	struct mpcc *new_mpcc;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);

	blnd_cfg.overlap_only = false;
	blnd_cfg.global_gain = 0xff;

	if (per_pixel_alpha) {
		/* DCN1.0 has output CM before MPC which seems to screw with
		 * pre-multiplied alpha.
		 */
		blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
				pipe_ctx->stream->output_color_space)
						&& pipe_ctx->plane_state->pre_multiplied_alpha);
		if (pipe_ctx->plane_state->global_alpha) {
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
			blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
		} else {
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
		}
	} else {
		blnd_cfg.pre_multiplied_alpha = false;
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
	}

	if (pipe_ctx->plane_state->global_alpha)
		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
	else
		blnd_cfg.global_alpha = 0xff;

	/*
	 * TODO: remove hack
	 * Note: currently there is a bug in init_hw such that
	 * on resume from hibernate, BIOS sets up MPCC0, and
	 * we do mpcc_remove but the mpcc cannot go to idle
	 * after remove. This cause us to pick mpcc1 here,
	 * which causes a pstate hang for yet unknown reason.
	 */
	mpcc_id = hubp->inst;

	/* If there is no full update, don't need to touch MPC tree*/
	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
		dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
		return;
	}

	/* check if this MPCC is already being used */
	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
	/* remove MPCC if being used */
	if (new_mpcc != NULL)
		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
	else
		if (dc->debug.sanity_checks)
			mpc->funcs->assert_mpcc_idle_before_connect(
					dc->res_pool->mpc, mpcc_id);

	/* Call MPC to insert new plane */
	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
			mpc_tree_params,
			&blnd_cfg,
			NULL,
			NULL,
			hubp->inst,
			mpcc_id);
	dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);

	ASSERT(new_mpcc != NULL);
	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
	hubp->mpcc_id = mpcc_id;
}
/*
 * Push the pipe's current scaler data to the DPP: line-buffer alpha
 * enable (only when per-pixel alpha actually blends with a bottom pipe),
 * 36bpp line-buffer depth, and the full scaler configuration.
 */
static void update_scaler(struct pipe_ctx *pipe_ctx)
{
	bool per_pixel_alpha =
			pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;

	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
	pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
	/* scaler configuration */
	pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
			pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
}
/*
 * Apply all pending HUBP/DPP programming for a pipe according to the
 * plane's update flags: DPP clock divider, VTG select and DLG/TTU setup
 * (full update only), input CSC, MPCC blending, scaler, viewport, cursor,
 * gamut remap, output CSC, surface config, and finally the plane address.
 */
static void dcn10_update_dchubp_dpp(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	struct plane_size size = plane_state->plane_size;
	unsigned int compat_level = 0;
	bool should_divided_by_2 = false;

	/* depends on DML calculation, DPP clock value may change dynamically */
	/* If request max dpp clk is lower than current dispclk, no need to
	 * divided by 2
	 */
	if (plane_state->update_flags.bits.full_update) {

		/* new calculated dispclk, dppclk are stored in
		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
		 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
		 * dcn10_validate_bandwidth compute new dispclk, dppclk.
		 * dispclk will put in use after optimize_bandwidth when
		 * ramp_up_dispclk_with_dpp is called.
		 * there are two places for dppclk be put in use. One location
		 * is the same as the location as dispclk. Another is within
		 * update_dchubp_dpp which happens between pre_bandwidth and
		 * optimize_bandwidth.
		 * dppclk updated within update_dchubp_dpp will cause new
		 * clock values of dispclk and dppclk not be in use at the same
		 * time. when clocks are decreased, this may cause dppclk is
		 * lower than previous configuration and let pipe stuck.
		 * for example, eDP + external dp, change resolution of DP from
		 * 1920x1080x144hz to 1280x960x60hz.
		 * before change: dispclk = 337889 dppclk = 337889
		 * change mode, dcn10_validate_bandwidth calculate
		 * dispclk = 143122 dppclk = 143122
		 * update_dchubp_dpp be executed before dispclk be updated,
		 * dispclk = 337889, but dppclk use new value dispclk /2 =
		 * 168944. this will cause pipe pstate warning issue.
		 * solution: between pre_bandwidth and optimize_bandwidth, while
		 * dispclk is going to be decreased, keep dppclk = dispclk
		 */
		if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
				dc->clk_mgr->clks.dispclk_khz)
			should_divided_by_2 = false;
		else
			should_divided_by_2 =
					context->bw_ctx.bw.dcn.clk.dppclk_khz <=
					dc->clk_mgr->clks.dispclk_khz / 2;

		dpp->funcs->dpp_dppclk_control(
				dpp,
				should_divided_by_2,
				true);

		if (dc->res_pool->dccg)
			dc->res_pool->dccg->funcs->update_dpp_dto(
					dc->res_pool->dccg,
					dpp->inst,
					pipe_ctx->plane_res.bw.dppclk_khz);
		else
			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
						dc->clk_mgr->clks.dispclk_khz / 2 :
							dc->clk_mgr->clks.dispclk_khz;
	}

	/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
	 * VTG is within DCHUBBUB which is commond block share by each pipe HUBP.
	 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
	 */
	if (plane_state->update_flags.bits.full_update) {
		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);

		hubp->funcs->hubp_setup(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs,
			&pipe_ctx->rq_regs,
			&pipe_ctx->pipe_dlg_param);
		hubp->funcs->hubp_setup_interdependent(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs);
	}

	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.bpp_change)
		dcn10_update_dpp(dpp, plane_state);

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.per_pixel_alpha_change ||
		plane_state->update_flags.bits.global_alpha_change)
		hws->funcs.update_mpcc(dc, pipe_ctx);

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.per_pixel_alpha_change ||
		plane_state->update_flags.bits.global_alpha_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.position_change) {
		update_scaler(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.position_change) {
		hubp->funcs->mem_program_viewport(
			hubp,
			&pipe_ctx->plane_res.scl_data.viewport,
			&pipe_ctx->plane_res.scl_data.viewport_c);
	}

	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
		dc->hwss.set_cursor_position(pipe_ctx);
		dc->hwss.set_cursor_attribute(pipe_ctx);

		if (dc->hwss.set_cursor_sdr_white_level)
			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update) {
		/*gamut remap*/
		dc->hwss.program_gamut_remap(pipe_ctx);

		dc->hwss.program_output_csc(dc,
				pipe_ctx,
				pipe_ctx->stream->output_color_space,
				pipe_ctx->stream->csc_color_matrix.matrix,
				pipe_ctx->stream_res.opp->inst);
	}

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.pixel_format_change ||
		plane_state->update_flags.bits.horizontal_mirror_change ||
		plane_state->update_flags.bits.rotation_change ||
		plane_state->update_flags.bits.swizzle_change ||
		plane_state->update_flags.bits.dcc_change ||
		plane_state->update_flags.bits.bpp_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.plane_size_change) {
		hubp->funcs->hubp_program_surface_config(
			hubp,
			plane_state->format,
			&plane_state->tiling_info,
			&size,
			plane_state->rotation,
			&plane_state->dcc,
			plane_state->horizontal_mirror,
			compat_level);
	}

	hubp->power_gated = false;

	hws->funcs.update_plane_addr(dc, pipe_ctx);

	if (is_pipe_tree_visible(pipe_ctx))
		hubp->funcs->set_blank(hubp, false);
}
/*
 * Blank or unblank the pixel data of a pipe via its OTG. Programs the
 * blank color for the stream's colorspace first; on blank, ABM is
 * disabled immediately and the OTG waits for VBLANK before blanking.
 * NOTE(review): the if (!blank)/else split was elided in the extracted
 * source; restored from upstream — confirm against tree.
 */
void dcn10_blank_pixel_data(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		bool blank)
{
	enum dc_color_space color_space;
	struct tg_color black_color = {0};
	struct stream_resource *stream_res = &pipe_ctx->stream_res;
	struct dc_stream_state *stream = pipe_ctx->stream;

	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;


	if (stream_res->tg->funcs->set_blank_color)
		stream_res->tg->funcs->set_blank_color(
				stream_res->tg,
				&black_color);

	if (!blank) {
		if (stream_res->tg->funcs->set_blank)
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		if (stream_res->abm) {
			dc->hwss.set_pipe(pipe_ctx);
			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
		}
	} else {
		dc->hwss.set_abm_immediate_disable(pipe_ctx);
		if (stream_res->tg->funcs->set_blank) {
			stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		}
	}
}
/*
 * Convert the plane's fixed-point HDR multiplier to the DPP's custom
 * float format (1 sign, 6 exponent, 12 mantissa bits) and program it.
 * A zero multiplier keeps the 1.0 default (0x1f000).
 */
void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
{
	struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
	struct custom_float_format fmt;

	fmt.exponenta_bits = 6;
	fmt.mantissa_bits = 12;
	fmt.sign = true;


	if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
		convert_to_custom_float_format(multiplier, &fmt, &hw_mult);

	pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
			pipe_ctx->plane_res.dpp, hw_mult);
}
/*
 * Program one pipe for the front end: for the top pipe, set up global
 * sync/VTG/vupdate interrupt and blank state; then enable the plane on a
 * full update, apply HUBP/DPP programming, the HDR multiplier, and the
 * input/output transfer functions as required by the update flags.
 */
void dcn10_program_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (pipe_ctx->top_pipe == NULL) {
		bool blank = !is_pipe_tree_visible(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->program_global_sync(
				pipe_ctx->stream_res.tg,
				calculate_vready_offset_for_group(pipe_ctx),
				pipe_ctx->pipe_dlg_param.vstartup_start,
				pipe_ctx->pipe_dlg_param.vupdate_offset,
				pipe_ctx->pipe_dlg_param.vupdate_width);

		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);

		if (hws->funcs.setup_vupdate_interrupt)
			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);

		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
	}

	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		dcn10_enable_plane(dc, pipe_ctx, context);

	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);

	hws->funcs.set_hdr_multiplier(pipe_ctx);

	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change)
		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);

	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
	 * only do gamma programming for full update.
	 * TODO: This can be further optimized/cleaned up
	 * Always call this for now since it does memcmp inside before
	 * doing heavy calculation and programming
	 */
	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
}
/*
 * For every enabled top pipe with a stream and plane, wait through one
 * VBLANK then VACTIVE so the pending OTG update is guaranteed to have
 * been taken by a VUPDATE before the caller locks the pipe again.
 */
void dcn10_wait_for_pending_cleared(struct dc *dc,
		struct dc_state *context)
{
	struct pipe_ctx *pipe_ctx;
	struct timing_generator *tg;
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe_ctx = &context->res_ctx.pipe_ctx[i];
		tg = pipe_ctx->stream_res.tg;

		/*
		 * Only wait for top pipe's tg penindg bit
		 * Also skip if pipe is disabled.
		 */
		if (pipe_ctx->top_pipe ||
		    !pipe_ctx->stream || !pipe_ctx->plane_state ||
		    !tg->funcs->is_tg_enabled(tg))
			continue;

		/*
		 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
		 * For some reason waiting for OTG_UPDATE_PENDING cleared
		 * seems to not trigger the update right away, and if we
		 * lock again before VUPDATE then we don't get a separated
		 * operation.
		 */
		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
	}
}
3017 void dcn10_post_unlock_program_front_end(
3019 struct dc_state
*context
)
3023 DC_LOGGER_INIT(dc
->ctx
->logger
);
3025 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
3026 struct pipe_ctx
*pipe_ctx
= &context
->res_ctx
.pipe_ctx
[i
];
3028 if (!pipe_ctx
->top_pipe
&&
3029 !pipe_ctx
->prev_odm_pipe
&&
3031 struct timing_generator
*tg
= pipe_ctx
->stream_res
.tg
;
3033 if (context
->stream_status
[i
].plane_count
== 0)
3034 false_optc_underflow_wa(dc
, pipe_ctx
->stream
, tg
);
3038 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++)
3039 if (context
->res_ctx
.pipe_ctx
[i
].update_flags
.bits
.disable
)
3040 dc
->hwss
.disable_plane(dc
, &dc
->current_state
->res_ctx
.pipe_ctx
[i
]);
3042 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++)
3043 if (context
->res_ctx
.pipe_ctx
[i
].update_flags
.bits
.disable
) {
3044 dc
->hwss
.optimize_bandwidth(dc
, context
);
3048 if (dc
->hwseq
->wa
.DEGVIDCN10_254
)
3049 hubbub1_wm_change_req_wa(dc
->res_pool
->hubbub
);
3052 static void dcn10_stereo_hw_frame_pack_wa(struct dc
*dc
, struct dc_state
*context
)
3056 for (i
= 0; i
< context
->stream_count
; i
++) {
3057 if (context
->streams
[i
]->timing
.timing_3d_format
3058 == TIMING_3D_FORMAT_HW_FRAME_PACKING
) {
3062 hubbub1_allow_self_refresh_control(dc
->res_pool
->hubbub
, false);
3068 void dcn10_prepare_bandwidth(
3070 struct dc_state
*context
)
3072 struct dce_hwseq
*hws
= dc
->hwseq
;
3073 struct hubbub
*hubbub
= dc
->res_pool
->hubbub
;
3074 int min_fclk_khz
, min_dcfclk_khz
, socclk_khz
;
3076 if (dc
->debug
.sanity_checks
)
3077 hws
->funcs
.verify_allow_pstate_change_high(dc
);
3079 if (!IS_FPGA_MAXIMUS_DC(dc
->ctx
->dce_environment
)) {
3080 if (context
->stream_count
== 0)
3081 context
->bw_ctx
.bw
.dcn
.clk
.phyclk_khz
= 0;
3083 dc
->clk_mgr
->funcs
->update_clocks(
3089 dc
->wm_optimized_required
= hubbub
->funcs
->program_watermarks(hubbub
,
3090 &context
->bw_ctx
.bw
.dcn
.watermarks
,
3091 dc
->res_pool
->ref_clocks
.dchub_ref_clock_inKhz
/ 1000,
3093 dcn10_stereo_hw_frame_pack_wa(dc
, context
);
3095 if (dc
->debug
.pplib_wm_report_mode
== WM_REPORT_OVERRIDE
) {
3098 dc
, &min_fclk_khz
, &min_dcfclk_khz
, &socclk_khz
);
3100 dcn_bw_notify_pplib_of_wm_ranges(
3101 dc
, min_fclk_khz
, min_dcfclk_khz
, socclk_khz
);
3104 if (dc
->debug
.sanity_checks
)
3105 hws
->funcs
.verify_allow_pstate_change_high(dc
);
3108 void dcn10_optimize_bandwidth(
3110 struct dc_state
*context
)
3112 struct dce_hwseq
*hws
= dc
->hwseq
;
3113 struct hubbub
*hubbub
= dc
->res_pool
->hubbub
;
3114 int min_fclk_khz
, min_dcfclk_khz
, socclk_khz
;
3116 if (dc
->debug
.sanity_checks
)
3117 hws
->funcs
.verify_allow_pstate_change_high(dc
);
3119 if (!IS_FPGA_MAXIMUS_DC(dc
->ctx
->dce_environment
)) {
3120 if (context
->stream_count
== 0)
3121 context
->bw_ctx
.bw
.dcn
.clk
.phyclk_khz
= 0;
3123 dc
->clk_mgr
->funcs
->update_clocks(
3129 hubbub
->funcs
->program_watermarks(hubbub
,
3130 &context
->bw_ctx
.bw
.dcn
.watermarks
,
3131 dc
->res_pool
->ref_clocks
.dchub_ref_clock_inKhz
/ 1000,
3134 dcn10_stereo_hw_frame_pack_wa(dc
, context
);
3136 if (dc
->debug
.pplib_wm_report_mode
== WM_REPORT_OVERRIDE
) {
3139 dc
, &min_fclk_khz
, &min_dcfclk_khz
, &socclk_khz
);
3141 dcn_bw_notify_pplib_of_wm_ranges(
3142 dc
, min_fclk_khz
, min_dcfclk_khz
, socclk_khz
);
3145 if (dc
->debug
.sanity_checks
)
3146 hws
->funcs
.verify_allow_pstate_change_high(dc
);
3149 void dcn10_set_drr(struct pipe_ctx
**pipe_ctx
,
3150 int num_pipes
, struct dc_crtc_timing_adjust adjust
)
3153 struct drr_params params
= {0};
3154 // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
3155 unsigned int event_triggers
= 0x800;
3156 // Note DRR trigger events are generated regardless of whether num frames met.
3157 unsigned int num_frames
= 2;
3159 params
.vertical_total_max
= adjust
.v_total_max
;
3160 params
.vertical_total_min
= adjust
.v_total_min
;
3161 params
.vertical_total_mid
= adjust
.v_total_mid
;
3162 params
.vertical_total_mid_frame_num
= adjust
.v_total_mid_frame_num
;
3163 /* TODO: If multiple pipes are to be supported, you need
3164 * some GSL stuff. Static screen triggers may be programmed differently
3167 for (i
= 0; i
< num_pipes
; i
++) {
3168 if ((pipe_ctx
[i
]->stream_res
.tg
!= NULL
) && pipe_ctx
[i
]->stream_res
.tg
->funcs
) {
3169 if (pipe_ctx
[i
]->stream_res
.tg
->funcs
->set_drr
)
3170 pipe_ctx
[i
]->stream_res
.tg
->funcs
->set_drr(
3171 pipe_ctx
[i
]->stream_res
.tg
, ¶ms
);
3172 if (adjust
.v_total_max
!= 0 && adjust
.v_total_min
!= 0)
3173 if (pipe_ctx
[i
]->stream_res
.tg
->funcs
->set_static_screen_control
)
3174 pipe_ctx
[i
]->stream_res
.tg
->funcs
->set_static_screen_control(
3175 pipe_ctx
[i
]->stream_res
.tg
,
3176 event_triggers
, num_frames
);
3181 void dcn10_get_position(struct pipe_ctx
**pipe_ctx
,
3183 struct crtc_position
*position
)
3187 /* TODO: handle pipes > 1
3189 for (i
= 0; i
< num_pipes
; i
++)
3190 pipe_ctx
[i
]->stream_res
.tg
->funcs
->get_position(pipe_ctx
[i
]->stream_res
.tg
, position
);
3193 void dcn10_set_static_screen_control(struct pipe_ctx
**pipe_ctx
,
3194 int num_pipes
, const struct dc_static_screen_params
*params
)
3197 unsigned int triggers
= 0;
3199 if (params
->triggers
.surface_update
)
3201 if (params
->triggers
.cursor_update
)
3203 if (params
->triggers
.force_trigger
)
3206 for (i
= 0; i
< num_pipes
; i
++)
3207 pipe_ctx
[i
]->stream_res
.tg
->funcs
->
3208 set_static_screen_control(pipe_ctx
[i
]->stream_res
.tg
,
3209 triggers
, params
->num_frames
);
3212 static void dcn10_config_stereo_parameters(
3213 struct dc_stream_state
*stream
, struct crtc_stereo_flags
*flags
)
3215 enum view_3d_format view_format
= stream
->view_format
;
3216 enum dc_timing_3d_format timing_3d_format
=\
3217 stream
->timing
.timing_3d_format
;
3218 bool non_stereo_timing
= false;
3220 if (timing_3d_format
== TIMING_3D_FORMAT_NONE
||
3221 timing_3d_format
== TIMING_3D_FORMAT_SIDE_BY_SIDE
||
3222 timing_3d_format
== TIMING_3D_FORMAT_TOP_AND_BOTTOM
)
3223 non_stereo_timing
= true;
3225 if (non_stereo_timing
== false &&
3226 view_format
== VIEW_3D_FORMAT_FRAME_SEQUENTIAL
) {
3228 flags
->PROGRAM_STEREO
= 1;
3229 flags
->PROGRAM_POLARITY
= 1;
3230 if (timing_3d_format
== TIMING_3D_FORMAT_FRAME_ALTERNATE
||
3231 timing_3d_format
== TIMING_3D_FORMAT_INBAND_FA
||
3232 timing_3d_format
== TIMING_3D_FORMAT_DP_HDMI_INBAND_FA
||
3233 timing_3d_format
== TIMING_3D_FORMAT_SIDEBAND_FA
) {
3235 if (stream
->link
&& stream
->link
->ddc
) {
3236 enum display_dongle_type dongle
= \
3237 stream
->link
->ddc
->dongle_type
;
3239 if (dongle
== DISPLAY_DONGLE_DP_VGA_CONVERTER
||
3240 dongle
== DISPLAY_DONGLE_DP_DVI_CONVERTER
||
3241 dongle
== DISPLAY_DONGLE_DP_HDMI_CONVERTER
)
3242 flags
->DISABLE_STEREO_DP_SYNC
= 1;
3245 flags
->RIGHT_EYE_POLARITY
=\
3246 stream
->timing
.flags
.RIGHT_EYE_3D_POLARITY
;
3247 if (timing_3d_format
== TIMING_3D_FORMAT_HW_FRAME_PACKING
)
3248 flags
->FRAME_PACKED
= 1;
3254 void dcn10_setup_stereo(struct pipe_ctx
*pipe_ctx
, struct dc
*dc
)
3256 struct crtc_stereo_flags flags
= { 0 };
3257 struct dc_stream_state
*stream
= pipe_ctx
->stream
;
3259 dcn10_config_stereo_parameters(stream
, &flags
);
3261 if (stream
->timing
.timing_3d_format
== TIMING_3D_FORMAT_SIDEBAND_FA
) {
3262 if (!dc_set_generic_gpio_for_stereo(true, dc
->ctx
->gpio_service
))
3263 dc_set_generic_gpio_for_stereo(false, dc
->ctx
->gpio_service
);
3265 dc_set_generic_gpio_for_stereo(false, dc
->ctx
->gpio_service
);
3268 pipe_ctx
->stream_res
.opp
->funcs
->opp_program_stereo(
3269 pipe_ctx
->stream_res
.opp
,
3270 flags
.PROGRAM_STEREO
== 1,
3273 pipe_ctx
->stream_res
.tg
->funcs
->program_stereo(
3274 pipe_ctx
->stream_res
.tg
,
3281 static struct hubp
*get_hubp_by_inst(struct resource_pool
*res_pool
, int mpcc_inst
)
3285 for (i
= 0; i
< res_pool
->pipe_count
; i
++) {
3286 if (res_pool
->hubps
[i
]->inst
== mpcc_inst
)
3287 return res_pool
->hubps
[i
];
3293 void dcn10_wait_for_mpcc_disconnect(
3295 struct resource_pool
*res_pool
,
3296 struct pipe_ctx
*pipe_ctx
)
3298 struct dce_hwseq
*hws
= dc
->hwseq
;
3301 if (dc
->debug
.sanity_checks
) {
3302 hws
->funcs
.verify_allow_pstate_change_high(dc
);
3305 if (!pipe_ctx
->stream_res
.opp
)
3308 for (mpcc_inst
= 0; mpcc_inst
< MAX_PIPES
; mpcc_inst
++) {
3309 if (pipe_ctx
->stream_res
.opp
->mpcc_disconnect_pending
[mpcc_inst
]) {
3310 struct hubp
*hubp
= get_hubp_by_inst(res_pool
, mpcc_inst
);
3312 if (pipe_ctx
->stream_res
.tg
->funcs
->is_tg_enabled(pipe_ctx
->stream_res
.tg
))
3313 res_pool
->mpc
->funcs
->wait_for_idle(res_pool
->mpc
, mpcc_inst
);
3314 pipe_ctx
->stream_res
.opp
->mpcc_disconnect_pending
[mpcc_inst
] = false;
3315 hubp
->funcs
->set_blank(hubp
, true);
3319 if (dc
->debug
.sanity_checks
) {
3320 hws
->funcs
.verify_allow_pstate_change_high(dc
);
3325 bool dcn10_dummy_display_power_gating(
3327 uint8_t controller_id
,
3328 struct dc_bios
*dcb
,
3329 enum pipe_gating_control power_gating
)
3334 void dcn10_update_pending_status(struct pipe_ctx
*pipe_ctx
)
3336 struct dc_plane_state
*plane_state
= pipe_ctx
->plane_state
;
3337 struct timing_generator
*tg
= pipe_ctx
->stream_res
.tg
;
3339 struct dc
*dc
= pipe_ctx
->stream
->ctx
->dc
;
3341 if (plane_state
== NULL
)
3344 flip_pending
= pipe_ctx
->plane_res
.hubp
->funcs
->hubp_is_flip_pending(
3345 pipe_ctx
->plane_res
.hubp
);
3347 plane_state
->status
.is_flip_pending
= plane_state
->status
.is_flip_pending
|| flip_pending
;
3350 plane_state
->status
.current_address
= plane_state
->status
.requested_address
;
3352 if (plane_state
->status
.current_address
.type
== PLN_ADDR_TYPE_GRPH_STEREO
&&
3353 tg
->funcs
->is_stereo_left_eye
) {
3354 plane_state
->status
.is_right_eye
=
3355 !tg
->funcs
->is_stereo_left_eye(pipe_ctx
->stream_res
.tg
);
3358 if (dc
->hwseq
->wa_state
.disallow_self_refresh_during_multi_plane_transition_applied
) {
3359 struct dce_hwseq
*hwseq
= dc
->hwseq
;
3360 struct timing_generator
*tg
= dc
->res_pool
->timing_generators
[0];
3361 unsigned int cur_frame
= tg
->funcs
->get_frame_count(tg
);
3363 if (cur_frame
!= hwseq
->wa_state
.disallow_self_refresh_during_multi_plane_transition_applied_on_frame
) {
3364 struct hubbub
*hubbub
= dc
->res_pool
->hubbub
;
3366 hubbub
->funcs
->allow_self_refresh_control(hubbub
, !dc
->debug
.disable_stutter
);
3367 hwseq
->wa_state
.disallow_self_refresh_during_multi_plane_transition_applied
= false;
3372 void dcn10_update_dchub(struct dce_hwseq
*hws
, struct dchub_init_data
*dh_data
)
3374 struct hubbub
*hubbub
= hws
->ctx
->dc
->res_pool
->hubbub
;
3376 /* In DCN, this programming sequence is owned by the hubbub */
3377 hubbub
->funcs
->update_dchub(hubbub
, dh_data
);
3380 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx
*pipe_ctx
)
3382 struct pipe_ctx
*test_pipe
, *split_pipe
;
3383 const struct scaler_data
*scl_data
= &pipe_ctx
->plane_res
.scl_data
;
3384 struct rect r1
= scl_data
->recout
, r2
, r2_half
;
3385 int r1_r
= r1
.x
+ r1
.width
, r1_b
= r1
.y
+ r1
.height
, r2_r
, r2_b
;
3386 int cur_layer
= pipe_ctx
->plane_state
->layer_index
;
3389 * Disable the cursor if there's another pipe above this with a
3390 * plane that contains this pipe's viewport to prevent double cursor
3391 * and incorrect scaling artifacts.
3393 for (test_pipe
= pipe_ctx
->top_pipe
; test_pipe
;
3394 test_pipe
= test_pipe
->top_pipe
) {
3395 // Skip invisible layer and pipe-split plane on same layer
3396 if (!test_pipe
->plane_state
||
3397 !test_pipe
->plane_state
->visible
||
3398 test_pipe
->plane_state
->layer_index
== cur_layer
)
3401 r2
= test_pipe
->plane_res
.scl_data
.recout
;
3402 r2_r
= r2
.x
+ r2
.width
;
3403 r2_b
= r2
.y
+ r2
.height
;
3404 split_pipe
= test_pipe
;
3407 * There is another half plane on same layer because of
3408 * pipe-split, merge together per same height.
3410 for (split_pipe
= pipe_ctx
->top_pipe
; split_pipe
;
3411 split_pipe
= split_pipe
->top_pipe
)
3412 if (split_pipe
->plane_state
->layer_index
== test_pipe
->plane_state
->layer_index
) {
3413 r2_half
= split_pipe
->plane_res
.scl_data
.recout
;
3414 r2
.x
= (r2_half
.x
< r2
.x
) ? r2_half
.x
: r2
.x
;
3415 r2
.width
= r2
.width
+ r2_half
.width
;
3416 r2_r
= r2
.x
+ r2
.width
;
3420 if (r1
.x
>= r2
.x
&& r1
.y
>= r2
.y
&& r1_r
<= r2_r
&& r1_b
<= r2_b
)
3427 void dcn10_set_cursor_position(struct pipe_ctx
*pipe_ctx
)
3429 struct dc_cursor_position pos_cpy
= pipe_ctx
->stream
->cursor_position
;
3430 struct hubp
*hubp
= pipe_ctx
->plane_res
.hubp
;
3431 struct dpp
*dpp
= pipe_ctx
->plane_res
.dpp
;
3432 struct dc_cursor_mi_param param
= {
3433 .pixel_clk_khz
= pipe_ctx
->stream
->timing
.pix_clk_100hz
/ 10,
3434 .ref_clk_khz
= pipe_ctx
->stream
->ctx
->dc
->res_pool
->ref_clocks
.dchub_ref_clock_inKhz
,
3435 .viewport
= pipe_ctx
->plane_res
.scl_data
.viewport
,
3436 .h_scale_ratio
= pipe_ctx
->plane_res
.scl_data
.ratios
.horz
,
3437 .v_scale_ratio
= pipe_ctx
->plane_res
.scl_data
.ratios
.vert
,
3438 .rotation
= pipe_ctx
->plane_state
->rotation
,
3439 .mirror
= pipe_ctx
->plane_state
->horizontal_mirror
3441 bool pipe_split_on
= false;
3442 bool odm_combine_on
= (pipe_ctx
->next_odm_pipe
!= NULL
) ||
3443 (pipe_ctx
->prev_odm_pipe
!= NULL
);
3445 int x_plane
= pipe_ctx
->plane_state
->dst_rect
.x
;
3446 int y_plane
= pipe_ctx
->plane_state
->dst_rect
.y
;
3447 int x_pos
= pos_cpy
.x
;
3448 int y_pos
= pos_cpy
.y
;
3450 if ((pipe_ctx
->top_pipe
!= NULL
) || (pipe_ctx
->bottom_pipe
!= NULL
)) {
3451 if ((pipe_ctx
->plane_state
->src_rect
.width
!= pipe_ctx
->plane_res
.scl_data
.viewport
.width
) ||
3452 (pipe_ctx
->plane_state
->src_rect
.height
!= pipe_ctx
->plane_res
.scl_data
.viewport
.height
)) {
3453 pipe_split_on
= true;
3458 * DC cursor is stream space, HW cursor is plane space and drawn
3459 * as part of the framebuffer.
3461 * Cursor position can't be negative, but hotspot can be used to
3462 * shift cursor out of the plane bounds. Hotspot must be smaller
3463 * than the cursor size.
3467 * Translate cursor from stream space to plane space.
3469 * If the cursor is scaled then we need to scale the position
3470 * to be in the approximately correct place. We can't do anything
3471 * about the actual size being incorrect, that's a limitation of
3474 if (param
.rotation
== ROTATION_ANGLE_90
|| param
.rotation
== ROTATION_ANGLE_270
) {
3475 x_pos
= (x_pos
- x_plane
) * pipe_ctx
->plane_state
->src_rect
.height
/
3476 pipe_ctx
->plane_state
->dst_rect
.width
;
3477 y_pos
= (y_pos
- y_plane
) * pipe_ctx
->plane_state
->src_rect
.width
/
3478 pipe_ctx
->plane_state
->dst_rect
.height
;
3480 x_pos
= (x_pos
- x_plane
) * pipe_ctx
->plane_state
->src_rect
.width
/
3481 pipe_ctx
->plane_state
->dst_rect
.width
;
3482 y_pos
= (y_pos
- y_plane
) * pipe_ctx
->plane_state
->src_rect
.height
/
3483 pipe_ctx
->plane_state
->dst_rect
.height
;
3487 * If the cursor's source viewport is clipped then we need to
3488 * translate the cursor to appear in the correct position on
3491 * This translation isn't affected by scaling so it needs to be
3492 * done *after* we adjust the position for the scale factor.
3494 * This is only done by opt-in for now since there are still
3495 * some usecases like tiled display that might enable the
3496 * cursor on both streams while expecting dc to clip it.
3498 if (pos_cpy
.translate_by_source
) {
3499 x_pos
+= pipe_ctx
->plane_state
->src_rect
.x
;
3500 y_pos
+= pipe_ctx
->plane_state
->src_rect
.y
;
3504 * If the position is negative then we need to add to the hotspot
3505 * to shift the cursor outside the plane.
3509 pos_cpy
.x_hotspot
-= x_pos
;
3514 pos_cpy
.y_hotspot
-= y_pos
;
3518 pos_cpy
.x
= (uint32_t)x_pos
;
3519 pos_cpy
.y
= (uint32_t)y_pos
;
3521 if (pipe_ctx
->plane_state
->address
.type
3522 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE
)
3523 pos_cpy
.enable
= false;
3525 if (pos_cpy
.enable
&& dcn10_can_pipe_disable_cursor(pipe_ctx
))
3526 pos_cpy
.enable
= false;
3529 if (param
.rotation
== ROTATION_ANGLE_0
) {
3530 int viewport_width
=
3531 pipe_ctx
->plane_res
.scl_data
.viewport
.width
;
3533 pipe_ctx
->plane_res
.scl_data
.viewport
.x
;
3536 if (pipe_split_on
|| odm_combine_on
) {
3537 if (pos_cpy
.x
>= viewport_width
+ viewport_x
) {
3538 pos_cpy
.x
= 2 * viewport_width
3539 - pos_cpy
.x
+ 2 * viewport_x
;
3541 uint32_t temp_x
= pos_cpy
.x
;
3543 pos_cpy
.x
= 2 * viewport_x
- pos_cpy
.x
;
3544 if (temp_x
>= viewport_x
+
3545 (int)hubp
->curs_attr
.width
|| pos_cpy
.x
3546 <= (int)hubp
->curs_attr
.width
+
3547 pipe_ctx
->plane_state
->src_rect
.x
) {
3548 pos_cpy
.x
= temp_x
+ viewport_width
;
3552 pos_cpy
.x
= viewport_width
- pos_cpy
.x
+ 2 * viewport_x
;
3556 // Swap axis and mirror horizontally
3557 else if (param
.rotation
== ROTATION_ANGLE_90
) {
3558 uint32_t temp_x
= pos_cpy
.x
;
3560 pos_cpy
.x
= pipe_ctx
->plane_res
.scl_data
.viewport
.width
-
3561 (pos_cpy
.y
- pipe_ctx
->plane_res
.scl_data
.viewport
.x
) + pipe_ctx
->plane_res
.scl_data
.viewport
.x
;
3564 // Swap axis and mirror vertically
3565 else if (param
.rotation
== ROTATION_ANGLE_270
) {
3566 uint32_t temp_y
= pos_cpy
.y
;
3567 int viewport_height
=
3568 pipe_ctx
->plane_res
.scl_data
.viewport
.height
;
3570 pipe_ctx
->plane_res
.scl_data
.viewport
.y
;
3573 * Display groups that are 1xnY, have pos_cpy.x > 2 * viewport.height
3574 * For pipe split cases:
3575 * - apply offset of viewport.y to normalize pos_cpy.x
3576 * - calculate the pos_cpy.y as before
3577 * - shift pos_cpy.y back by same offset to get final value
3578 * - since we iterate through both pipes, use the lower
3579 * viewport.y for offset
3580 * For non pipe split cases, use the same calculation for
3581 * pos_cpy.y as the 180 degree rotation case below,
3582 * but use pos_cpy.x as our input because we are rotating
3585 if (pipe_split_on
|| odm_combine_on
) {
3586 int pos_cpy_x_offset
;
3587 int other_pipe_viewport_y
;
3589 if (pipe_split_on
) {
3590 if (pipe_ctx
->bottom_pipe
) {
3591 other_pipe_viewport_y
=
3592 pipe_ctx
->bottom_pipe
->plane_res
.scl_data
.viewport
.y
;
3594 other_pipe_viewport_y
=
3595 pipe_ctx
->top_pipe
->plane_res
.scl_data
.viewport
.y
;
3598 if (pipe_ctx
->next_odm_pipe
) {
3599 other_pipe_viewport_y
=
3600 pipe_ctx
->next_odm_pipe
->plane_res
.scl_data
.viewport
.y
;
3602 other_pipe_viewport_y
=
3603 pipe_ctx
->prev_odm_pipe
->plane_res
.scl_data
.viewport
.y
;
3606 pos_cpy_x_offset
= (viewport_y
> other_pipe_viewport_y
) ?
3607 other_pipe_viewport_y
: viewport_y
;
3608 pos_cpy
.x
-= pos_cpy_x_offset
;
3609 if (pos_cpy
.x
> viewport_height
) {
3610 pos_cpy
.x
= pos_cpy
.x
- viewport_height
;
3611 pos_cpy
.y
= viewport_height
- pos_cpy
.x
;
3613 pos_cpy
.y
= 2 * viewport_height
- pos_cpy
.x
;
3615 pos_cpy
.y
+= pos_cpy_x_offset
;
3617 pos_cpy
.y
= (2 * viewport_y
) + viewport_height
- pos_cpy
.x
;
3621 // Mirror horizontally and vertically
3622 else if (param
.rotation
== ROTATION_ANGLE_180
) {
3623 int viewport_width
=
3624 pipe_ctx
->plane_res
.scl_data
.viewport
.width
;
3626 pipe_ctx
->plane_res
.scl_data
.viewport
.x
;
3628 if (!param
.mirror
) {
3629 if (pipe_split_on
|| odm_combine_on
) {
3630 if (pos_cpy
.x
>= viewport_width
+ viewport_x
) {
3631 pos_cpy
.x
= 2 * viewport_width
3632 - pos_cpy
.x
+ 2 * viewport_x
;
3634 uint32_t temp_x
= pos_cpy
.x
;
3636 pos_cpy
.x
= 2 * viewport_x
- pos_cpy
.x
;
3637 if (temp_x
>= viewport_x
+
3638 (int)hubp
->curs_attr
.width
|| pos_cpy
.x
3639 <= (int)hubp
->curs_attr
.width
+
3640 pipe_ctx
->plane_state
->src_rect
.x
) {
3641 pos_cpy
.x
= 2 * viewport_width
- temp_x
;
3645 pos_cpy
.x
= viewport_width
- pos_cpy
.x
+ 2 * viewport_x
;
3650 * Display groups that are 1xnY, have pos_cpy.y > viewport.height
3652 * delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
3653 * pos_cpy.y_new = viewport.y + delta_from_bottom
3655 * pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
3657 pos_cpy
.y
= (2 * pipe_ctx
->plane_res
.scl_data
.viewport
.y
) +
3658 pipe_ctx
->plane_res
.scl_data
.viewport
.height
- pos_cpy
.y
;
3661 hubp
->funcs
->set_cursor_position(hubp
, &pos_cpy
, ¶m
);
3662 dpp
->funcs
->set_cursor_position(dpp
, &pos_cpy
, ¶m
, hubp
->curs_attr
.width
, hubp
->curs_attr
.height
);
3665 void dcn10_set_cursor_attribute(struct pipe_ctx
*pipe_ctx
)
3667 struct dc_cursor_attributes
*attributes
= &pipe_ctx
->stream
->cursor_attributes
;
3669 pipe_ctx
->plane_res
.hubp
->funcs
->set_cursor_attributes(
3670 pipe_ctx
->plane_res
.hubp
, attributes
);
3671 pipe_ctx
->plane_res
.dpp
->funcs
->set_cursor_attributes(
3672 pipe_ctx
->plane_res
.dpp
, attributes
);
3675 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx
*pipe_ctx
)
3677 uint32_t sdr_white_level
= pipe_ctx
->stream
->cursor_attributes
.sdr_white_level
;
3678 struct fixed31_32 multiplier
;
3679 struct dpp_cursor_attributes opt_attr
= { 0 };
3680 uint32_t hw_scale
= 0x3c00; // 1.0 default multiplier
3681 struct custom_float_format fmt
;
3683 if (!pipe_ctx
->plane_res
.dpp
->funcs
->set_optional_cursor_attributes
)
3686 fmt
.exponenta_bits
= 5;
3687 fmt
.mantissa_bits
= 10;
3690 if (sdr_white_level
> 80) {
3691 multiplier
= dc_fixpt_from_fraction(sdr_white_level
, 80);
3692 convert_to_custom_float_format(multiplier
, &fmt
, &hw_scale
);
3695 opt_attr
.scale
= hw_scale
;
3698 pipe_ctx
->plane_res
.dpp
->funcs
->set_optional_cursor_attributes(
3699 pipe_ctx
->plane_res
.dpp
, &opt_attr
);
3703 * apply_front_porch_workaround TODO FPGA still need?
3705 * This is a workaround for a bug that has existed since R5xx and has not been
3706 * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
3708 static void apply_front_porch_workaround(
3709 struct dc_crtc_timing
*timing
)
3711 if (timing
->flags
.INTERLACE
== 1) {
3712 if (timing
->v_front_porch
< 2)
3713 timing
->v_front_porch
= 2;
3715 if (timing
->v_front_porch
< 1)
3716 timing
->v_front_porch
= 1;
3720 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx
*pipe_ctx
)
3722 const struct dc_crtc_timing
*dc_crtc_timing
= &pipe_ctx
->stream
->timing
;
3723 struct dc_crtc_timing patched_crtc_timing
;
3724 int vesa_sync_start
;
3726 int interlace_factor
;
3728 patched_crtc_timing
= *dc_crtc_timing
;
3729 apply_front_porch_workaround(&patched_crtc_timing
);
3731 interlace_factor
= patched_crtc_timing
.flags
.INTERLACE
? 2 : 1;
3733 vesa_sync_start
= patched_crtc_timing
.v_addressable
+
3734 patched_crtc_timing
.v_border_bottom
+
3735 patched_crtc_timing
.v_front_porch
;
3737 asic_blank_end
= (patched_crtc_timing
.v_total
-
3739 patched_crtc_timing
.v_border_top
)
3742 return asic_blank_end
-
3743 pipe_ctx
->pipe_dlg_param
.vstartup_start
+ 1;
3746 void dcn10_calc_vupdate_position(
3748 struct pipe_ctx
*pipe_ctx
,
3749 uint32_t *start_line
,
3752 const struct dc_crtc_timing
*timing
= &pipe_ctx
->stream
->timing
;
3753 int vupdate_pos
= dc
->hwss
.get_vupdate_offset_from_vsync(pipe_ctx
);
3755 if (vupdate_pos
>= 0)
3756 *start_line
= vupdate_pos
- ((vupdate_pos
/ timing
->v_total
) * timing
->v_total
);
3758 *start_line
= vupdate_pos
+ ((-vupdate_pos
/ timing
->v_total
) + 1) * timing
->v_total
- 1;
3759 *end_line
= (*start_line
+ 2) % timing
->v_total
;
3762 static void dcn10_cal_vline_position(
3764 struct pipe_ctx
*pipe_ctx
,
3765 uint32_t *start_line
,
3768 const struct dc_crtc_timing
*timing
= &pipe_ctx
->stream
->timing
;
3769 int vline_pos
= pipe_ctx
->stream
->periodic_interrupt
.lines_offset
;
3771 if (pipe_ctx
->stream
->periodic_interrupt
.ref_point
== START_V_UPDATE
) {
3774 else if (vline_pos
< 0)
3777 vline_pos
+= dc
->hwss
.get_vupdate_offset_from_vsync(pipe_ctx
);
3779 *start_line
= vline_pos
- ((vline_pos
/ timing
->v_total
) * timing
->v_total
);
3781 *start_line
= vline_pos
+ ((-vline_pos
/ timing
->v_total
) + 1) * timing
->v_total
- 1;
3782 *end_line
= (*start_line
+ 2) % timing
->v_total
;
3783 } else if (pipe_ctx
->stream
->periodic_interrupt
.ref_point
== START_V_SYNC
) {
3784 // vsync is line 0 so start_line is just the requested line offset
3785 *start_line
= vline_pos
;
3786 *end_line
= (*start_line
+ 2) % timing
->v_total
;
3791 void dcn10_setup_periodic_interrupt(
3793 struct pipe_ctx
*pipe_ctx
)
3795 struct timing_generator
*tg
= pipe_ctx
->stream_res
.tg
;
3796 uint32_t start_line
= 0;
3797 uint32_t end_line
= 0;
3799 dcn10_cal_vline_position(dc
, pipe_ctx
, &start_line
, &end_line
);
3801 tg
->funcs
->setup_vertical_interrupt0(tg
, start_line
, end_line
);
3804 void dcn10_setup_vupdate_interrupt(struct dc
*dc
, struct pipe_ctx
*pipe_ctx
)
3806 struct timing_generator
*tg
= pipe_ctx
->stream_res
.tg
;
3807 int start_line
= dc
->hwss
.get_vupdate_offset_from_vsync(pipe_ctx
);
3809 if (start_line
< 0) {
3814 if (tg
->funcs
->setup_vertical_interrupt2
)
3815 tg
->funcs
->setup_vertical_interrupt2(tg
, start_line
);
3818 void dcn10_unblank_stream(struct pipe_ctx
*pipe_ctx
,
3819 struct dc_link_settings
*link_settings
)
3821 struct encoder_unblank_param params
= {0};
3822 struct dc_stream_state
*stream
= pipe_ctx
->stream
;
3823 struct dc_link
*link
= stream
->link
;
3824 struct dce_hwseq
*hws
= link
->dc
->hwseq
;
3826 /* only 3 items below are used by unblank */
3827 params
.timing
= pipe_ctx
->stream
->timing
;
3829 params
.link_settings
.link_rate
= link_settings
->link_rate
;
3831 if (dc_is_dp_signal(pipe_ctx
->stream
->signal
)) {
3832 if (params
.timing
.pixel_encoding
== PIXEL_ENCODING_YCBCR420
)
3833 params
.timing
.pix_clk_100hz
/= 2;
3834 pipe_ctx
->stream_res
.stream_enc
->funcs
->dp_unblank(link
, pipe_ctx
->stream_res
.stream_enc
, ¶ms
);
3837 if (link
->local_sink
&& link
->local_sink
->sink_signal
== SIGNAL_TYPE_EDP
) {
3838 hws
->funcs
.edp_backlight_control(link
, true);
3842 void dcn10_send_immediate_sdp_message(struct pipe_ctx
*pipe_ctx
,
3843 const uint8_t *custom_sdp_message
,
3844 unsigned int sdp_message_size
)
3846 if (dc_is_dp_signal(pipe_ctx
->stream
->signal
)) {
3847 pipe_ctx
->stream_res
.stream_enc
->funcs
->send_immediate_sdp_message(
3848 pipe_ctx
->stream_res
.stream_enc
,
3853 enum dc_status
dcn10_set_clock(struct dc
*dc
,
3854 enum dc_clock_type clock_type
,
3858 struct dc_state
*context
= dc
->current_state
;
3859 struct dc_clock_config clock_cfg
= {0};
3860 struct dc_clocks
*current_clocks
= &context
->bw_ctx
.bw
.dcn
.clk
;
3862 if (!dc
->clk_mgr
|| !dc
->clk_mgr
->funcs
->get_clock
)
3863 return DC_FAIL_UNSUPPORTED_1
;
3865 dc
->clk_mgr
->funcs
->get_clock(dc
->clk_mgr
,
3866 context
, clock_type
, &clock_cfg
);
3868 if (clk_khz
> clock_cfg
.max_clock_khz
)
3869 return DC_FAIL_CLK_EXCEED_MAX
;
3871 if (clk_khz
< clock_cfg
.min_clock_khz
)
3872 return DC_FAIL_CLK_BELOW_MIN
;
3874 if (clk_khz
< clock_cfg
.bw_requirequired_clock_khz
)
3875 return DC_FAIL_CLK_BELOW_CFG_REQUIRED
;
3877 /*update internal request clock for update clock use*/
3878 if (clock_type
== DC_CLOCK_TYPE_DISPCLK
)
3879 current_clocks
->dispclk_khz
= clk_khz
;
3880 else if (clock_type
== DC_CLOCK_TYPE_DPPCLK
)
3881 current_clocks
->dppclk_khz
= clk_khz
;
3883 return DC_ERROR_UNEXPECTED
;
3885 if (dc
->clk_mgr
->funcs
->update_clocks
)
3886 dc
->clk_mgr
->funcs
->update_clocks(dc
->clk_mgr
,
3892 void dcn10_get_clock(struct dc
*dc
,
3893 enum dc_clock_type clock_type
,
3894 struct dc_clock_config
*clock_cfg
)
3896 struct dc_state
*context
= dc
->current_state
;
3898 if (dc
->clk_mgr
&& dc
->clk_mgr
->funcs
->get_clock
)
3899 dc
->clk_mgr
->funcs
->get_clock(dc
->clk_mgr
, context
, clock_type
, clock_cfg
);
3903 void dcn10_get_dcc_en_bits(struct dc
*dc
, int *dcc_en_bits
)
3905 struct resource_pool
*pool
= dc
->res_pool
;
3908 for (i
= 0; i
< pool
->pipe_count
; i
++) {
3909 struct hubp
*hubp
= pool
->hubps
[i
];
3910 struct dcn_hubp_state
*s
= &(TO_DCN10_HUBP(hubp
)->state
);
3912 hubp
->funcs
->hubp_read_state(hubp
);
3915 dcc_en_bits
[i
] = s
->dcc_en
? 1 : 0;