/*
 * Source: drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
 * (thirdparty/kernel/stable.git mirror, git.ipfire.org)
 * Commit subject: drm/amd/display: Fix 4to1 MPC black screen with DPP RCO
 */
1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
30 #include "resource.h"
31 #include "custom_float.h"
32 #include "dcn10_hw_sequencer.h"
33 #include "dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
35 #include "abm.h"
36 #include "dmcu.h"
37 #include "dcn10_optc.h"
38 #include "dcn10_dpp.h"
39 #include "dcn10_mpc.h"
40 #include "timing_generator.h"
41 #include "opp.h"
42 #include "ipp.h"
43 #include "mpc.h"
44 #include "reg_helper.h"
45 #include "dcn10_hubp.h"
46 #include "dcn10_hubbub.h"
47 #include "dcn10_cm_common.h"
48 #include "dccg.h"
49 #include "clk_mgr.h"
50 #include "link_hwss.h"
51 #include "dpcd_defs.h"
52 #include "dsc.h"
53 #include "dce/dmub_psr.h"
54 #include "dc_dmub_srv.h"
55 #include "dce/dmub_hw_lock_mgr.h"
56 #include "dc_trace.h"
57 #include "dce/dmub_outbox.h"
58 #include "link.h"
59
/* Logger hook is a no-op in this file; DTN_INFO does the actual output. */
#define DC_LOGGER_INIT(logger)

/* Register-access helpers: resolve through the hwseq context by convention;
 * callers must have a local `hws` (struct dce_hwseq *) in scope.
 */
#define CTX \
	hws->ctx
#define REG(reg)\
	hws->regs->reg

#undef FN
/* Expand to the shift/mask pair used by REG_UPDATE/REG_GET/REG_WAIT. */
#define FN(reg_name, field_name) \
	hws->shifts->field_name, hws->masks->field_name

/*print is 17 wide, first two characters are spaces*/
/* Requires locals `dc_ctx` and `log_ctx` at the call site. */
#define DTN_INFO_MICRO_SEC(ref_cycle) \
	print_microsec(dc_ctx, log_ctx, ref_cycle)

#define GAMMA_HW_POINTS_NUM 256

/* PGFSM power-status field values polled via REG_WAIT in the pg_control
 * helpers below.
 */
#define PGFSM_POWER_ON 0
#define PGFSM_POWER_OFF 2
79
80 static void print_microsec(struct dc_context *dc_ctx,
81 struct dc_log_buffer_ctx *log_ctx,
82 uint32_t ref_cycle)
83 {
84 const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
85 static const unsigned int frac = 1000;
86 uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
87
88 DTN_INFO(" %11d.%03d",
89 us_x10 / frac,
90 us_x10 % frac);
91 }
92
93 void dcn10_lock_all_pipes(struct dc *dc,
94 struct dc_state *context,
95 bool lock)
96 {
97 struct pipe_ctx *pipe_ctx;
98 struct pipe_ctx *old_pipe_ctx;
99 struct timing_generator *tg;
100 int i;
101
102 for (i = 0; i < dc->res_pool->pipe_count; i++) {
103 old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
104 pipe_ctx = &context->res_ctx.pipe_ctx[i];
105 tg = pipe_ctx->stream_res.tg;
106
107 /*
108 * Only lock the top pipe's tg to prevent redundant
109 * (un)locking. Also skip if pipe is disabled.
110 */
111 if (pipe_ctx->top_pipe ||
112 !pipe_ctx->stream ||
113 (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
114 !tg->funcs->is_tg_enabled(tg))
115 continue;
116
117 if (lock)
118 dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
119 else
120 dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
121 }
122 }
123
/* Dump the MPC and DPP CRC result registers, when they are defined for
 * this ASIC (a zero register offset is treated as "not present").
 */
static void log_mpc_crc(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dce_hwseq *hws = dc->hwseq;

	if (REG(MPC_CRC_RESULT_GB))
		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
				REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
				REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
}
137
/* Log all four HUBBUB watermark sets, each value converted from refclk
 * cycles to microseconds by DTN_INFO_MICRO_SEC.
 */
static void dcn10_log_hubbub_state(struct dc *dc,
		struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dcn_hubbub_wm wm;
	int i;

	/* Snapshot the watermark state from hardware before printing. */
	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);

	DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent"
			" sr_enter sr_exit dram_clk_change\n");

	/* Hardware exposes four watermark sets (A-D). */
	for (i = 0; i < 4; i++) {
		struct dcn_hubbub_wm_set *s;

		s = &wm.sets[i];
		DTN_INFO("WM_Set[%d]:", s->wm_set);
		DTN_INFO_MICRO_SEC(s->data_urgent);
		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
		DTN_INFO_MICRO_SEC(s->sr_enter);
		DTN_INFO_MICRO_SEC(s->sr_exit);
		DTN_INFO_MICRO_SEC(s->dram_clk_change);
		DTN_INFO("\n");
	}

	DTN_INFO("\n");
}
166
/*
 * Dump per-HUBP debug state: a summary line per unblanked pipe followed
 * by the RQ, DLG and TTU register groups read back from hardware.
 */
static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO(
		"HUBP: format addr_hi width height rot mir sw_mode dcc_en blank_en clock_en ttu_dis underflow min_ttu_vblank qos_low_wm qos_high_wm\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct hubp *hubp = pool->hubps[i];
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);

		/* Refresh the cached register snapshot before logging. */
		hubp->funcs->hubp_read_state(hubp);

		/* Blanked pipes are skipped in every section below. */
		if (!s->blank_en) {
			DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh",
					hubp->inst,
					s->pixel_format,
					s->inuse_addr_hi,
					s->viewport_width,
					s->viewport_height,
					s->rotation_angle,
					s->h_mirror_en,
					s->sw_mode,
					s->dcc_en,
					s->blank_en,
					s->clock_en,
					s->ttu_disable,
					s->underflow_status);
			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
			DTN_INFO("\n");
		}
	}

	/* Request queue (RQ) registers, luma then chroma. */
	DTN_INFO("\n=========RQ========\n");
	DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
		" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
		" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;

		if (!s->blank_en)
			DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
	}

	/* Display latency group (DLG) registers. */
	DTN_INFO("========DLG========\n");
	DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s "
			" dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq"
			" vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll"
			" rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
			" mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
			" rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
			" x_rp_dlay x_rr_sfl\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;

		if (!s->blank_en)
			DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
				" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
				" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
				dlg_regs->xfc_reg_remote_surface_flip_latency);
	}

	/* Time-to-underflow (TTU) registers. */
	DTN_INFO("========TTU========\n");
	DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c"
			" rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l"
			" qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;

		if (!s->blank_en)
			DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
	}
	DTN_INFO("\n");
}
280
/*
 * dcn10_log_hw_state - dump a human-readable snapshot of the display HW
 * state (HUBBUB, HUBP, DPP, MPCC, OTG, DSC, stream/link encoders, HPO
 * encoders and calculated clocks) into @log_ctx via DTN_INFO.
 *
 * Intended for debugfs / diagnostics; reads hardware state but only
 * writes it to clear the OTG underflow sticky bit (see below).
 */
void dcn10_log_hw_state(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO_BEGIN();

	dcn10_log_hubbub_state(dc, log_ctx);

	dcn10_log_hubp_states(dc, log_ctx);

	/* DPP section: gamma LUT modes and the gamut remap matrix. */
	DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
			" GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 "
			"C31 C32 C33 C34\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dpp *dpp = pool->dpps[i];
		struct dcn_dpp_state s = {0};

		dpp->funcs->dpp_read_state(dpp, &s);

		if (!s.is_enabled)
			continue;

		DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s"
				"%8x %08xh %08xh %08xh %08xh %08xh %08xh",
				dpp->inst,
				s.igam_input_format,
				(s.igam_lut_mode == 0) ? "BypassFixed" :
					((s.igam_lut_mode == 1) ? "BypassFloat" :
					((s.igam_lut_mode == 2) ? "RAM" :
					((s.igam_lut_mode == 3) ? "RAM" :
								"Unknown"))),
				(s.dgam_lut_mode == 0) ? "Bypass" :
					((s.dgam_lut_mode == 1) ? "sRGB" :
					((s.dgam_lut_mode == 2) ? "Ycc" :
					((s.dgam_lut_mode == 3) ? "RAM" :
					((s.dgam_lut_mode == 4) ? "RAM" :
								"Unknown")))),
				(s.rgam_lut_mode == 0) ? "Bypass" :
					((s.rgam_lut_mode == 1) ? "sRGB" :
					((s.rgam_lut_mode == 2) ? "Ycc" :
					((s.rgam_lut_mode == 3) ? "RAM" :
					((s.rgam_lut_mode == 4) ? "RAM" :
								"Unknown")))),
				s.gamut_remap_mode,
				s.gamut_remap_c11_c12,
				s.gamut_remap_c13_c14,
				s.gamut_remap_c21_c22,
				s.gamut_remap_c23_c24,
				s.gamut_remap_c31_c32,
				s.gamut_remap_c33_c34);
		DTN_INFO("\n");
	}
	DTN_INFO("\n");

	/* MPCC section: skip entries whose OPP id reads as 0xf
	 * (presumably "unassigned" — matches how the loop filters).
	 */
	DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct mpcc_state s = {0};

		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
		if (s.opp_id != 0xf)
			DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n",
				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
				s.idle);
	}
	DTN_INFO("\n");

	DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n");

	for (i = 0; i < pool->timing_generator_count; i++) {
		struct timing_generator *tg = pool->timing_generators[i];
		struct dcn_otg_state s = {0};
		/* Read shared OTG state registers for all DCNx */
		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);

		/*
		 * For DCN2 and greater, a register on the OPP is used to
		 * determine if the CRTC is blanked instead of the OTG. So use
		 * dpg_is_blanked() if exists, otherwise fallback on otg.
		 *
		 * TODO: Implement DCN-specific read_otg_state hooks.
		 */
		if (pool->opps[i]->funcs->dpg_is_blanked)
			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
		else
			s.blank_enabled = tg->funcs->is_blanked(tg);

		//only print if OTG master is enabled
		if ((s.otg_enabled & 1) == 0)
			continue;

		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n",
				tg->inst,
				s.v_blank_start,
				s.v_blank_end,
				s.v_sync_a_start,
				s.v_sync_a_end,
				s.v_sync_a_pol,
				s.v_total_max,
				s.v_total_min,
				s.v_total_max_sel,
				s.v_total_min_sel,
				s.h_blank_start,
				s.h_blank_end,
				s.h_sync_a_start,
				s.h_sync_a_end,
				s.h_sync_a_pol,
				s.h_total,
				s.v_total,
				s.underflow_occurred_status,
				s.blank_enabled);

		// Clear underflow for debug purposes
		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
		// This function is called only from Windows or Diags test environment, hence it's safe to clear
		// it from here without affecting the original intent.
		tg->funcs->clear_optc_underflow(tg);
	}
	DTN_INFO("\n");

	// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
	// TODO: Update golden log header to reflect this name change
	DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
	for (i = 0; i < pool->res_cap->num_dsc; i++) {
		struct display_stream_compressor *dsc = pool->dscs[i];
		struct dcn_dsc_state s = {0};

		dsc->funcs->dsc_read_state(dsc, &s);
		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
		dsc->inst,
			s.dsc_clock_en,
			s.dsc_slice_width,
			s.dsc_bits_per_pixel);
		DTN_INFO("\n");
	}
	DTN_INFO("\n");

	/* Stream encoder section; enc_read_state is optional per encoder. */
	DTN_INFO("S_ENC: DSC_MODE SEC_GSP7_LINE_NUM"
			" VBID6_LINE_REFERENCE VBID6_LINE_NUM SEC_GSP7_ENABLE SEC_STREAM_ENABLE\n");
	for (i = 0; i < pool->stream_enc_count; i++) {
		struct stream_encoder *enc = pool->stream_enc[i];
		struct enc_state s = {0};

		if (enc->funcs->enc_read_state) {
			enc->funcs->enc_read_state(enc, &s);
			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
				enc->id,
				s.dsc_mode,
				s.sec_gsp_pps_line_num,
				s.vbid6_line_reference,
				s.vbid6_line_num,
				s.sec_gsp_pps_enable,
				s.sec_stream_enable);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	/* Link encoder section; the encoder and its read_state hook may be
	 * absent for a given link.
	 */
	DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS DP_LINK_TRAINING_COMPLETE\n");
	for (i = 0; i < dc->link_count; i++) {
		struct link_encoder *lenc = dc->links[i]->link_enc;

		struct link_enc_state s = {0};

		if (lenc && lenc->funcs->read_state) {
			lenc->funcs->read_state(lenc, &s);
			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
				i,
				s.dphy_fec_en,
				s.dphy_fec_ready_shadow,
				s.dphy_fec_active_status,
				s.dp_link_training_complete);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
		"dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);

	log_mpc_crc(dc, log_ctx);

	{
		/* DP HPO (128b/132b) stream encoder section, if present. */
		if (pool->hpo_dp_stream_enc_count > 0) {
			DTN_INFO("DP HPO S_ENC: Enabled OTG Format Depth Vid SDP Compressed Link\n");
			for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
				struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
				struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];

				if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
					hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);

					DTN_INFO("[%d]: %d %d %6s %d %d %d %d %d\n",
							hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
							hpo_dp_se_state.stream_enc_enabled,
							hpo_dp_se_state.otg_inst,
							(hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
									((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
									(hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
							(hpo_dp_se_state.component_depth == 0) ? 6 :
									((hpo_dp_se_state.component_depth == 1) ? 8 :
									(hpo_dp_se_state.component_depth == 2) ? 10 : 12),
							hpo_dp_se_state.vid_stream_enabled,
							hpo_dp_se_state.sdp_enabled,
							hpo_dp_se_state.compressed_format,
							hpo_dp_se_state.mapped_to_link_enc);
				}
			}

			DTN_INFO("\n");
		}

		/* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
		if (pool->hpo_dp_link_enc_count) {
			DTN_INFO("DP HPO L_ENC: Enabled Mode Lanes Stream Slots VC Rate X VC Rate Y\n");

			for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
				struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
				struct hpo_dp_link_enc_state hpo_dp_le_state = {0};

				if (hpo_dp_link_enc->funcs->read_state) {
					hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
					DTN_INFO("[%d]: %d %6s %d %d %d %d %d\n",
							hpo_dp_link_enc->inst,
							hpo_dp_le_state.link_enc_enabled,
							(hpo_dp_le_state.link_mode == 0) ? "TPS1" :
									(hpo_dp_le_state.link_mode == 1) ? "TPS2" :
									(hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
							hpo_dp_le_state.lane_count,
							hpo_dp_le_state.stream_src[0],
							hpo_dp_le_state.slot_count[0],
							hpo_dp_le_state.vc_rate_x[0],
							hpo_dp_le_state.vc_rate_y[0]);
					DTN_INFO("\n");
				}
			}

			DTN_INFO("\n");
		}
	}

	DTN_INFO_END();
}
534
535 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
536 {
537 struct hubp *hubp = pipe_ctx->plane_res.hubp;
538 struct timing_generator *tg = pipe_ctx->stream_res.tg;
539
540 if (tg->funcs->is_optc_underflow_occurred(tg)) {
541 tg->funcs->clear_optc_underflow(tg);
542 return true;
543 }
544
545 if (hubp->funcs->hubp_get_underflow_status(hubp)) {
546 hubp->funcs->hubp_clear_underflow(hubp);
547 return true;
548 }
549 return false;
550 }
551
552 void dcn10_enable_power_gating_plane(
553 struct dce_hwseq *hws,
554 bool enable)
555 {
556 bool force_on = true; /* disable power gating */
557
558 if (enable)
559 force_on = false;
560
561 /* DCHUBP0/1/2/3 */
562 REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
563 REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
564 REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
565 REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
566
567 /* DPP0/1/2/3 */
568 REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
569 REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
570 REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
571 REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
572 }
573
/*
 * Take the display controllers out of legacy VGA mode. If none of the
 * four D*VGA controls is enabled this is a no-op; otherwise all four are
 * cleared and the VGA_TEST render sequence is kicked (see HW note below).
 */
void dcn10_disable_vga(
	struct dce_hwseq *hws)
{
	unsigned int in_vga1_mode = 0;
	unsigned int in_vga2_mode = 0;
	unsigned int in_vga3_mode = 0;
	unsigned int in_vga4_mode = 0;

	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);

	/* Nothing to do if VGA mode is already off everywhere. */
	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
			in_vga3_mode == 0 && in_vga4_mode == 0)
		return;

	REG_WRITE(D1VGA_CONTROL, 0);
	REG_WRITE(D2VGA_CONTROL, 0);
	REG_WRITE(D3VGA_CONTROL, 0);
	REG_WRITE(D4VGA_CONTROL, 0);

	/* HW Engineer's Notes:
	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
	 *
	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
	 */
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
}
606
/**
 * dcn10_dpp_pg_control - DPP power gate control.
 *
 * @hws: dce_hwseq reference.
 * @dpp_inst: DPP instance reference.
 * @power_on: true to power the DPP domain up (release the gate), false
 *            to power it down (assert the gate). Note the register
 *            field is the inverse: POWER_GATE = power_on ? 0 : 1.
 *
 * Program the power gate of the given DPP instance and wait (up to
 * 1000 polls of 1 us) for the PGFSM to report the requested state.
 * No-op when DPP power gating is disabled via debug options or when
 * the DOMAIN1 config register is not defined for this ASIC.
 */
void dcn10_dpp_pg_control(
		struct dce_hwseq *hws,
		unsigned int dpp_inst,
		bool power_on)
{
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;

	if (hws->ctx->dc->debug.disable_dpp_power_gate)
		return;
	if (REG(DOMAIN1_PG_CONFIG) == 0)
		return;

	/* DPP instance n maps to power domain 2n+1. */
	switch (dpp_inst) {
	case 0: /* DPP0 */
		REG_UPDATE(DOMAIN1_PG_CONFIG,
				DOMAIN1_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN1_PG_STATUS,
				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DPP1 */
		REG_UPDATE(DOMAIN3_PG_CONFIG,
				DOMAIN3_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN3_PG_STATUS,
				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DPP2 */
		REG_UPDATE(DOMAIN5_PG_CONFIG,
				DOMAIN5_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN5_PG_STATUS,
				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DPP3 */
		REG_UPDATE(DOMAIN7_PG_CONFIG,
				DOMAIN7_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN7_PG_STATUS,
				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}
667
/**
 * dcn10_hubp_pg_control - HUBP power gate control.
 *
 * @hws: dce_hwseq reference.
 * @hubp_inst: HUBP instance reference.
 * @power_on: true to power the HUBP domain up (release the gate), false
 *            to power it down (assert the gate). Note the register
 *            field is the inverse: POWER_GATE = power_on ? 0 : 1.
 *
 * Program the power gate of the given HUBP instance and wait (up to
 * 1000 polls of 1 us) for the PGFSM to report the requested state.
 * No-op when HUBP power gating is disabled via debug options or when
 * the DOMAIN0 config register is not defined for this ASIC.
 */
void dcn10_hubp_pg_control(
		struct dce_hwseq *hws,
		unsigned int hubp_inst,
		bool power_on)
{
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;

	if (hws->ctx->dc->debug.disable_hubp_power_gate)
		return;
	if (REG(DOMAIN0_PG_CONFIG) == 0)
		return;

	/* HUBP instance n maps to power domain 2n. */
	switch (hubp_inst) {
	case 0: /* DCHUBP0 */
		REG_UPDATE(DOMAIN0_PG_CONFIG,
				DOMAIN0_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN0_PG_STATUS,
				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DCHUBP1 */
		REG_UPDATE(DOMAIN2_PG_CONFIG,
				DOMAIN2_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN2_PG_STATUS,
				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DCHUBP2 */
		REG_UPDATE(DOMAIN4_PG_CONFIG,
				DOMAIN4_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN4_PG_STATUS,
				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DCHUBP3 */
		REG_UPDATE(DOMAIN6_PG_CONFIG,
				DOMAIN6_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN6_PG_STATUS,
				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}
728
/*
 * Un-gate the DPP root clock plus the DPP and HUBP power domains for
 * the plane (pipe) @plane_id. IP_REQUEST_EN is raised around the PG
 * programming and dropped again afterwards.
 */
static void power_on_plane_resources(
	struct dce_hwseq *hws,
	int plane_id)
{
	DC_LOGGER_INIT(hws->ctx->logger);

	/* Root clock control hook is optional per generation. */
	if (hws->funcs.dpp_root_clock_control)
		hws->funcs.dpp_root_clock_control(hws, plane_id, true);

	if (REG(DC_IP_REQUEST_CNTL)) {
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);

		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, plane_id, true);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, plane_id, true);

		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Un-gated front end for pipe %d\n", plane_id);
	}
}
754
/*
 * Revert the DEGVIDCN10_253 stutter workaround: blank HUBP0 again and
 * power-gate it back down. No-op unless the workaround was applied.
 */
static void undo_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];

	if (!hws->wa_state.DEGVIDCN10_253_applied)
		return;

	hubp->funcs->set_blank(hubp, true);

	/* IP_REQUEST_EN is raised while toggling the power gate. */
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	hws->funcs.hubp_pg_control(hws, 0, false);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hws->wa_state.DEGVIDCN10_253_applied = false;
}
774
/*
 * Apply the DEGVIDCN10_253 workaround: once every HUBP is power gated,
 * power HUBP0 back up (kept blanked) so stutter can be enabled.
 * Skipped when stutter is disabled or the WA flag is not set for this
 * ASIC; undone by undo_DEGVIDCN10_253_wa().
 */
static void apply_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];
	int i;

	if (dc->debug.disable_stutter)
		return;

	if (!hws->wa.DEGVIDCN10_253)
		return;

	/* Bail out as soon as any HUBP is found still powered. */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (!dc->res_pool->hubps[i]->power_gated)
			return;
	}

	/* all pipe power gated, apply work around to enable stutter. */

	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	hws->funcs.hubp_pg_control(hws, 0, true);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hubp->funcs->set_hubp_blank_en(hubp, false);
	hws->wa_state.DEGVIDCN10_253_applied = true;
}
804
805 void dcn10_bios_golden_init(struct dc *dc)
806 {
807 struct dce_hwseq *hws = dc->hwseq;
808 struct dc_bios *bp = dc->ctx->dc_bios;
809 int i;
810 bool allow_self_fresh_force_enable = true;
811
812 if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
813 return;
814
815 if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
816 allow_self_fresh_force_enable =
817 dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
818
819
820 /* WA for making DF sleep when idle after resume from S0i3.
821 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
822 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
823 * before calling command table and it changed to 1 after,
824 * it should be set back to 0.
825 */
826
827 /* initialize dcn global */
828 bp->funcs->enable_disp_power_gating(bp,
829 CONTROLLER_ID_D0, ASIC_PIPE_INIT);
830
831 for (i = 0; i < dc->res_pool->pipe_count; i++) {
832 /* initialize dcn per pipe */
833 bp->funcs->enable_disp_power_gating(bp,
834 CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
835 }
836
837 if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
838 if (allow_self_fresh_force_enable == false &&
839 dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
840 dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
841 !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
842
843 }
844
/*
 * Workaround for spurious OTG underflow reports: wait for MPCC
 * disconnect on every pipe of @stream, re-enable blank-data double
 * buffering, and clear any underflow that appeared during the sequence
 * itself (i.e. was not latched beforehand). Gated by the
 * false_optc_underflow WA flag.
 */
static void false_optc_underflow_wa(
		struct dc *dc,
		const struct dc_stream_state *stream,
		struct timing_generator *tg)
{
	int i;
	bool underflow;

	if (!dc->hwseq->wa.false_optc_underflow)
		return;

	/* Remember whether underflow was already latched before the WA. */
	underflow = tg->funcs->is_optc_underflow_occurred(tg);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		if (old_pipe_ctx->stream != stream)
			continue;

		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
	}

	if (tg->funcs->set_blank_data_double_buffer)
		tg->funcs->set_blank_data_double_buffer(tg, true);

	/* Only clear underflow that this sequence itself produced. */
	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
		tg->funcs->clear_optc_underflow(tg);
}
873
874 static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
875 {
876 struct pipe_ctx *other_pipe;
877 int vready_offset = pipe->pipe_dlg_param.vready_offset;
878
879 /* Always use the largest vready_offset of all connected pipes */
880 for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
881 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
882 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
883 }
884 for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
885 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
886 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
887 }
888 for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
889 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
890 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
891 }
892 for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
893 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
894 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
895 }
896
897 return vready_offset;
898 }
899
/*
 * dcn10_enable_stream_timing() - Program pixel clock and OTG timing, then
 * enable the CRTC for @pipe_ctx's stream.
 *
 * Only the master (top) pipe programs the back end; secondary pipes share
 * the master's back end and return immediately.
 *
 * Return: DC_OK on success (or when called on a secondary pipe),
 * DC_ERROR_UNEXPECTED when the pixel clock or the CRTC fails to enable.
 */
enum dc_status dcn10_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	enum dc_color_space color_space;
	struct tg_color black_color = {0};

	/* by upper caller loop, pipe0 is parent pipe and be called first.
	 * back end is set up by for pipe0. Other children pipe share back end
	 * with pipe 0. No program is needed.
	 */
	if (pipe_ctx->top_pipe != NULL)
		return DC_OK;

	/* TODO check if timing_changed, disable stream if timing changed */

	/* HW program guide assume display already disable
	 * by unplug sequence. OTG assume stop.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

	/* Program the stream's pixel clock; bail out if the clock source
	 * rejects the requested settings.
	 */
	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
			&pipe_ctx->pll_settings)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	if (dc_is_hdmi_tmds_signal(stream->signal)) {
		/* Record that the OTG now holds a reference on the link's
		 * symbol clock so power optimizations keep it running.
		 */
		stream->link->phy_state.symclk_ref_cnts.otg = 1;
		if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
		else
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
	}

	/* Program OTG timing; vready offset is the max across all pipes in
	 * the same blending/ODM group.
	 */
	pipe_ctx->stream_res.tg->funcs->program_timing(
			pipe_ctx->stream_res.tg,
			&stream->timing,
			calculate_vready_offset_for_group(pipe_ctx),
			pipe_ctx->pipe_dlg_param.vstartup_start,
			pipe_ctx->pipe_dlg_param.vupdate_offset,
			pipe_ctx->pipe_dlg_param.vupdate_width,
			pipe_ctx->stream->signal,
			true);

#if 0 /* move to after enable_crtc */
	/* TODO: OPP FMT, ABM. etc. should be done here. */
	/* or FPGA now. instance 0 only. TODO: move to opp.c */

	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;

	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
				pipe_ctx->stream_res.opp,
				&stream->bit_depth_params,
				&stream->clamping);
#endif
	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;

	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
		pipe_ctx->stream_res.tg->funcs->set_blank_color(
				pipe_ctx->stream_res.tg,
				&black_color);

	/* If the OTG is running unblanked, blank it and apply the false
	 * underflow workaround before (re)enabling the CRTC.
	 */
	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
	}

	/* VTG is within DCHUB command block. DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */

	return DC_OK;
}
1001
/*
 * dcn10_reset_back_end_for_pipe() - Tear down the back end (link/DPMS,
 * audio, OTG) of a pipe that is being removed or fully reprogrammed.
 *
 * Only the parent (top) pipe disables the OTG since children share its back
 * end.  The pipe's stream pointer is cleared only when the pipe belongs to
 * the current state.
 */
static void dcn10_reset_back_end_for_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	int i;
	struct dc_link *link;
	DC_LOGGER_INIT(dc->ctx->logger);
	/* Nothing to tear down if no stream encoder was ever acquired. */
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		pipe_ctx->stream = NULL;
		return;
	}

	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		link = pipe_ctx->stream->link;
		/* DPMS may already disable or */
		/* dpms_off status is incorrect due to fastboot
		 * feature. When system resume from S4 with second
		 * screen only, the dpms_off would be true but
		 * VBIOS lit up eDP, so check link status too.
		 */
		if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
			dc->link_srv->set_dpms_off(pipe_ctx);
		else if (pipe_ctx->stream_res.audio)
			dc->hwss.disable_audio_stream(pipe_ctx);

		if (pipe_ctx->stream_res.audio) {
			/*disable az_endpoint*/
			pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

			/*free audio*/
			if (dc->caps.dynamic_audio == true) {
				/*we have to dynamic arbitrate the audio endpoints*/
				/*we free the resource, need reset is_audio_acquired*/
				update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
						pipe_ctx->stream_res.audio, false);
				pipe_ctx->stream_res.audio = NULL;
			}
		}
	}

	/* by upper caller loop, parent pipe: pipe0, will be reset last.
	 * back end share by all pipes and will be disable only when disable
	 * parent pipe.
	 */
	if (pipe_ctx->top_pipe == NULL) {

		if (pipe_ctx->stream_res.abm)
			dc->hwss.set_abm_immediate_disable(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		/* Clear any DRR (variable refresh) state before the OTG stops. */
		if (pipe_ctx->stream_res.tg->funcs->set_drr)
			pipe_ctx->stream_res.tg->funcs->set_drr(
					pipe_ctx->stream_res.tg, NULL);
		/* OTG no longer references the link symbol clock. */
		pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
	}

	/* Only clear the stream pointer if this pipe is part of the current
	 * state; otherwise leave the context untouched.
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
			break;

	if (i == dc->res_pool->pipe_count)
		return;

	pipe_ctx->stream = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
1072
1073 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
1074 {
1075 struct hubp *hubp ;
1076 unsigned int i;
1077 bool need_recover = true;
1078
1079 if (!dc->debug.recovery_enabled)
1080 return false;
1081
1082 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1083 struct pipe_ctx *pipe_ctx =
1084 &dc->current_state->res_ctx.pipe_ctx[i];
1085 if (pipe_ctx != NULL) {
1086 hubp = pipe_ctx->plane_res.hubp;
1087 if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
1088 if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
1089 /* one pipe underflow, we will reset all the pipes*/
1090 need_recover = true;
1091 }
1092 }
1093 }
1094 }
1095 if (!need_recover)
1096 return false;
1097 /*
1098 DCHUBP_CNTL:HUBP_BLANK_EN=1
1099 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
1100 DCHUBP_CNTL:HUBP_DISABLE=1
1101 DCHUBP_CNTL:HUBP_DISABLE=0
1102 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
1103 DCSURF_PRIMARY_SURFACE_ADDRESS
1104 DCHUBP_CNTL:HUBP_BLANK_EN=0
1105 */
1106
1107 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1108 struct pipe_ctx *pipe_ctx =
1109 &dc->current_state->res_ctx.pipe_ctx[i];
1110 if (pipe_ctx != NULL) {
1111 hubp = pipe_ctx->plane_res.hubp;
1112 /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
1113 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1114 hubp->funcs->set_hubp_blank_en(hubp, true);
1115 }
1116 }
1117 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
1118 hubbub1_soft_reset(dc->res_pool->hubbub, true);
1119
1120 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1121 struct pipe_ctx *pipe_ctx =
1122 &dc->current_state->res_ctx.pipe_ctx[i];
1123 if (pipe_ctx != NULL) {
1124 hubp = pipe_ctx->plane_res.hubp;
1125 /*DCHUBP_CNTL:HUBP_DISABLE=1*/
1126 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1127 hubp->funcs->hubp_disable_control(hubp, true);
1128 }
1129 }
1130 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1131 struct pipe_ctx *pipe_ctx =
1132 &dc->current_state->res_ctx.pipe_ctx[i];
1133 if (pipe_ctx != NULL) {
1134 hubp = pipe_ctx->plane_res.hubp;
1135 /*DCHUBP_CNTL:HUBP_DISABLE=0*/
1136 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1137 hubp->funcs->hubp_disable_control(hubp, true);
1138 }
1139 }
1140 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1141 hubbub1_soft_reset(dc->res_pool->hubbub, false);
1142 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1143 struct pipe_ctx *pipe_ctx =
1144 &dc->current_state->res_ctx.pipe_ctx[i];
1145 if (pipe_ctx != NULL) {
1146 hubp = pipe_ctx->plane_res.hubp;
1147 /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1148 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1149 hubp->funcs->set_hubp_blank_en(hubp, true);
1150 }
1151 }
1152 return true;
1153
1154 }
1155
/*
 * dcn10_verify_allow_pstate_change_high() - Sanity-check that DCHUB still
 * allows p-state change; trigger the forced recovery workaround if not.
 *
 * Invoked from debug sanity checks (e.g. around pipe lock/unlock).  No-op on
 * hardware without verify_allow_pstate_change_high support.
 */
void dcn10_verify_allow_pstate_change_high(struct dc *dc)
{
	struct hubbub *hubbub = dc->res_pool->hubbub;
	static bool should_log_hw_state; /* prevent hw state log by default */

	if (!hubbub->funcs->verify_allow_pstate_change_high)
		return;

	if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
		int i = 0;

		if (should_log_hw_state)
			dcn10_log_hw_state(dc, NULL);

		/* NOTE(review): pipe_ctx is not declared in this scope, so the
		 * trace macro presumably declares it internally and iterates
		 * using i - confirm against the TRACE_DC_PIPE_STATE definition.
		 */
		TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
		BREAK_TO_DEBUGGER();
		if (dcn10_hw_wa_force_recovery(dc)) {
			/*check again*/
			if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
				BREAK_TO_DEBUGGER();
		}
	}
}
1179
1180 /* trigger HW to start disconnect plane from stream on the next vsync */
1181 void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
1182 {
1183 struct dce_hwseq *hws = dc->hwseq;
1184 struct hubp *hubp = pipe_ctx->plane_res.hubp;
1185 int dpp_id = pipe_ctx->plane_res.dpp->inst;
1186 struct mpc *mpc = dc->res_pool->mpc;
1187 struct mpc_tree *mpc_tree_params;
1188 struct mpcc *mpcc_to_remove = NULL;
1189 struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
1190
1191 mpc_tree_params = &(opp->mpc_tree_params);
1192 mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
1193
1194 /*Already reset*/
1195 if (mpcc_to_remove == NULL)
1196 return;
1197
1198 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
1199 // Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
1200 // so don't wait for MPCC_IDLE in the programming sequence
1201 if (opp != NULL && !pipe_ctx->plane_state->is_phantom)
1202 opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1203
1204 dc->optimized_required = true;
1205
1206 if (hubp->funcs->hubp_disconnect)
1207 hubp->funcs->hubp_disconnect(hubp);
1208
1209 if (dc->debug.sanity_checks)
1210 hws->funcs.verify_allow_pstate_change_high(dc);
1211 }
1212
/**
 * dcn10_plane_atomic_power_down - Power down plane components.
 *
 * @dc: dc struct reference. used for grab hwseq.
 * @dpp: dpp struct reference.
 * @hubp: hubp struct reference.
 *
 * Keep in mind that this operation requires a power gate configuration;
 * however, requests for switch power gate are precisely controlled to avoid
 * problems. For this reason, power gate request is usually disabled. This
 * function first needs to enable the power gate request before disabling DPP
 * and HUBP. Finally, it disables the power gate request again.
 */
void dcn10_plane_atomic_power_down(struct dc *dc,
		struct dpp *dpp,
		struct hubp *hubp)
{
	struct dce_hwseq *hws = dc->hwseq;
	DC_LOGGER_INIT(dc->ctx->logger);

	if (REG(DC_IP_REQUEST_CNTL)) {
		/* Open the power-gate request window. */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);

		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, dpp->inst, false);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, hubp->inst, false);

		/* Leave the DPP in a known state before it loses power. */
		dpp->funcs->dpp_reset(dpp);

		/* Close the window again so stray requests cannot power-gate. */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Power gated front end %d\n", hubp->inst);
	}

	/* Stop the DPP root clock where the hardware supports it. */
	if (hws->funcs.dpp_root_clock_control)
		hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
}
1254
/* disable HW used by plane.
 * note: cannot disable until disconnect is complete
 */
void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	int opp_id = hubp->opp_id;

	/* The MPCC disconnect requested earlier must complete before any
	 * clocks are gated.
	 */
	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);

	hubp->funcs->hubp_clk_cntl(hubp, false);

	dpp->funcs->dpp_dppclk_control(dpp, false, false);

	/* Gate the OPP pipe clock only when this pipe had a valid OPP and the
	 * OPP's MPC tree is now empty (no other MPCC feeds it).
	 */
	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
				pipe_ctx->stream_res.opp,
				false);

	hubp->power_gated = true;
	dc->optimized_required = false; /* We're powering off, no need to optimize */

	hws->funcs.plane_atomic_power_down(dc,
			pipe_ctx->plane_res.dpp,
			pipe_ctx->plane_res.hubp);

	/* Fully detach resources from this pipe context. */
	pipe_ctx->stream = NULL;
	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->plane_state = NULL;
}
1290
1291 void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
1292 {
1293 struct dce_hwseq *hws = dc->hwseq;
1294 DC_LOGGER_INIT(dc->ctx->logger);
1295
1296 if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1297 return;
1298
1299 hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1300
1301 apply_DEGVIDCN10_253_wa(dc);
1302
1303 DC_LOG_DC("Power down front end %d\n",
1304 pipe_ctx->pipe_idx);
1305 }
1306
/*
 * dcn10_init_pipes() - Bring every pipe to a known, disabled state at init.
 *
 * Blanks enabled OTGs, resets DET allocation and the MPC muxes, disconnects
 * and powers down each front end, and finally power-gates unused DSCs.
 * Pipes carrying a seamless-boot stream are left untouched so the firmware/
 * VBIOS image stays on screen.
 */
void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	bool can_apply_seamless_boot = false;

	/* Any stream flagged for seamless boot disables pipe teardown below. */
	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization) {
			can_apply_seamless_boot = true;
			break;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		/* Blank controller using driver code instead of
		 * command table.
		 */
		if (tg->funcs->is_tg_enabled(tg)) {
			if (hws->funcs.init_blank != NULL) {
				hws->funcs.init_blank(dc, tg);
				tg->funcs->lock(tg);
			} else {
				tg->funcs->lock(tg);
				tg->funcs->set_blank(tg, true);
				hwss_wait_for_blank_complete(tg);
			}
		}
	}

	/* Reset det size */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct hubp *hubp = dc->res_pool->hubps[i];

		/* Do not need to reset for seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		if (hubbub && hubp) {
			if (hubbub->funcs->program_det_size)
				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
		}
	}

	/* num_opp will be equal to number of mpcc */
	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Cannot reset the MPC mux if seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		dc->res_pool->mpc->funcs->mpc_init_single_inst(
				dc->res_pool->mpc, i);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct hubp *hubp = dc->res_pool->hubps[i];
		struct dpp *dpp = dc->res_pool->dpps[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (can_apply_seamless_boot &&
			pipe_ctx->stream != NULL &&
			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
				pipe_ctx->stream_res.tg)) {
			// Enable double buffering for OTG_BLANK no matter if
			// seamless boot is enabled or not to suppress global sync
			// signals when OTG blanked. This is to prevent pipe from
			// requesting data while in PSR.
			tg->funcs->tg_init(tg);
			hubp->power_gated = true;
			continue;
		}

		/* Disable on the current state so the new one isn't cleared. */
		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		dpp->funcs->dpp_reset(dpp);

		/* Temporarily attach this pipe's default resources so the
		 * disconnect/disable helpers can operate on it.
		 */
		pipe_ctx->stream_res.tg = tg;
		pipe_ctx->pipe_idx = i;

		pipe_ctx->plane_res.hubp = hubp;
		pipe_ctx->plane_res.dpp = dpp;
		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
		hubp->mpcc_id = dpp->inst;
		hubp->opp_id = OPP_ID_INVALID;
		hubp->power_gated = false;

		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];

		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);

		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->unlock(tg);

		dc->hwss.disable_plane(dc, pipe_ctx);

		pipe_ctx->stream_res.tg = NULL;
		pipe_ctx->plane_res.hubp = NULL;

		if (tg->funcs->is_tg_enabled(tg)) {
			if (tg->funcs->init_odm)
				tg->funcs->init_odm(tg);
		}

		tg->funcs->tg_init(tg);
	}

	/* Power gate DSCs */
	if (hws->funcs.dsc_pg_control != NULL) {
		uint32_t num_opps = 0;
		uint32_t opp_id_src0 = OPP_ID_INVALID;
		uint32_t opp_id_src1 = OPP_ID_INVALID;

		// Step 1: To find out which OPTC is running & OPTC DSC is ON
		// We can't use res_pool->res_cap->num_timing_generator to check
		// Because it records display pipes default setting built in driver,
		// not display pipes of the current chip.
		// Some ASICs would be fused display pipes less than the default setting.
		// In dcnxx_resource_construct function, driver would obtain real information.
		for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
			uint32_t optc_dsc_state = 0;
			struct timing_generator *tg = dc->res_pool->timing_generators[i];

			if (tg->funcs->is_tg_enabled(tg)) {
				if (tg->funcs->get_dsc_status)
					tg->funcs->get_dsc_status(tg, &optc_dsc_state);
				// Only one OPTC with DSC is ON, so if we got one result, we would exit this block.
				// non-zero value is DSC enabled
				if (optc_dsc_state != 0) {
					tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
					break;
				}
			}
		}

		// Step 2: To power down DSC but skip DSC of running OPTC
		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
			struct dcn_dsc_state s = {0};

			dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);

			/* Keep the DSC that is actively feeding the running OPTC. */
			if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
				s.dsc_clock_en && s.dsc_fw_en)
				continue;

			hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
		}
	}
}
1477
/*
 * dcn10_init_hw() - One-time hardware initialization for DCN1.x.
 *
 * Initializes clocks, DCCG, BIOS golden settings, link encoders, audio, ABM
 * and DMCU, and (unless seamless boot is requested) tears all pipes down to
 * a known state.  On FPGA/Maximus environments only a reduced register setup
 * is performed.
 */
void dcn10_init_hw(struct dc *dc)
{
	int i;
	struct abm *abm = dc->res_pool->abm;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	bool is_optimized_init_done = false;

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	/* Align bw context with hw config when system resume. */
	/* NOTE(review): dc->clk_mgr is NULL-checked just above but dereferenced
	 * unconditionally here and at the end of this function - confirm
	 * clk_mgr can never be NULL on these paths.
	 */
	if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
		dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
		dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
	}

	// Initialize the dccg
	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {

		/* FPGA emulation: minimal global setup, then bail out. */
		REG_WRITE(REFCLK_CNTL, 0);
		REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

		if (!dc->debug.disable_clock_gate) {
			/* enable all DCN clock gating */
			REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

			REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

			REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
		}

		//Enable ability to power gate / don't force power on permanently
		if (hws->funcs.enable_power_gating_plane)
			hws->funcs.enable_power_gating_plane(hws, true);

		return;
	}

	if (!dcb->funcs->is_accelerated_mode(dcb))
		hws->funcs.disable_vga(dc->hwseq);

	hws->funcs.bios_golden_init(dc);

	/* Derive reference clocks from VBIOS firmware info when available. */
	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
			if (res_pool->dccg && res_pool->hubbub) {

				(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
						dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
						&res_pool->ref_clocks.dccg_ref_clock_inKhz);

				(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
						res_pool->ref_clocks.dccg_ref_clock_inKhz,
						&res_pool->ref_clocks.dchub_ref_clock_inKhz);
			} else {
				// Not all ASICs have DCCG sw component
				res_pool->ref_clocks.dccg_ref_clock_inKhz =
						res_pool->ref_clocks.xtalin_clock_inKhz;
				res_pool->ref_clocks.dchub_ref_clock_inKhz =
						res_pool->ref_clocks.xtalin_clock_inKhz;
			}
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (!is_optimized_init_done)
			link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;
		}
	}

	/* we want to turn off all dp displays before doing detection */
	dc->link_srv->blank_all_dp_displays(dc);

	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
		if (!is_optimized_init_done) {
			hws->funcs.init_pipes(dc, dc->current_state);
			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
		}
	}

	if (!is_optimized_init_done) {

		for (i = 0; i < res_pool->audio_count; i++) {
			struct audio *audio = res_pool->audios[i];

			audio->funcs->hw_init(audio);
		}

		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			/* Panel control hw_init returns the current backlight
			 * level, reused below for ABM initialization.
			 */
			if (link->panel_cntl)
				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
		}

		if (abm != NULL)
			abm->funcs->abm_init(abm, backlight);

		if (dmcu != NULL && !dmcu->auto_load_dmcu)
			dmcu->funcs->dmcu_init(dmcu);
	}

	if (abm != NULL && dmcu != NULL)
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	if (!is_optimized_init_done)
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}

	if (dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
}
1636
1637 /* In headless boot cases, DIG may be turned
1638 * on which causes HW/SW discrepancies.
1639 * To avoid this, power down hardware on boot
1640 * if DIG is turned on
1641 */
1642 void dcn10_power_down_on_boot(struct dc *dc)
1643 {
1644 struct dc_link *edp_links[MAX_NUM_EDP];
1645 struct dc_link *edp_link = NULL;
1646 int edp_num;
1647 int i = 0;
1648
1649 dc_get_edp_links(dc, edp_links, &edp_num);
1650 if (edp_num)
1651 edp_link = edp_links[0];
1652
1653 if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
1654 edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
1655 dc->hwseq->funcs.edp_backlight_control &&
1656 dc->hwss.power_down &&
1657 dc->hwss.edp_power_control) {
1658 dc->hwseq->funcs.edp_backlight_control(edp_link, false);
1659 dc->hwss.power_down(dc);
1660 dc->hwss.edp_power_control(edp_link, false);
1661 } else {
1662 for (i = 0; i < dc->link_count; i++) {
1663 struct dc_link *link = dc->links[i];
1664
1665 if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
1666 link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
1667 dc->hwss.power_down) {
1668 dc->hwss.power_down(dc);
1669 break;
1670 }
1671
1672 }
1673 }
1674
1675 /*
1676 * Call update_clocks with empty context
1677 * to send DISPLAY_OFF
1678 * Otherwise DISPLAY_OFF may not be asserted
1679 */
1680 if (dc->clk_mgr->funcs->set_low_power_state)
1681 dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
1682 }
1683
1684 void dcn10_reset_hw_ctx_wrap(
1685 struct dc *dc,
1686 struct dc_state *context)
1687 {
1688 int i;
1689 struct dce_hwseq *hws = dc->hwseq;
1690
1691 /* Reset Back End*/
1692 for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
1693 struct pipe_ctx *pipe_ctx_old =
1694 &dc->current_state->res_ctx.pipe_ctx[i];
1695 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1696
1697 if (!pipe_ctx_old->stream)
1698 continue;
1699
1700 if (pipe_ctx_old->top_pipe)
1701 continue;
1702
1703 if (!pipe_ctx->stream ||
1704 pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
1705 struct clock_source *old_clk = pipe_ctx_old->clock_source;
1706
1707 dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
1708 if (hws->funcs.enable_stream_gating)
1709 hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
1710 if (old_clk)
1711 old_clk->funcs->cs_power_down(old_clk);
1712 }
1713 }
1714 }
1715
1716 static bool patch_address_for_sbs_tb_stereo(
1717 struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1718 {
1719 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1720 bool sec_split = pipe_ctx->top_pipe &&
1721 pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1722 if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1723 (pipe_ctx->stream->timing.timing_3d_format ==
1724 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1725 pipe_ctx->stream->timing.timing_3d_format ==
1726 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1727 *addr = plane_state->address.grph_stereo.left_addr;
1728 plane_state->address.grph_stereo.left_addr =
1729 plane_state->address.grph_stereo.right_addr;
1730 return true;
1731 } else {
1732 if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1733 plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1734 plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1735 plane_state->address.grph_stereo.right_addr =
1736 plane_state->address.grph_stereo.left_addr;
1737 plane_state->address.grph_stereo.right_meta_addr =
1738 plane_state->address.grph_stereo.left_meta_addr;
1739 }
1740 }
1741 return false;
1742 }
1743
1744 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1745 {
1746 bool addr_patched = false;
1747 PHYSICAL_ADDRESS_LOC addr;
1748 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1749
1750 if (plane_state == NULL)
1751 return;
1752
1753 addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1754
1755 pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1756 pipe_ctx->plane_res.hubp,
1757 &plane_state->address,
1758 plane_state->flip_immediate);
1759
1760 plane_state->status.requested_address = plane_state->address;
1761
1762 if (plane_state->flip_immediate)
1763 plane_state->status.current_address = plane_state->address;
1764
1765 if (addr_patched)
1766 pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
1767 }
1768
1769 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1770 const struct dc_plane_state *plane_state)
1771 {
1772 struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1773 const struct dc_transfer_func *tf = NULL;
1774 bool result = true;
1775
1776 if (dpp_base == NULL)
1777 return false;
1778
1779 if (plane_state->in_transfer_func)
1780 tf = plane_state->in_transfer_func;
1781
1782 if (plane_state->gamma_correction &&
1783 !dpp_base->ctx->dc->debug.always_use_regamma
1784 && !plane_state->gamma_correction->is_identity
1785 && dce_use_lut(plane_state->format))
1786 dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
1787
1788 if (tf == NULL)
1789 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1790 else if (tf->type == TF_TYPE_PREDEFINED) {
1791 switch (tf->tf) {
1792 case TRANSFER_FUNCTION_SRGB:
1793 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1794 break;
1795 case TRANSFER_FUNCTION_BT709:
1796 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1797 break;
1798 case TRANSFER_FUNCTION_LINEAR:
1799 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1800 break;
1801 case TRANSFER_FUNCTION_PQ:
1802 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
1803 cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
1804 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
1805 result = true;
1806 break;
1807 default:
1808 result = false;
1809 break;
1810 }
1811 } else if (tf->type == TF_TYPE_BYPASS) {
1812 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1813 } else {
1814 cm_helper_translate_curve_to_degamma_hw_format(tf,
1815 &dpp_base->degamma_params);
1816 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1817 &dpp_base->degamma_params);
1818 result = true;
1819 }
1820
1821 return result;
1822 }
1823
1824 #define MAX_NUM_HW_POINTS 0x200
1825
1826 static void log_tf(struct dc_context *ctx,
1827 struct dc_transfer_func *tf, uint32_t hw_points_num)
1828 {
1829 // DC_LOG_GAMMA is default logging of all hw points
1830 // DC_LOG_ALL_GAMMA logs all points, not only hw points
1831 // DC_LOG_ALL_TF_POINTS logs all channels of the tf
1832 int i = 0;
1833
1834 DC_LOGGER_INIT(ctx->logger);
1835 DC_LOG_GAMMA("Gamma Correction TF");
1836 DC_LOG_ALL_GAMMA("Logging all tf points...");
1837 DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
1838
1839 for (i = 0; i < hw_points_num; i++) {
1840 DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1841 DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1842 DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1843 }
1844
1845 for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
1846 DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1847 DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1848 DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1849 }
1850 }
1851
1852 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1853 const struct dc_stream_state *stream)
1854 {
1855 struct dpp *dpp = pipe_ctx->plane_res.dpp;
1856
1857 if (dpp == NULL)
1858 return false;
1859
1860 dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
1861
1862 if (stream->out_transfer_func &&
1863 stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1864 stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1865 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1866
1867 /* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
1868 * update.
1869 */
1870 else if (cm_helper_translate_curve_to_hw_format(
1871 stream->out_transfer_func,
1872 &dpp->regamma_params, false)) {
1873 dpp->funcs->dpp_program_regamma_pwl(
1874 dpp,
1875 &dpp->regamma_params, OPP_REGAMMA_USER);
1876 } else
1877 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1878
1879 if (stream != NULL && stream->ctx != NULL &&
1880 stream->out_transfer_func != NULL) {
1881 log_tf(stream->ctx,
1882 stream->out_transfer_func,
1883 dpp->regamma_params.hw_points_num);
1884 }
1885
1886 return true;
1887 }
1888
1889 void dcn10_pipe_control_lock(
1890 struct dc *dc,
1891 struct pipe_ctx *pipe,
1892 bool lock)
1893 {
1894 struct dce_hwseq *hws = dc->hwseq;
1895
1896 /* use TG master update lock to lock everything on the TG
1897 * therefore only top pipe need to lock
1898 */
1899 if (!pipe || pipe->top_pipe)
1900 return;
1901
1902 if (dc->debug.sanity_checks)
1903 hws->funcs.verify_allow_pstate_change_high(dc);
1904
1905 if (lock)
1906 pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1907 else
1908 pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1909
1910 if (dc->debug.sanity_checks)
1911 hws->funcs.verify_allow_pstate_change_high(dc);
1912 }
1913
/**
 * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
 *
 * @dc: Display core state.
 * @pipe_ctx: Pipe whose cursor is about to be programmed.
 *
 * Software keepout workaround to prevent cursor update locking from stalling
 * out cursor updates indefinitely or from old values from being retained in
 * the case where the viewport changes in the same frame as the cursor.
 *
 * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
 * too close to VUPDATE, then stall out until VUPDATE finishes.
 *
 * TODO: Optimize cursor programming to be once per frame before VUPDATE
 * to avoid the need for this workaround.
 */
static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct crtc_position position;
	uint32_t vupdate_start, vupdate_end;
	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
	unsigned int us_per_line, us_vupdate;

	/* Both hooks are required to locate VUPDATE and the current VPOS;
	 * without them there is nothing to do.
	 */
	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
		return;

	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
		return;

	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
			&vupdate_end);

	dc->hwss.get_position(&pipe_ctx, 1, &position);
	vpos = position.vertical_count;

	/* Avoid wraparound calculation issues */
	vupdate_start += stream->timing.v_total;
	vupdate_end += stream->timing.v_total;
	vpos += stream->timing.v_total;

	if (vpos <= vupdate_start) {
		/* VPOS is in VACTIVE or back porch. */
		lines_to_vupdate = vupdate_start - vpos;
	} else if (vpos > vupdate_end) {
		/* VPOS is in the front porch. */
		return;
	} else {
		/* VPOS is in VUPDATE. */
		lines_to_vupdate = 0;
	}

	/* Calculate time until VUPDATE in microseconds. */
	us_per_line =
		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
	us_to_vupdate = lines_to_vupdate * us_per_line;

	/* 70 us is a conservative estimate of cursor update time*/
	if (us_to_vupdate > 70)
		return;

	/* Stall out until the cursor update completes. */
	if (vupdate_end < vupdate_start)
		vupdate_end += stream->timing.v_total;
	/* Busy-wait across the remaining lines plus the whole VUPDATE window. */
	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
	udelay(us_to_vupdate + us_vupdate);
}
1978
1979 void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
1980 {
1981 /* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
1982 if (!pipe || pipe->top_pipe)
1983 return;
1984
1985 /* Prevent cursor lock from stalling out cursor updates. */
1986 if (lock)
1987 delay_cursor_until_vupdate(dc, pipe);
1988
1989 if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
1990 union dmub_hw_lock_flags hw_locks = { 0 };
1991 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
1992
1993 hw_locks.bits.lock_cursor = 1;
1994 inst_flags.opp_inst = pipe->stream_res.opp->inst;
1995
1996 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
1997 lock,
1998 &hw_locks,
1999 &inst_flags);
2000 } else
2001 dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
2002 pipe->stream_res.opp->inst, lock);
2003 }
2004
2005 static bool wait_for_reset_trigger_to_occur(
2006 struct dc_context *dc_ctx,
2007 struct timing_generator *tg)
2008 {
2009 bool rc = false;
2010
2011 /* To avoid endless loop we wait at most
2012 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
2013 const uint32_t frames_to_wait_on_triggered_reset = 10;
2014 int i;
2015
2016 for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
2017
2018 if (!tg->funcs->is_counter_moving(tg)) {
2019 DC_ERROR("TG counter is not moving!\n");
2020 break;
2021 }
2022
2023 if (tg->funcs->did_triggered_reset_occur(tg)) {
2024 rc = true;
2025 /* usually occurs at i=1 */
2026 DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
2027 i);
2028 break;
2029 }
2030
2031 /* Wait for one frame. */
2032 tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
2033 tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
2034 }
2035
2036 if (false == rc)
2037 DC_ERROR("GSL: Timeout on reset trigger!\n");
2038
2039 return rc;
2040 }
2041
2042 static uint64_t reduceSizeAndFraction(uint64_t *numerator,
2043 uint64_t *denominator,
2044 bool checkUint32Bounary)
2045 {
2046 int i;
2047 bool ret = checkUint32Bounary == false;
2048 uint64_t max_int32 = 0xffffffff;
2049 uint64_t num, denom;
2050 static const uint16_t prime_numbers[] = {
2051 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
2052 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
2053 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
2054 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
2055 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
2056 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
2057 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
2058 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
2059 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
2060 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
2061 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
2062 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
2063 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
2064 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
2065 941, 947, 953, 967, 971, 977, 983, 991, 997};
2066 int count = ARRAY_SIZE(prime_numbers);
2067
2068 num = *numerator;
2069 denom = *denominator;
2070 for (i = 0; i < count; i++) {
2071 uint32_t num_remainder, denom_remainder;
2072 uint64_t num_result, denom_result;
2073 if (checkUint32Bounary &&
2074 num <= max_int32 && denom <= max_int32) {
2075 ret = true;
2076 break;
2077 }
2078 do {
2079 num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
2080 denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
2081 if (num_remainder == 0 && denom_remainder == 0) {
2082 num = num_result;
2083 denom = denom_result;
2084 }
2085 } while (num_remainder == 0 && denom_remainder == 0);
2086 }
2087 *numerator = num;
2088 *denominator = denom;
2089 return ret;
2090 }
2091
2092 static bool is_low_refresh_rate(struct pipe_ctx *pipe)
2093 {
2094 uint32_t master_pipe_refresh_rate =
2095 pipe->stream->timing.pix_clk_100hz * 100 /
2096 pipe->stream->timing.h_total /
2097 pipe->stream->timing.v_total;
2098 return master_pipe_refresh_rate <= 30;
2099 }
2100
2101 static uint8_t get_clock_divider(struct pipe_ctx *pipe,
2102 bool account_low_refresh_rate)
2103 {
2104 uint32_t clock_divider = 1;
2105 uint32_t numpipes = 1;
2106
2107 if (account_low_refresh_rate && is_low_refresh_rate(pipe))
2108 clock_divider *= 2;
2109
2110 if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2111 clock_divider *= 2;
2112
2113 while (pipe->next_odm_pipe) {
2114 pipe = pipe->next_odm_pipe;
2115 numpipes++;
2116 }
2117 clock_divider *= numpipes;
2118
2119 return clock_divider;
2120 }
2121
/*
 * Retarget the DP DTO phase/modulo of every non-embedded pipe in the group
 * to the embedded pipe's pixel clock so their vblanks can be synchronized.
 *
 * Returns the index of the selected master pipe, or -1 when no master was
 * chosen (no DTO params configured, the clock source lacks the override
 * hook, or the temporary timing array could not be allocated).
 */
static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
		struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	int i, master = -1, embedded = -1;
	struct dc_crtc_timing *hw_crtc_timing;
	uint64_t phase[MAX_PIPES];
	uint64_t modulo[MAX_PIPES];
	unsigned int pclk;

	uint32_t embedded_pix_clk_100hz;
	uint16_t embedded_h_total;
	uint16_t embedded_v_total;
	uint32_t dp_ref_clk_100hz =
		dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;

	hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL);
	if (!hw_crtc_timing)
		return master;

	if (dc->config.vblank_alignment_dto_params &&
		dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
		/* vblank_alignment_dto_params packs the embedded timing:
		 * bits 31:0 = pixel clock (100Hz units), bits 46:32 =
		 * h_total, bits 62:48 = v_total.
		 */
		embedded_h_total =
			(dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
		embedded_v_total =
			(dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
		embedded_pix_clk_100hz =
			dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;

		/* First pass: read back each pipe's HW timing and pixel
		 * clock, and compute the DTO phase/modulo needed to retune
		 * it against the embedded pipe's clock.
		 */
		for (i = 0; i < group_size; i++) {
			grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
					grouped_pipes[i]->stream_res.tg,
					&hw_crtc_timing[i]);
			dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
				dc->res_pool->dp_clock_source,
				grouped_pipes[i]->stream_res.tg->inst,
				&pclk);
			hw_crtc_timing[i].pix_clk_100hz = pclk;
			if (dc_is_embedded_signal(
					grouped_pipes[i]->stream->signal)) {
				/* The embedded pipe becomes the master. */
				embedded = i;
				master = i;
				phase[i] = embedded_pix_clk_100hz*100;
				modulo[i] = dp_ref_clk_100hz*100;
			} else {

				phase[i] = (uint64_t)embedded_pix_clk_100hz*
					hw_crtc_timing[i].h_total*
					hw_crtc_timing[i].v_total;
				phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
				modulo[i] = (uint64_t)dp_ref_clk_100hz*
					embedded_h_total*
					embedded_v_total;

				if (reduceSizeAndFraction(&phase[i],
						&modulo[i], true) == false) {
					/*
					 * this will help to stop reporting
					 * this timing synchronizable
					 */
					DC_SYNC_INFO("Failed to reduce DTO parameters\n");
					grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
				}
			}
		}

		/* Second pass: program the DTO override on every
		 * synchronizable non-embedded pipe and record the resulting
		 * pixel clock back into the stream timing.
		 */
		for (i = 0; i < group_size; i++) {
			if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
				dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst,
					phase[i], modulo[i]);
				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst, &pclk);
				grouped_pipes[i]->stream->timing.pix_clk_100hz =
					pclk*get_clock_divider(grouped_pipes[i], false);
				/* No embedded pipe found: fall back to the
				 * first overridden pipe as master.
				 */
				if (master == -1)
					master = i;
			}
		}

	}

	kfree(hw_crtc_timing);
	return master;
}
2209
/*
 * Synchronize the vblanks of a group of enabled pipes: align their DP DTOs
 * to a common master pipe, then shift each follower TG's vblank to match
 * the master's.
 */
void dcn10_enable_vblanks_synchronization(
	struct dc *dc,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width, height, master;

	/* Temporarily stretch each follower's DPG vertical size past the
	 * active region while vblanks are being moved.
	 * NOTE(review): presumably this hides transient artifacts during
	 * the shift -- confirm against opp_program_dpg_dimensions.
	 */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);

		if (!tg->funcs->is_tg_enabled(tg)) {
			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
			return;
		}

		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	/* Reset the per-stream sync state before realigning. */
	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;
		grouped_pipes[i]->stream->vblank_synchronized = false;
		grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
	}

	DC_SYNC_INFO("Aligning DP DTOs\n");

	master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);

	DC_SYNC_INFO("Synchronizing VBlanks\n");

	if (master >= 0) {
		for (i = 0; i < group_size; i++) {
			/* NOTE(review): vblank_synchronized is set even for
			 * pipes skipped because of a non-synchronizable
			 * pclk -- confirm this is intentional.
			 */
			if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
				grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
					grouped_pipes[master]->stream_res.tg,
					grouped_pipes[i]->stream_res.tg,
					grouped_pipes[master]->stream->timing.pix_clk_100hz,
					grouped_pipes[i]->stream->timing.pix_clk_100hz,
					get_clock_divider(grouped_pipes[master], false),
					get_clock_divider(grouped_pipes[i], false));
			grouped_pipes[i]->stream->vblank_synchronized = true;
		}
		grouped_pipes[master]->stream->vblank_synchronized = true;
		DC_SYNC_INFO("Sync complete\n");
	}

	/* Restore the real DPG dimensions on each follower. */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}
}
2272
/*
 * Reset the timing generators in a group to a common start point by arming
 * a reset trigger off the first pipe's TG. SubVP phantom pipes are skipped
 * throughout.
 */
void dcn10_enable_timing_synchronization(
	struct dc *dc,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width, height;

	DC_SYNC_INFO("Setting up OTG reset trigger\n");

	/* Temporarily widen each follower's DPG vertical size past the
	 * active region while the TGs are being reset.
	 * NOTE(review): presumably masks transient artifacts -- confirm
	 * against opp_program_dpg_dimensions.
	 */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);

		if (!tg->funcs->is_tg_enabled(tg)) {
			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
			return;
		}

		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	/* Clear the sync flag; it is re-established by the reset. */
	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;

		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream->vblank_synchronized = false;
	}

	/* Arm each follower TG to reset off pipe 0's TG. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
				grouped_pipes[i]->stream_res.tg,
				grouped_pipes[0]->stream_res.tg->inst);
	}

	DC_SYNC_INFO("Waiting for trigger\n");

	/* Need to get only check 1 pipe for having reset as all the others are
	 * synchronized. Look at last pipe programmed to reset.
	 */

	/* NOTE(review): grouped_pipes[1] is dereferenced unconditionally
	 * here, so callers must guarantee group_size >= 2.
	 */
	if (grouped_pipes[1]->stream && grouped_pipes[1]->stream->mall_stream_config.type != SUBVP_PHANTOM)
		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);

	/* Disarm the reset triggers again. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
				grouped_pipes[i]->stream_res.tg);
	}

	/* Restore the real DPG dimensions. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}

	DC_SYNC_INFO("Sync complete\n");
}
2352
2353 void dcn10_enable_per_frame_crtc_position_reset(
2354 struct dc *dc,
2355 int group_size,
2356 struct pipe_ctx *grouped_pipes[])
2357 {
2358 struct dc_context *dc_ctx = dc->ctx;
2359 int i;
2360
2361 DC_SYNC_INFO("Setting up\n");
2362 for (i = 0; i < group_size; i++)
2363 if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2364 grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2365 grouped_pipes[i]->stream_res.tg,
2366 0,
2367 &grouped_pipes[i]->stream->triggered_crtc_reset);
2368
2369 DC_SYNC_INFO("Waiting for trigger\n");
2370
2371 for (i = 0; i < group_size; i++)
2372 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
2373
2374 DC_SYNC_INFO("Multi-display sync is complete\n");
2375 }
2376
/* Read the MMHUB system aperture registers and convert them into the
 * byte-address form expected by the HUBP.
 */
static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
		struct vm_system_aperture_param *apt,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC physical_page_number;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;

	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);

	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			LOGICAL_ADDR, &logical_addr_low);

	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			LOGICAL_ADDR, &logical_addr_high);

	/* Page number -> byte address (4 KiB pages). */
	apt->sys_default.quad_part = physical_page_number.quad_part << 12;
	/* NOTE(review): the << 18 implies the LOW/HIGH registers hold the
	 * aperture bounds in 256 KiB units -- confirm against register spec.
	 */
	apt->sys_low.quad_part = (int64_t)logical_addr_low << 18;
	apt->sys_high.quad_part = (int64_t)logical_addr_high << 18;
}
2400
/* Temporary read settings, future will get values from kmd directly */
/* Read the MMHUB VM context 0 page table registers (base/start/end and
 * fault default address) and rebase the page table base into DCN's view
 * of the address space.
 */
static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
		struct vm_context0_param *vm0,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC fb_base;
	PHYSICAL_ADDRESS_LOC fb_offset;
	uint32_t fb_base_value;
	uint32_t fb_offset_value;

	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);

	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);

	/*
	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
	 * Therefore we need to do
	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
	 */
	/* FB base/offset registers are in 16 MiB units (hence << 24). */
	fb_base.quad_part = (uint64_t)fb_base_value << 24;
	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
	vm0->pte_base.quad_part += fb_base.quad_part;
	vm0->pte_base.quad_part -= fb_offset.quad_part;
}
2445
2446
2447 static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2448 {
2449 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2450 struct vm_system_aperture_param apt = {0};
2451 struct vm_context0_param vm0 = {0};
2452
2453 mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2454 mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
2455
2456 hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2457 hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
2458 }
2459
/* Power up and clock a plane's HW blocks (HUBP, OPP) before programming,
 * and re-enable the flip interrupt where the plane uses it.
 */
static void dcn10_enable_plane(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	/* Undo the DEGVIDCN10-253 workaround before bringing the plane up. */
	undo_DEGVIDCN10_253_wa(dc);

	power_on_plane_resources(dc->hwseq,
		pipe_ctx->plane_res.hubp->inst);

	/* enable DCFCLK current DCHUB */
	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);

	/* make sure OPP_PIPE_CLOCK_EN = 1 */
	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);

	/* Mirror MMHUB VM settings into the HUBP when GPU VM is in use. */
	if (dc->config.gpu_vm_support)
		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	/* Only the top pipe of a tree re-arms the flip interrupt. */
	if (!pipe_ctx->top_pipe
		&& pipe_ctx->plane_state
		&& pipe_ctx->plane_state->flip_int_enabled
		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
		pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);

}
2498
2499 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2500 {
2501 int i = 0;
2502 struct dpp_grph_csc_adjustment adjust;
2503 memset(&adjust, 0, sizeof(adjust));
2504 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2505
2506
2507 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2508 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2509 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2510 adjust.temperature_matrix[i] =
2511 pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2512 } else if (pipe_ctx->plane_state &&
2513 pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2514 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2515 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2516 adjust.temperature_matrix[i] =
2517 pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2518 }
2519
2520 pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
2521 }
2522
2523
2524 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2525 {
2526 if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2527 if (pipe_ctx->top_pipe) {
2528 struct pipe_ctx *top = pipe_ctx->top_pipe;
2529
2530 while (top->top_pipe)
2531 top = top->top_pipe; // Traverse to top pipe_ctx
2532 if (top->plane_state && top->plane_state->layer_index == 0)
2533 return true; // Front MPO plane not hidden
2534 }
2535 }
2536 return false;
2537 }
2538
2539 static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
2540 {
2541 // Override rear plane RGB bias to fix MPO brightness
2542 uint16_t rgb_bias = matrix[3];
2543
2544 matrix[3] = 0;
2545 matrix[7] = 0;
2546 matrix[11] = 0;
2547 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2548 matrix[3] = rgb_bias;
2549 matrix[7] = rgb_bias;
2550 matrix[11] = rgb_bias;
2551 }
2552
/* Program the output CSC for a pipe: the stream's adjustment matrix when
 * enabled, otherwise the colorspace default.
 */
void dcn10_program_output_csc(struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		enum dc_color_space colorspace,
		uint16_t *matrix,
		int opp_id)
{
	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {

			/* MPO is broken with RGB colorspaces when OCSC matrix
			 * brightness offset >= 0 on DCN1 due to OCSC before MPC
			 * Blending adds offsets from front + rear to rear plane
			 *
			 * Fix is to set RGB bias to 0 on rear plane, top plane
			 * black value pixels add offset instead of rear + front
			 */

			/* Reinterpret the offset as signed: values with the
			 * top bit set read as negative and skip the fix.
			 */
			int16_t rgb_bias = matrix[3];
			// matrix[3/7/11] are all the same offset value

			if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
				dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
			} else {
				pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
			}
		}
	} else {
		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
	}
}
2584
2585 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2586 {
2587 struct dc_bias_and_scale bns_params = {0};
2588
2589 // program the input csc
2590 dpp->funcs->dpp_setup(dpp,
2591 plane_state->format,
2592 EXPANSION_MODE_ZERO,
2593 plane_state->input_csc_color_matrix,
2594 plane_state->color_space,
2595 NULL);
2596
2597 //set scale and bias registers
2598 build_prescale_params(&bns_params, plane_state);
2599 if (dpp->funcs->dpp_program_bias_and_scale)
2600 dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2601 }
2602
2603 void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
2604 {
2605 struct mpc *mpc = dc->res_pool->mpc;
2606
2607 if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
2608 get_hdr_visual_confirm_color(pipe_ctx, color);
2609 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
2610 get_surface_visual_confirm_color(pipe_ctx, color);
2611 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
2612 get_surface_tile_visual_confirm_color(pipe_ctx, color);
2613 else
2614 color_space_to_black_color(
2615 dc, pipe_ctx->stream->output_color_space, color);
2616
2617 if (mpc->funcs->set_bg_color) {
2618 memcpy(&pipe_ctx->plane_state->visual_confirm_color, color, sizeof(struct tg_color));
2619 mpc->funcs->set_bg_color(mpc, color, mpcc_id);
2620 }
2621 }
2622
/* Build this plane's MPCC blending configuration and, on a full update,
 * (re)insert the MPCC into the stream's MPC tree.
 */
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct mpcc_blnd_cfg blnd_cfg = {0};
	/* Per-pixel alpha only applies when another pipe blends below us. */
	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
	int mpcc_id;
	struct mpcc *new_mpcc;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);

	blnd_cfg.overlap_only = false;
	blnd_cfg.global_gain = 0xff;

	if (per_pixel_alpha) {
		/* DCN1.0 has output CM before MPC which seems to screw with
		 * pre-multiplied alpha.
		 */
		blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
				pipe_ctx->stream->output_color_space)
						&& pipe_ctx->plane_state->pre_multiplied_alpha);
		/* Combine per-pixel alpha with global gain when both are set. */
		if (pipe_ctx->plane_state->global_alpha) {
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
			blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
		} else {
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
		}
	} else {
		blnd_cfg.pre_multiplied_alpha = false;
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
	}

	if (pipe_ctx->plane_state->global_alpha)
		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
	else
		blnd_cfg.global_alpha = 0xff;

	/*
	 * TODO: remove hack
	 * Note: currently there is a bug in init_hw such that
	 * on resume from hibernate, BIOS sets up MPCC0, and
	 * we do mpcc_remove but the mpcc cannot go to idle
	 * after remove. This cause us to pick mpcc1 here,
	 * which causes a pstate hang for yet unknown reason.
	 */
	mpcc_id = hubp->inst;

	/* If there is no full update, don't need to touch MPC tree*/
	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
		dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
		return;
	}

	/* check if this MPCC is already being used */
	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
	/* remove MPCC if being used */
	if (new_mpcc != NULL)
		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
	else
		if (dc->debug.sanity_checks)
			mpc->funcs->assert_mpcc_idle_before_connect(
					dc->res_pool->mpc, mpcc_id);

	/* Call MPC to insert new plane */
	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
			mpc_tree_params,
			&blnd_cfg,
			NULL,
			NULL,
			hubp->inst,
			mpcc_id);
	dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);

	ASSERT(new_mpcc != NULL);
	/* Record the OPP/MPCC routing on the HUBP. */
	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
	hubp->mpcc_id = mpcc_id;
}
2700
2701 static void update_scaler(struct pipe_ctx *pipe_ctx)
2702 {
2703 bool per_pixel_alpha =
2704 pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2705
2706 pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2707 pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
2708 /* scaler configuration */
2709 pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2710 pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2711 }
2712
2713 static void dcn10_update_dchubp_dpp(
2714 struct dc *dc,
2715 struct pipe_ctx *pipe_ctx,
2716 struct dc_state *context)
2717 {
2718 struct dce_hwseq *hws = dc->hwseq;
2719 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2720 struct dpp *dpp = pipe_ctx->plane_res.dpp;
2721 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2722 struct plane_size size = plane_state->plane_size;
2723 unsigned int compat_level = 0;
2724 bool should_divided_by_2 = false;
2725
2726 /* depends on DML calculation, DPP clock value may change dynamically */
2727 /* If request max dpp clk is lower than current dispclk, no need to
2728 * divided by 2
2729 */
2730 if (plane_state->update_flags.bits.full_update) {
2731
2732 /* new calculated dispclk, dppclk are stored in
2733 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
2734 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
2735 * dcn10_validate_bandwidth compute new dispclk, dppclk.
2736 * dispclk will put in use after optimize_bandwidth when
2737 * ramp_up_dispclk_with_dpp is called.
2738 * there are two places for dppclk be put in use. One location
2739 * is the same as the location as dispclk. Another is within
2740 * update_dchubp_dpp which happens between pre_bandwidth and
2741 * optimize_bandwidth.
2742 * dppclk updated within update_dchubp_dpp will cause new
2743 * clock values of dispclk and dppclk not be in use at the same
2744 * time. when clocks are decreased, this may cause dppclk is
2745 * lower than previous configuration and let pipe stuck.
2746 * for example, eDP + external dp, change resolution of DP from
2747 * 1920x1080x144hz to 1280x960x60hz.
2748 * before change: dispclk = 337889 dppclk = 337889
2749 * change mode, dcn10_validate_bandwidth calculate
2750 * dispclk = 143122 dppclk = 143122
2751 * update_dchubp_dpp be executed before dispclk be updated,
2752 * dispclk = 337889, but dppclk use new value dispclk /2 =
2753 * 168944. this will cause pipe pstate warning issue.
2754 * solution: between pre_bandwidth and optimize_bandwidth, while
2755 * dispclk is going to be decreased, keep dppclk = dispclk
2756 **/
2757 if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
2758 dc->clk_mgr->clks.dispclk_khz)
2759 should_divided_by_2 = false;
2760 else
2761 should_divided_by_2 =
2762 context->bw_ctx.bw.dcn.clk.dppclk_khz <=
2763 dc->clk_mgr->clks.dispclk_khz / 2;
2764
2765 dpp->funcs->dpp_dppclk_control(
2766 dpp,
2767 should_divided_by_2,
2768 true);
2769
2770 if (dc->res_pool->dccg)
2771 dc->res_pool->dccg->funcs->update_dpp_dto(
2772 dc->res_pool->dccg,
2773 dpp->inst,
2774 pipe_ctx->plane_res.bw.dppclk_khz);
2775 else
2776 dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
2777 dc->clk_mgr->clks.dispclk_khz / 2 :
2778 dc->clk_mgr->clks.dispclk_khz;
2779 }
2780
2781 /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
2782 * VTG is within DCHUBBUB which is commond block share by each pipe HUBP.
2783 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
2784 */
2785 if (plane_state->update_flags.bits.full_update) {
2786 hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
2787
2788 hubp->funcs->hubp_setup(
2789 hubp,
2790 &pipe_ctx->dlg_regs,
2791 &pipe_ctx->ttu_regs,
2792 &pipe_ctx->rq_regs,
2793 &pipe_ctx->pipe_dlg_param);
2794 hubp->funcs->hubp_setup_interdependent(
2795 hubp,
2796 &pipe_ctx->dlg_regs,
2797 &pipe_ctx->ttu_regs);
2798 }
2799
2800 size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
2801
2802 if (plane_state->update_flags.bits.full_update ||
2803 plane_state->update_flags.bits.bpp_change)
2804 dcn10_update_dpp(dpp, plane_state);
2805
2806 if (plane_state->update_flags.bits.full_update ||
2807 plane_state->update_flags.bits.per_pixel_alpha_change ||
2808 plane_state->update_flags.bits.global_alpha_change)
2809 hws->funcs.update_mpcc(dc, pipe_ctx);
2810
2811 if (plane_state->update_flags.bits.full_update ||
2812 plane_state->update_flags.bits.per_pixel_alpha_change ||
2813 plane_state->update_flags.bits.global_alpha_change ||
2814 plane_state->update_flags.bits.scaling_change ||
2815 plane_state->update_flags.bits.position_change) {
2816 update_scaler(pipe_ctx);
2817 }
2818
2819 if (plane_state->update_flags.bits.full_update ||
2820 plane_state->update_flags.bits.scaling_change ||
2821 plane_state->update_flags.bits.position_change) {
2822 hubp->funcs->mem_program_viewport(
2823 hubp,
2824 &pipe_ctx->plane_res.scl_data.viewport,
2825 &pipe_ctx->plane_res.scl_data.viewport_c);
2826 }
2827
2828 if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
2829 dc->hwss.set_cursor_position(pipe_ctx);
2830 dc->hwss.set_cursor_attribute(pipe_ctx);
2831
2832 if (dc->hwss.set_cursor_sdr_white_level)
2833 dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
2834 }
2835
2836 if (plane_state->update_flags.bits.full_update) {
2837 /*gamut remap*/
2838 dc->hwss.program_gamut_remap(pipe_ctx);
2839
2840 dc->hwss.program_output_csc(dc,
2841 pipe_ctx,
2842 pipe_ctx->stream->output_color_space,
2843 pipe_ctx->stream->csc_color_matrix.matrix,
2844 pipe_ctx->stream_res.opp->inst);
2845 }
2846
2847 if (plane_state->update_flags.bits.full_update ||
2848 plane_state->update_flags.bits.pixel_format_change ||
2849 plane_state->update_flags.bits.horizontal_mirror_change ||
2850 plane_state->update_flags.bits.rotation_change ||
2851 plane_state->update_flags.bits.swizzle_change ||
2852 plane_state->update_flags.bits.dcc_change ||
2853 plane_state->update_flags.bits.bpp_change ||
2854 plane_state->update_flags.bits.scaling_change ||
2855 plane_state->update_flags.bits.plane_size_change) {
2856 hubp->funcs->hubp_program_surface_config(
2857 hubp,
2858 plane_state->format,
2859 &plane_state->tiling_info,
2860 &size,
2861 plane_state->rotation,
2862 &plane_state->dcc,
2863 plane_state->horizontal_mirror,
2864 compat_level);
2865 }
2866
2867 hubp->power_gated = false;
2868
2869 hws->funcs.update_plane_addr(dc, pipe_ctx);
2870
2871 if (is_pipe_tree_visible(pipe_ctx))
2872 hubp->funcs->set_blank(hubp, false);
2873 }
2874
/*
 * Blank or unblank the pixel output of a pipe's OTG.
 *
 * Programs the OTG blank color from the stream's output color space,
 * then either unblanks and re-enables ABM for the stream, or disables
 * ABM and blanks at the next VBLANK.
 *
 * @dc:       display core context
 * @pipe_ctx: pipe whose timing generator is blanked/unblanked
 * @blank:    true to blank pixel data, false to unblank
 */
void dcn10_blank_pixel_data(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		bool blank)
{
	enum dc_color_space color_space;
	struct tg_color black_color = {0};
	struct stream_resource *stream_res = &pipe_ctx->stream_res;
	struct dc_stream_state *stream = pipe_ctx->stream;

	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;


	if (stream_res->tg->funcs->set_blank_color)
		stream_res->tg->funcs->set_blank_color(
				stream_res->tg,
				&black_color);

	if (!blank) {
		/* Unblank first, then restore the stream's ABM level */
		if (stream_res->tg->funcs->set_blank)
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		if (stream_res->abm) {
			dc->hwss.set_pipe(pipe_ctx);
			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
		}
	} else {
		/* Disable ABM before blanking; wait for VBLANK so the
		 * blank takes effect on a frame boundary.
		 */
		dc->hwss.set_abm_immediate_disable(pipe_ctx);
		if (stream_res->tg->funcs->set_blank) {
			stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		}
	}
}
2918
2919 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2920 {
2921 struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2922 uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2923 struct custom_float_format fmt;
2924
2925 fmt.exponenta_bits = 6;
2926 fmt.mantissa_bits = 12;
2927 fmt.sign = true;
2928
2929
2930 if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2931 convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2932
2933 pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2934 pipe_ctx->plane_res.dpp, hw_mult);
2935 }
2936
/*
 * Program one pipe for the given state: global sync / VTG on the top
 * pipe, plane enable on full update, HUBP/DPP programming, HDR
 * multiplier and (conditionally) input/output transfer functions.
 *
 * NOTE(review): pipe_ctx->plane_state is dereferenced unconditionally
 * below — callers are assumed to only pass pipes with a plane; confirm.
 */
void dcn10_program_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	/* Only the top pipe owns the OTG: program sync/VTG and blanking */
	if (pipe_ctx->top_pipe == NULL) {
		bool blank = !is_pipe_tree_visible(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->program_global_sync(
				pipe_ctx->stream_res.tg,
				calculate_vready_offset_for_group(pipe_ctx),
				pipe_ctx->pipe_dlg_param.vstartup_start,
				pipe_ctx->pipe_dlg_param.vupdate_offset,
				pipe_ctx->pipe_dlg_param.vupdate_width);

		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);

		if (hws->funcs.setup_vupdate_interrupt)
			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);

		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
	}

	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		dcn10_enable_plane(dc, pipe_ctx, context);

	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);

	hws->funcs.set_hdr_multiplier(pipe_ctx);

	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change)
		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);

	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
	 * only do gamma programming for full update.
	 * TODO: This can be further optimized/cleaned up
	 * Always call this for now since it does memcmp inside before
	 * doing heavy calculation and programming
	 */
	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
}
2984
/*
 * Wait for every enabled top pipe's OTG to pass through VBLANK and
 * reach VACTIVE, guaranteeing the pending register update has been
 * taken by a VUPDATE before the caller locks the pipes again.
 */
void dcn10_wait_for_pending_cleared(struct dc *dc,
		struct dc_state *context)
{
		struct pipe_ctx *pipe_ctx;
		struct timing_generator *tg;
		int i;

		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			pipe_ctx = &context->res_ctx.pipe_ctx[i];
			tg = pipe_ctx->stream_res.tg;

			/*
			 * Only wait for top pipe's tg pending bit
			 * Also skip if pipe is disabled.
			 */
			if (pipe_ctx->top_pipe ||
			    !pipe_ctx->stream || !pipe_ctx->plane_state ||
			    !tg->funcs->is_tg_enabled(tg))
				continue;

			/*
			 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
			 * For some reason waiting for OTG_UPDATE_PENDING cleared
			 * seems to not trigger the update right away, and if we
			 * lock again before VUPDATE then we don't get a separated
			 * operation.
			 */
			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
		}
}
3016
/*
 * Front-end programming that must run after pipe locks are released:
 * apply the OPTC underflow workaround for plane-less streams, disable
 * planes flagged for removal, lower bandwidth if anything was disabled
 * and apply the DEGVIDCN10_254 watermark workaround when needed.
 */
void dcn10_post_unlock_program_front_end(
		struct dc *dc,
		struct dc_state *context)
{
	int i;

	DC_LOGGER_INIT(dc->ctx->logger);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (!pipe_ctx->top_pipe &&
			!pipe_ctx->prev_odm_pipe &&
			pipe_ctx->stream) {
			struct timing_generator *tg = pipe_ctx->stream_res.tg;

			/* NOTE(review): stream_status is indexed by pipe
			 * index i here, not by stream index — assumes the
			 * two line up for top pipes; confirm.
			 */
			if (context->stream_status[i].plane_count == 0)
				false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
		}
	}

	/* Disable from the current (old) state the pipes the new state drops */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
			dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);

	/* Lower clocks/watermarks once if any pipe was disabled */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
			dc->hwss.optimize_bandwidth(dc, context);
			break;
		}

	if (dc->hwseq->wa.DEGVIDCN10_254)
		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
}
3051
3052 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
3053 {
3054 uint8_t i;
3055
3056 for (i = 0; i < context->stream_count; i++) {
3057 if (context->streams[i]->timing.timing_3d_format
3058 == TIMING_3D_FORMAT_HW_FRAME_PACKING) {
3059 /*
3060 * Disable stutter
3061 */
3062 hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
3063 break;
3064 }
3065 }
3066 }
3067
/*
 * Raise clocks and program watermarks ahead of a state commit.
 *
 * update_clocks is called with its third argument false here (true in
 * dcn10_optimize_bandwidth) — presumably safe_to_lower; confirm against
 * the clk_mgr interface. program_watermarks' return value records
 * whether a later watermark optimization pass is required.
 */
void dcn10_prepare_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	int min_fclk_khz, min_dcfclk_khz, socclk_khz;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		/* No streams: PHY clock can drop to zero */
		if (context->stream_count == 0)
			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

		dc->clk_mgr->funcs->update_clocks(
				dc->clk_mgr,
				context,
				false);
	}

	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);
	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
		/* SoC clock math uses FP; must be bracketed by DC_FP_* */
		DC_FP_START();
		dcn_get_soc_clks(
			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
		DC_FP_END();
		dcn_bw_notify_pplib_of_wm_ranges(
			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
	}

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
3107
/*
 * Counterpart of dcn10_prepare_bandwidth, run after the new state is
 * applied: update clocks (third argument true — presumably
 * safe_to_lower; confirm) and reprogram watermarks for the final state.
 */
void dcn10_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	int min_fclk_khz, min_dcfclk_khz, socclk_khz;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		/* No streams: PHY clock can drop to zero */
		if (context->stream_count == 0)
			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

		dc->clk_mgr->funcs->update_clocks(
				dc->clk_mgr,
				context,
				true);
	}

	hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);

	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
		/* SoC clock math uses FP; must be bracketed by DC_FP_* */
		DC_FP_START();
		dcn_get_soc_clks(
			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
		DC_FP_END();
		dcn_bw_notify_pplib_of_wm_ranges(
			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
	}

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
3148
/*
 * Program dynamic refresh rate (DRR) parameters on every given pipe's
 * OTG, and — when a DRR range is actually set — arm the static screen
 * trigger used for manual refresh control.
 */
void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
		int num_pipes, struct dc_crtc_timing_adjust adjust)
{
	int i = 0;
	struct drr_params params = {0};
	// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
	unsigned int event_triggers = 0x800;
	// Note DRR trigger events are generated regardless of whether num frames met.
	unsigned int num_frames = 2;

	params.vertical_total_max = adjust.v_total_max;
	params.vertical_total_min = adjust.v_total_min;
	params.vertical_total_mid = adjust.v_total_mid;
	params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
	/* TODO: If multiple pipes are to be supported, you need
	 * some GSL stuff. Static screen triggers may be programmed differently
	 * as well.
	 */
	for (i = 0; i < num_pipes; i++) {
		/* Guard against pipes whose TG or func table is absent */
		if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
			if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
				pipe_ctx[i]->stream_res.tg->funcs->set_drr(
					pipe_ctx[i]->stream_res.tg, &params);
			/* Only arm the trigger when a DRR range is in effect */
			if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
				if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
					pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
						pipe_ctx[i]->stream_res.tg,
						event_triggers, num_frames);
		}
	}
}
3180
3181 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
3182 int num_pipes,
3183 struct crtc_position *position)
3184 {
3185 int i = 0;
3186
3187 /* TODO: handle pipes > 1
3188 */
3189 for (i = 0; i < num_pipes; i++)
3190 pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
3191 }
3192
3193 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3194 int num_pipes, const struct dc_static_screen_params *params)
3195 {
3196 unsigned int i;
3197 unsigned int triggers = 0;
3198
3199 if (params->triggers.surface_update)
3200 triggers |= 0x80;
3201 if (params->triggers.cursor_update)
3202 triggers |= 0x2;
3203 if (params->triggers.force_trigger)
3204 triggers |= 0x1;
3205
3206 for (i = 0; i < num_pipes; i++)
3207 pipe_ctx[i]->stream_res.tg->funcs->
3208 set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3209 triggers, params->num_frames);
3210 }
3211
3212 static void dcn10_config_stereo_parameters(
3213 struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3214 {
3215 enum view_3d_format view_format = stream->view_format;
3216 enum dc_timing_3d_format timing_3d_format =\
3217 stream->timing.timing_3d_format;
3218 bool non_stereo_timing = false;
3219
3220 if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3221 timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3222 timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3223 non_stereo_timing = true;
3224
3225 if (non_stereo_timing == false &&
3226 view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3227
3228 flags->PROGRAM_STEREO = 1;
3229 flags->PROGRAM_POLARITY = 1;
3230 if (timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE ||
3231 timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3232 timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3233 timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3234
3235 if (stream->link && stream->link->ddc) {
3236 enum display_dongle_type dongle = \
3237 stream->link->ddc->dongle_type;
3238
3239 if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3240 dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3241 dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3242 flags->DISABLE_STEREO_DP_SYNC = 1;
3243 }
3244 }
3245 flags->RIGHT_EYE_POLARITY =\
3246 stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3247 if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3248 flags->FRAME_PACKED = 1;
3249 }
3250
3251 return;
3252 }
3253
3254 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3255 {
3256 struct crtc_stereo_flags flags = { 0 };
3257 struct dc_stream_state *stream = pipe_ctx->stream;
3258
3259 dcn10_config_stereo_parameters(stream, &flags);
3260
3261 if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3262 if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3263 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3264 } else {
3265 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3266 }
3267
3268 pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3269 pipe_ctx->stream_res.opp,
3270 flags.PROGRAM_STEREO == 1,
3271 &stream->timing);
3272
3273 pipe_ctx->stream_res.tg->funcs->program_stereo(
3274 pipe_ctx->stream_res.tg,
3275 &stream->timing,
3276 &flags);
3277
3278 return;
3279 }
3280
3281 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3282 {
3283 int i;
3284
3285 for (i = 0; i < res_pool->pipe_count; i++) {
3286 if (res_pool->hubps[i]->inst == mpcc_inst)
3287 return res_pool->hubps[i];
3288 }
3289 ASSERT(false);
3290 return NULL;
3291 }
3292
3293 void dcn10_wait_for_mpcc_disconnect(
3294 struct dc *dc,
3295 struct resource_pool *res_pool,
3296 struct pipe_ctx *pipe_ctx)
3297 {
3298 struct dce_hwseq *hws = dc->hwseq;
3299 int mpcc_inst;
3300
3301 if (dc->debug.sanity_checks) {
3302 hws->funcs.verify_allow_pstate_change_high(dc);
3303 }
3304
3305 if (!pipe_ctx->stream_res.opp)
3306 return;
3307
3308 for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3309 if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3310 struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3311
3312 if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
3313 res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3314 pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3315 hubp->funcs->set_blank(hubp, true);
3316 }
3317 }
3318
3319 if (dc->debug.sanity_checks) {
3320 hws->funcs.verify_allow_pstate_change_high(dc);
3321 }
3322
3323 }
3324
/*
 * Stub implementation of the display_power_gating hook: performs no
 * hardware programming and unconditionally reports success.
 */
bool dcn10_dummy_display_power_gating(
	struct dc *dc,
	uint8_t controller_id,
	struct dc_bios *dcb,
	enum pipe_gating_control power_gating)
{
	return true;
}
3333
3334 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3335 {
3336 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3337 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3338 bool flip_pending;
3339 struct dc *dc = pipe_ctx->stream->ctx->dc;
3340
3341 if (plane_state == NULL)
3342 return;
3343
3344 flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3345 pipe_ctx->plane_res.hubp);
3346
3347 plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3348
3349 if (!flip_pending)
3350 plane_state->status.current_address = plane_state->status.requested_address;
3351
3352 if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3353 tg->funcs->is_stereo_left_eye) {
3354 plane_state->status.is_right_eye =
3355 !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3356 }
3357
3358 if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3359 struct dce_hwseq *hwseq = dc->hwseq;
3360 struct timing_generator *tg = dc->res_pool->timing_generators[0];
3361 unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3362
3363 if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3364 struct hubbub *hubbub = dc->res_pool->hubbub;
3365
3366 hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3367 hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3368 }
3369 }
3370 }
3371
/*
 * Forward DCHUB init data to the HUBBUB block, which owns the DCHUB
 * programming sequence on DCN.
 */
void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
{
	struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;

	/* In DCN, this programming sequence is owned by the hubbub */
	hubbub->funcs->update_dchub(hubbub, dh_data);
}
3379
3380 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
3381 {
3382 struct pipe_ctx *test_pipe, *split_pipe;
3383 const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
3384 struct rect r1 = scl_data->recout, r2, r2_half;
3385 int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
3386 int cur_layer = pipe_ctx->plane_state->layer_index;
3387
3388 /**
3389 * Disable the cursor if there's another pipe above this with a
3390 * plane that contains this pipe's viewport to prevent double cursor
3391 * and incorrect scaling artifacts.
3392 */
3393 for (test_pipe = pipe_ctx->top_pipe; test_pipe;
3394 test_pipe = test_pipe->top_pipe) {
3395 // Skip invisible layer and pipe-split plane on same layer
3396 if (!test_pipe->plane_state ||
3397 !test_pipe->plane_state->visible ||
3398 test_pipe->plane_state->layer_index == cur_layer)
3399 continue;
3400
3401 r2 = test_pipe->plane_res.scl_data.recout;
3402 r2_r = r2.x + r2.width;
3403 r2_b = r2.y + r2.height;
3404 split_pipe = test_pipe;
3405
3406 /**
3407 * There is another half plane on same layer because of
3408 * pipe-split, merge together per same height.
3409 */
3410 for (split_pipe = pipe_ctx->top_pipe; split_pipe;
3411 split_pipe = split_pipe->top_pipe)
3412 if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
3413 r2_half = split_pipe->plane_res.scl_data.recout;
3414 r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
3415 r2.width = r2.width + r2_half.width;
3416 r2_r = r2.x + r2.width;
3417 break;
3418 }
3419
3420 if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
3421 return true;
3422 }
3423
3424 return false;
3425 }
3426
/*
 * Program the HW cursor position for a pipe.
 *
 * Translates the cursor position from stream space into plane space,
 * compensating for plane scaling, clipped source viewports, mirroring
 * and rotation (0/90/180/270), including the pipe-split and ODM-combine
 * layouts where one plane is driven by two pipes. The final position is
 * programmed into both HUBP and DPP.
 *
 * NOTE(review): pos_cpy.x/y are unsigned while several comparisons
 * below mix in signed values (viewport offsets, (int) casts of
 * curs_attr.width) — the math relies on the earlier clamp of x_pos/y_pos
 * to >= 0; confirm for negative viewport offsets.
 */
void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_cursor_mi_param param = {
		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
		.viewport = pipe_ctx->plane_res.scl_data.viewport,
		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
		.rotation = pipe_ctx->plane_state->rotation,
		.mirror = pipe_ctx->plane_state->horizontal_mirror
	};
	bool pipe_split_on = false;
	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
		(pipe_ctx->prev_odm_pipe != NULL);

	int x_plane = pipe_ctx->plane_state->dst_rect.x;
	int y_plane = pipe_ctx->plane_state->dst_rect.y;
	int x_pos = pos_cpy.x;
	int y_pos = pos_cpy.y;

	/* A viewport smaller than the plane's source rect on a piped plane
	 * indicates pipe split; positions then need per-pipe adjustment.
	 */
	if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
		if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
			(pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
			pipe_split_on = true;
		}
	}

	/**
	 * DC cursor is stream space, HW cursor is plane space and drawn
	 * as part of the framebuffer.
	 *
	 * Cursor position can't be negative, but hotspot can be used to
	 * shift cursor out of the plane bounds. Hotspot must be smaller
	 * than the cursor size.
	 */

	/**
	 * Translate cursor from stream space to plane space.
	 *
	 * If the cursor is scaled then we need to scale the position
	 * to be in the approximately correct place. We can't do anything
	 * about the actual size being incorrect, that's a limitation of
	 * the hardware.
	 */
	if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
				pipe_ctx->plane_state->dst_rect.width;
		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
				pipe_ctx->plane_state->dst_rect.height;
	} else {
		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
				pipe_ctx->plane_state->dst_rect.width;
		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
				pipe_ctx->plane_state->dst_rect.height;
	}

	/**
	 * If the cursor's source viewport is clipped then we need to
	 * translate the cursor to appear in the correct position on
	 * the screen.
	 *
	 * This translation isn't affected by scaling so it needs to be
	 * done *after* we adjust the position for the scale factor.
	 *
	 * This is only done by opt-in for now since there are still
	 * some usecases like tiled display that might enable the
	 * cursor on both streams while expecting dc to clip it.
	 */
	if (pos_cpy.translate_by_source) {
		x_pos += pipe_ctx->plane_state->src_rect.x;
		y_pos += pipe_ctx->plane_state->src_rect.y;
	}

	/**
	 * If the position is negative then we need to add to the hotspot
	 * to shift the cursor outside the plane.
	 */

	if (x_pos < 0) {
		pos_cpy.x_hotspot -= x_pos;
		x_pos = 0;
	}

	if (y_pos < 0) {
		pos_cpy.y_hotspot -= y_pos;
		y_pos = 0;
	}

	pos_cpy.x = (uint32_t)x_pos;
	pos_cpy.y = (uint32_t)y_pos;

	/* Video progressive planes never show the HW cursor */
	if (pipe_ctx->plane_state->address.type
			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
		pos_cpy.enable = false;

	/* Suppress the cursor if a covering pipe above already draws it */
	if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
		pos_cpy.enable = false;


	if (param.rotation == ROTATION_ANGLE_0) {
		int viewport_width =
			pipe_ctx->plane_res.scl_data.viewport.width;
		int viewport_x =
			pipe_ctx->plane_res.scl_data.viewport.x;

		if (param.mirror) {
			if (pipe_split_on || odm_combine_on) {
				if (pos_cpy.x >= viewport_width + viewport_x) {
					pos_cpy.x = 2 * viewport_width
							- pos_cpy.x + 2 * viewport_x;
				} else {
					uint32_t temp_x = pos_cpy.x;

					pos_cpy.x = 2 * viewport_x - pos_cpy.x;
					if (temp_x >= viewport_x +
						(int)hubp->curs_attr.width || pos_cpy.x
						<= (int)hubp->curs_attr.width +
						pipe_ctx->plane_state->src_rect.x) {
						pos_cpy.x = temp_x + viewport_width;
					}
				}
			} else {
				pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
			}
		}
	}
	// Swap axis and mirror horizontally
	else if (param.rotation == ROTATION_ANGLE_90) {
		uint32_t temp_x = pos_cpy.x;

		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
		pos_cpy.y = temp_x;
	}
	// Swap axis and mirror vertically
	else if (param.rotation == ROTATION_ANGLE_270) {
		uint32_t temp_y = pos_cpy.y;
		int viewport_height =
			pipe_ctx->plane_res.scl_data.viewport.height;
		int viewport_y =
			pipe_ctx->plane_res.scl_data.viewport.y;

		/**
		 * Display groups that are 1xnY, have pos_cpy.x > 2 * viewport.height
		 * For pipe split cases:
		 * - apply offset of viewport.y to normalize pos_cpy.x
		 * - calculate the pos_cpy.y as before
		 * - shift pos_cpy.y back by same offset to get final value
		 * - since we iterate through both pipes, use the lower
		 *   viewport.y for offset
		 * For non pipe split cases, use the same calculation for
		 *  pos_cpy.y as the 180 degree rotation case below,
		 *  but use pos_cpy.x as our input because we are rotating
		 *  270 degrees
		 */
		if (pipe_split_on || odm_combine_on) {
			int pos_cpy_x_offset;
			int other_pipe_viewport_y;

			if (pipe_split_on) {
				if (pipe_ctx->bottom_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
				} else {
					other_pipe_viewport_y =
						pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
				}
			} else {
				if (pipe_ctx->next_odm_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
				} else {
					other_pipe_viewport_y =
						pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
				}
			}
			pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
				other_pipe_viewport_y : viewport_y;
			pos_cpy.x -= pos_cpy_x_offset;
			if (pos_cpy.x > viewport_height) {
				pos_cpy.x = pos_cpy.x - viewport_height;
				pos_cpy.y = viewport_height - pos_cpy.x;
			} else {
				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
			}
			pos_cpy.y += pos_cpy_x_offset;
		} else {
			pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
		}
		pos_cpy.x = temp_y;
	}
	// Mirror horizontally and vertically
	else if (param.rotation == ROTATION_ANGLE_180) {
		int viewport_width =
			pipe_ctx->plane_res.scl_data.viewport.width;
		int viewport_x =
			pipe_ctx->plane_res.scl_data.viewport.x;

		if (!param.mirror) {
			if (pipe_split_on || odm_combine_on) {
				if (pos_cpy.x >= viewport_width + viewport_x) {
					pos_cpy.x = 2 * viewport_width
							- pos_cpy.x + 2 * viewport_x;
				} else {
					uint32_t temp_x = pos_cpy.x;

					pos_cpy.x = 2 * viewport_x - pos_cpy.x;
					if (temp_x >= viewport_x +
						(int)hubp->curs_attr.width || pos_cpy.x
						<= (int)hubp->curs_attr.width +
						pipe_ctx->plane_state->src_rect.x) {
						pos_cpy.x = 2 * viewport_width - temp_x;
					}
				}
			} else {
				pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
			}
		}

		/**
		 * Display groups that are 1xnY, have pos_cpy.y > viewport.height
		 * Calculation:
		 *   delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
		 *   pos_cpy.y_new = viewport.y + delta_from_bottom
		 * Simplify it as:
		 *   pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
		 */
		pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
			pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
	}

	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
}
3664
/*
 * Push the stream's cursor attributes to both HUBP and DPP for the
 * pipe; the two blocks must be programmed with the same attributes.
 */
void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
{
	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;

	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
			pipe_ctx->plane_res.hubp, attributes);
	pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
		pipe_ctx->plane_res.dpp, attributes);
}
3674
3675 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3676 {
3677 uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3678 struct fixed31_32 multiplier;
3679 struct dpp_cursor_attributes opt_attr = { 0 };
3680 uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3681 struct custom_float_format fmt;
3682
3683 if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3684 return;
3685
3686 fmt.exponenta_bits = 5;
3687 fmt.mantissa_bits = 10;
3688 fmt.sign = true;
3689
3690 if (sdr_white_level > 80) {
3691 multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3692 convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3693 }
3694
3695 opt_attr.scale = hw_scale;
3696 opt_attr.bias = 0;
3697
3698 pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3699 pipe_ctx->plane_res.dpp, &opt_attr);
3700 }
3701
3702 /*
3703 * apply_front_porch_workaround TODO FPGA still need?
3704 *
3705 * This is a workaround for a bug that has existed since R5xx and has not been
3706 * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
3707 */
3708 static void apply_front_porch_workaround(
3709 struct dc_crtc_timing *timing)
3710 {
3711 if (timing->flags.INTERLACE == 1) {
3712 if (timing->v_front_porch < 2)
3713 timing->v_front_porch = 2;
3714 } else {
3715 if (timing->v_front_porch < 1)
3716 timing->v_front_porch = 1;
3717 }
3718 }
3719
/*
 * Compute the VUPDATE line offset relative to VSYNC for a pipe.
 *
 * Works on a local copy of the stream timing with the front-porch
 * workaround applied. Derives the end of the active-blank region
 * (scaled by 2 for interlaced timings) and subtracts the pipe's
 * vstartup_start. The result can be negative.
 */
int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
{
	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
	struct dc_crtc_timing patched_crtc_timing;
	int vesa_sync_start;
	int asic_blank_end;
	int interlace_factor;

	/* Copy so the front-porch clamp doesn't mutate the stream timing */
	patched_crtc_timing = *dc_crtc_timing;
	apply_front_porch_workaround(&patched_crtc_timing);

	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;

	/* VESA-style sync start: active + bottom border + front porch */
	vesa_sync_start = patched_crtc_timing.v_addressable +
			patched_crtc_timing.v_border_bottom +
			patched_crtc_timing.v_front_porch;

	asic_blank_end = (patched_crtc_timing.v_total -
			vesa_sync_start -
			patched_crtc_timing.v_border_top)
			* interlace_factor;

	return asic_blank_end -
			pipe_ctx->pipe_dlg_param.vstartup_start + 1;
}
3745
/*
 * Compute the scanline window [start_line, end_line) at which the VUPDATE
 * event occurs, normalized into the frame's [0, v_total) line range.
 */
void dcn10_calc_vupdate_position(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		uint32_t *start_line,
		uint32_t *end_line)
{
	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	/* Offset of VUPDATE relative to VSYNC; can be negative. */
	int vupdate_pos = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);

	/* Wrap the offset into the frame: non-negative values reduce modulo
	 * v_total; negative values are shifted up by enough whole frames to
	 * become non-negative.
	 * NOTE(review): the trailing "- 1" in the negative branch biases the
	 * wrapped line down by one — presumably intentional for interrupt
	 * latency; confirm against hardware docs.
	 */
	if (vupdate_pos >= 0)
		*start_line = vupdate_pos - ((vupdate_pos / timing->v_total) * timing->v_total);
	else
		*start_line = vupdate_pos + ((-vupdate_pos / timing->v_total) + 1) * timing->v_total - 1;
	/* The window is two lines wide and wraps at end of frame. */
	*end_line = (*start_line + 2) % timing->v_total;
}
3761
/*
 * Translate the stream's periodic interrupt request (a line offset from a
 * chosen reference point) into an absolute scanline window
 * [start_line, end_line) within the frame.
 */
static void dcn10_cal_vline_position(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		uint32_t *start_line,
		uint32_t *end_line)
{
	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	/* Requested line offset relative to the reference point; may be
	 * negative (before the reference) or positive (after it).
	 */
	int vline_pos = pipe_ctx->stream->periodic_interrupt.lines_offset;

	if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_UPDATE) {
		/* Shrink the offset by one line toward zero before adding the
		 * VUPDATE position.
		 * NOTE(review): presumably compensates for the interrupt
		 * firing one line early/late around VUPDATE — confirm.
		 */
		if (vline_pos > 0)
			vline_pos--;
		else if (vline_pos < 0)
			vline_pos++;

		vline_pos += dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
		/* Wrap into [0, v_total); same normalization as
		 * dcn10_calc_vupdate_position, including the "- 1" bias on the
		 * negative branch.
		 */
		if (vline_pos >= 0)
			*start_line = vline_pos - ((vline_pos / timing->v_total) * timing->v_total);
		else
			*start_line = vline_pos + ((-vline_pos / timing->v_total) + 1) * timing->v_total - 1;
		*end_line = (*start_line + 2) % timing->v_total;
	} else if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_SYNC) {
		// vsync is line 0 so start_line is just the requested line offset
		*start_line = vline_pos;
		*end_line = (*start_line + 2) % timing->v_total;
	} else
		ASSERT(0);
}
3790
3791 void dcn10_setup_periodic_interrupt(
3792 struct dc *dc,
3793 struct pipe_ctx *pipe_ctx)
3794 {
3795 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3796 uint32_t start_line = 0;
3797 uint32_t end_line = 0;
3798
3799 dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line);
3800
3801 tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3802 }
3803
3804 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3805 {
3806 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3807 int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3808
3809 if (start_line < 0) {
3810 ASSERT(0);
3811 start_line = 0;
3812 }
3813
3814 if (tg->funcs->setup_vertical_interrupt2)
3815 tg->funcs->setup_vertical_interrupt2(tg, start_line);
3816 }
3817
3818 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3819 struct dc_link_settings *link_settings)
3820 {
3821 struct encoder_unblank_param params = {0};
3822 struct dc_stream_state *stream = pipe_ctx->stream;
3823 struct dc_link *link = stream->link;
3824 struct dce_hwseq *hws = link->dc->hwseq;
3825
3826 /* only 3 items below are used by unblank */
3827 params.timing = pipe_ctx->stream->timing;
3828
3829 params.link_settings.link_rate = link_settings->link_rate;
3830
3831 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3832 if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3833 params.timing.pix_clk_100hz /= 2;
3834 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
3835 }
3836
3837 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3838 hws->funcs.edp_backlight_control(link, true);
3839 }
3840 }
3841
3842 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3843 const uint8_t *custom_sdp_message,
3844 unsigned int sdp_message_size)
3845 {
3846 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3847 pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3848 pipe_ctx->stream_res.stream_enc,
3849 custom_sdp_message,
3850 sdp_message_size);
3851 }
3852 }
3853 enum dc_status dcn10_set_clock(struct dc *dc,
3854 enum dc_clock_type clock_type,
3855 uint32_t clk_khz,
3856 uint32_t stepping)
3857 {
3858 struct dc_state *context = dc->current_state;
3859 struct dc_clock_config clock_cfg = {0};
3860 struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3861
3862 if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
3863 return DC_FAIL_UNSUPPORTED_1;
3864
3865 dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3866 context, clock_type, &clock_cfg);
3867
3868 if (clk_khz > clock_cfg.max_clock_khz)
3869 return DC_FAIL_CLK_EXCEED_MAX;
3870
3871 if (clk_khz < clock_cfg.min_clock_khz)
3872 return DC_FAIL_CLK_BELOW_MIN;
3873
3874 if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
3875 return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
3876
3877 /*update internal request clock for update clock use*/
3878 if (clock_type == DC_CLOCK_TYPE_DISPCLK)
3879 current_clocks->dispclk_khz = clk_khz;
3880 else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
3881 current_clocks->dppclk_khz = clk_khz;
3882 else
3883 return DC_ERROR_UNEXPECTED;
3884
3885 if (dc->clk_mgr->funcs->update_clocks)
3886 dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
3887 context, true);
3888 return DC_OK;
3889
3890 }
3891
3892 void dcn10_get_clock(struct dc *dc,
3893 enum dc_clock_type clock_type,
3894 struct dc_clock_config *clock_cfg)
3895 {
3896 struct dc_state *context = dc->current_state;
3897
3898 if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3899 dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3900
3901 }
3902
3903 void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
3904 {
3905 struct resource_pool *pool = dc->res_pool;
3906 int i;
3907
3908 for (i = 0; i < pool->pipe_count; i++) {
3909 struct hubp *hubp = pool->hubps[i];
3910 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
3911
3912 hubp->funcs->hubp_read_state(hubp);
3913
3914 if (!s->blank_en)
3915 dcc_en_bits[i] = s->dcc_en ? 1 : 0;
3916 }
3917 }