1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "core_types.h"
29 #include "resource.h"
30 #include "custom_float.h"
31 #include "dcn10_hw_sequencer.h"
32 #include "dce110/dce110_hw_sequencer.h"
33 #include "dce/dce_hwseq.h"
34 #include "abm.h"
35 #include "dmcu.h"
36 #include "dcn10_optc.h"
37 #include "dcn10/dcn10_dpp.h"
38 #include "dcn10/dcn10_mpc.h"
39 #include "timing_generator.h"
40 #include "opp.h"
41 #include "ipp.h"
42 #include "mpc.h"
43 #include "reg_helper.h"
44 #include "dcn10_hubp.h"
45 #include "dcn10_hubbub.h"
46 #include "dcn10_cm_common.h"
47 #include "dc_link_dp.h"
48 #include "dccg.h"
49 #include "clk_mgr.h"
50
51
52 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
53 #include "dsc.h"
54 #endif
55
56 #define DC_LOGGER_INIT(logger)
57
58 #define CTX \
59 hws->ctx
60 #define REG(reg)\
61 hws->regs->reg
62
63 #undef FN
64 #define FN(reg_name, field_name) \
65 hws->shifts->field_name, hws->masks->field_name
66
67 /* printed field is 17 characters wide; the first two characters are spaces */
68 #define DTN_INFO_MICRO_SEC(ref_cycle) \
69 print_microsec(dc_ctx, log_ctx, ref_cycle)
70
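/*
 * Convert a DCHUB reference-clock cycle count to microseconds and print it
 * with three fractional digits, filling the 17-character log column.
 */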
71 void print_microsec(struct dc_context *dc_ctx,
72 struct dc_log_buffer_ctx *log_ctx,
73 uint32_t ref_cycle)
74 {
75 const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
76 static const unsigned int frac = 1000;
77 uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
78
79 DTN_INFO(" %11d.%03d",
80 us_x10 / frac,
81 us_x10 % frac);
82 }
83
84 static void log_mpc_crc(struct dc *dc,
85 struct dc_log_buffer_ctx *log_ctx)
86 {
87 struct dc_context *dc_ctx = dc->ctx;
88 struct dce_hwseq *hws = dc->hwseq;
89
90 if (REG(MPC_CRC_RESULT_GB))
91 DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
92 REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
93 if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
94 DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
95 REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
96 }
97
98 void dcn10_log_hubbub_state(struct dc *dc, struct dc_log_buffer_ctx *log_ctx)
99 {
100 struct dc_context *dc_ctx = dc->ctx;
101 struct dcn_hubbub_wm wm;
102 int i;
103
104 memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
105 dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
106
107 DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent"
108 " sr_enter sr_exit dram_clk_change\n");
109
110 for (i = 0; i < 4; i++) {
111 struct dcn_hubbub_wm_set *s;
112
113 s = &wm.sets[i];
114 DTN_INFO("WM_Set[%d]:", s->wm_set);
115 DTN_INFO_MICRO_SEC(s->data_urgent);
116 DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
117 DTN_INFO_MICRO_SEC(s->sr_enter);
118 DTN_INFO_MICRO_SEC(s->sr_exit);
119 DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
120 DTN_INFO("\n");
121 }
122
123 DTN_INFO("\n");
124 }
125
126 static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
127 {
128 struct dc_context *dc_ctx = dc->ctx;
129 struct resource_pool *pool = dc->res_pool;
130 int i;
131
132 DTN_INFO("HUBP: format addr_hi width height"
133 " rot mir sw_mode dcc_en blank_en ttu_dis underflow"
134 " min_ttu_vblank qos_low_wm qos_high_wm\n");
135 for (i = 0; i < pool->pipe_count; i++) {
136 struct hubp *hubp = pool->hubps[i];
137 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
138
139 hubp->funcs->hubp_read_state(hubp);
140
141 if (!s->blank_en) {
142 DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh"
143 " %6d %8d %7d %8xh",
144 hubp->inst,
145 s->pixel_format,
146 s->inuse_addr_hi,
147 s->viewport_width,
148 s->viewport_height,
149 s->rotation_angle,
150 s->h_mirror_en,
151 s->sw_mode,
152 s->dcc_en,
153 s->blank_en,
154 s->ttu_disable,
155 s->underflow_status);
156 DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
157 DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
158 DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
159 DTN_INFO("\n");
160 }
161 }
162
163 DTN_INFO("\n=========RQ========\n");
164 DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
165 " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
166 " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
167 for (i = 0; i < pool->pipe_count; i++) {
168 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
169 struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
170
171 if (!s->blank_en)
172 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
173 pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
174 rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
175 rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
176 rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
177 rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
178 rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
179 rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
180 rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
181 rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
182 }
183
184 DTN_INFO("========DLG========\n");
185 DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s "
186 " dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq"
187 " vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll"
188 " rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
189 " mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
190 " rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
191 " x_rp_dlay x_rr_sfl\n");
192 for (i = 0; i < pool->pipe_count; i++) {
193 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
194 struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
195
196 if (!s->blank_en)
197 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
198 "% 8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
199 " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
200 pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
201 dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
202 dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
203 dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
204 dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
205 dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
206 dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
207 dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
208 dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
209 dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
210 dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
211 dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
212 dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
213 dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
214 dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
215 dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
216 dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
217 dlg_regs->xfc_reg_remote_surface_flip_latency);
218 }
219
220 DTN_INFO("========TTU========\n");
221 DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c"
222 " rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l"
223 " qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n");
224 for (i = 0; i < pool->pipe_count; i++) {
225 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
226 struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
227
228 if (!s->blank_en)
229 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
230 pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
231 ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
232 ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
233 ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
234 ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
235 ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
236 ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
237 }
238 DTN_INFO("\n");
239 }
240
241 void dcn10_log_hw_state(struct dc *dc,
242 struct dc_log_buffer_ctx *log_ctx)
243 {
244 struct dc_context *dc_ctx = dc->ctx;
245 struct resource_pool *pool = dc->res_pool;
246 int i;
247
248 DTN_INFO_BEGIN();
249
250 dcn10_log_hubbub_state(dc, log_ctx);
251
252 dcn10_log_hubp_states(dc, log_ctx);
253
254 DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
255 " GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 "
256 "C31 C32 C33 C34\n");
257 for (i = 0; i < pool->pipe_count; i++) {
258 struct dpp *dpp = pool->dpps[i];
259 struct dcn_dpp_state s = {0};
260
261 dpp->funcs->dpp_read_state(dpp, &s);
262
263 if (!s.is_enabled)
264 continue;
265
266 DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s"
267 "%8x %08xh %08xh %08xh %08xh %08xh %08xh",
268 dpp->inst,
269 s.igam_input_format,
270 (s.igam_lut_mode == 0) ? "BypassFixed" :
271 ((s.igam_lut_mode == 1) ? "BypassFloat" :
272 ((s.igam_lut_mode == 2) ? "RAM" :
273 ((s.igam_lut_mode == 3) ? "RAM" :
274 "Unknown"))),
275 (s.dgam_lut_mode == 0) ? "Bypass" :
276 ((s.dgam_lut_mode == 1) ? "sRGB" :
277 ((s.dgam_lut_mode == 2) ? "Ycc" :
278 ((s.dgam_lut_mode == 3) ? "RAM" :
279 ((s.dgam_lut_mode == 4) ? "RAM" :
280 "Unknown")))),
281 (s.rgam_lut_mode == 0) ? "Bypass" :
282 ((s.rgam_lut_mode == 1) ? "sRGB" :
283 ((s.rgam_lut_mode == 2) ? "Ycc" :
284 ((s.rgam_lut_mode == 3) ? "RAM" :
285 ((s.rgam_lut_mode == 4) ? "RAM" :
286 "Unknown")))),
287 s.gamut_remap_mode,
288 s.gamut_remap_c11_c12,
289 s.gamut_remap_c13_c14,
290 s.gamut_remap_c21_c22,
291 s.gamut_remap_c23_c24,
292 s.gamut_remap_c31_c32,
293 s.gamut_remap_c33_c34);
294 DTN_INFO("\n");
295 }
296 DTN_INFO("\n");
297
298 DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
299 for (i = 0; i < pool->pipe_count; i++) {
300 struct mpcc_state s = {0};
301
302 pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
303 if (s.opp_id != 0xf)
304 DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n",
305 i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
306 s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
307 s.idle);
308 }
309 DTN_INFO("\n");
310
311 DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel"
312 " h_bs h_be h_ss h_se hpol htot vtot underflow\n");
313
314 for (i = 0; i < pool->timing_generator_count; i++) {
315 struct timing_generator *tg = pool->timing_generators[i];
316 struct dcn_otg_state s = {0};
317
318 optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
319
320 //only print if OTG master is enabled
321 if ((s.otg_enabled & 1) == 0)
322 continue;
323
324 DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d"
325 " %5d %5d %5d %5d %9d\n",
326 tg->inst,
327 s.v_blank_start,
328 s.v_blank_end,
329 s.v_sync_a_start,
330 s.v_sync_a_end,
331 s.v_sync_a_pol,
332 s.v_total_max,
333 s.v_total_min,
334 s.v_total_max_sel,
335 s.v_total_min_sel,
336 s.h_blank_start,
337 s.h_blank_end,
338 s.h_sync_a_start,
339 s.h_sync_a_end,
340 s.h_sync_a_pol,
341 s.h_total,
342 s.v_total,
343 s.underflow_occurred_status);
344
345 // Clear underflow for debug purposes
346 // We want to keep underflow sticky bit on for the longevity tests outside of test environment.
347 // This function is called only from Windows or Diags test environment, hence it's safe to clear
348 // it from here without affecting the original intent.
349 tg->funcs->clear_optc_underflow(tg);
350 }
351 DTN_INFO("\n");
352
353 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
354 DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
355 for (i = 0; i < pool->res_cap->num_dsc; i++) {
356 struct display_stream_compressor *dsc = pool->dscs[i];
357 struct dcn_dsc_state s = {0};
358
359 dsc->funcs->dsc_read_state(dsc, &s);
360 DTN_INFO("[%d]: %-9d %-12d %-10d\n",
361 dsc->inst,
362 s.dsc_clock_en,
363 s.dsc_slice_width,
364 s.dsc_bytes_per_pixel);
365 DTN_INFO("\n");
366 }
367 DTN_INFO("\n");
368
369 DTN_INFO("S_ENC: DSC_MODE SEC_GSP7_LINE_NUM"
370 " VBID6_LINE_REFERENCE VBID6_LINE_NUM SEC_GSP7_ENABLE SEC_STREAM_ENABLE\n");
371 for (i = 0; i < pool->stream_enc_count; i++) {
372 struct stream_encoder *enc = pool->stream_enc[i];
373 struct enc_state s = {0};
374
375 if (enc->funcs->enc_read_state) {
376 enc->funcs->enc_read_state(enc, &s);
377 DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
378 enc->id,
379 s.dsc_mode,
380 s.sec_gsp_pps_line_num,
381 s.vbid6_line_reference,
382 s.vbid6_line_num,
383 s.sec_gsp_pps_enable,
384 s.sec_stream_enable);
385 DTN_INFO("\n");
386 }
387 }
388 DTN_INFO("\n");
389
390 DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS\n");
391 for (i = 0; i < dc->link_count; i++) {
392 struct link_encoder *lenc = dc->links[i]->link_enc;
393
394 struct link_enc_state s = {0};
395
396 if (lenc->funcs->read_state) {
397 lenc->funcs->read_state(lenc, &s);
398 DTN_INFO("[%-3d]: %-12d %-22d %-22d\n",
399 i,
400 s.dphy_fec_en,
401 s.dphy_fec_ready_shadow,
402 s.dphy_fec_active_status);
403 DTN_INFO("\n");
404 }
405 }
406 DTN_INFO("\n");
407 #endif
408
409 DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
410 "dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
411 dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
412 dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
413 dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
414 dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
415 dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
416 dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
417 dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
418
419 log_mpc_crc(dc, log_ctx);
420
421 DTN_INFO_END();
422 }
423
424 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
425 {
426 struct hubp *hubp = pipe_ctx->plane_res.hubp;
427 struct timing_generator *tg = pipe_ctx->stream_res.tg;
428
429 if (tg->funcs->is_optc_underflow_occurred(tg)) {
430 tg->funcs->clear_optc_underflow(tg);
431 return true;
432 }
433
434 if (hubp->funcs->hubp_get_underflow_status(hubp)) {
435 hubp->funcs->hubp_clear_underflow(hubp);
436 return true;
437 }
438 return false;
439 }
440
441 static void enable_power_gating_plane(
442 struct dce_hwseq *hws,
443 bool enable)
444 {
445 bool force_on = 1; /* disable power gating */
446
447 if (enable)
448 force_on = 0;
449
450 /* DCHUBP0/1/2/3 */
451 REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
452 REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
453 REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
454 REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
455
456 /* DPP0/1/2/3 */
457 REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
458 REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
459 REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
460 REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
461 }
462
463 static void disable_vga(
464 struct dce_hwseq *hws)
465 {
466 unsigned int in_vga1_mode = 0;
467 unsigned int in_vga2_mode = 0;
468 unsigned int in_vga3_mode = 0;
469 unsigned int in_vga4_mode = 0;
470
471 REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
472 REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
473 REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
474 REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
475
476 if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
477 in_vga3_mode == 0 && in_vga4_mode == 0)
478 return;
479
480 REG_WRITE(D1VGA_CONTROL, 0);
481 REG_WRITE(D2VGA_CONTROL, 0);
482 REG_WRITE(D3VGA_CONTROL, 0);
483 REG_WRITE(D4VGA_CONTROL, 0);
484
485 /* HW Engineer's Notes:
486 * During switch from vga->extended, if we set the VGA_TEST_ENABLE and
487 * then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
488 *
489 * Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
490 * VGA_TEST_ENABLE, to leave it in the same state as before.
491 */
492 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
493 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
494 }
495
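/*
 * Power gate or un-gate a single DPP instance. Each DPP sits in its own
 * power domain (DOMAIN1/3/5/7); the PGFSM status is polled until it reads
 * 0 (powered on) or 2 (powered off).
 */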
496 static void dpp_pg_control(
497 struct dce_hwseq *hws,
498 unsigned int dpp_inst,
499 bool power_on)
500 {
501 uint32_t power_gate = power_on ? 0 : 1;
502 uint32_t pwr_status = power_on ? 0 : 2;
503
504 if (hws->ctx->dc->debug.disable_dpp_power_gate)
505 return;
506 if (REG(DOMAIN1_PG_CONFIG) == 0)
507 return;
508
509 switch (dpp_inst) {
510 case 0: /* DPP0 */
511 REG_UPDATE(DOMAIN1_PG_CONFIG,
512 DOMAIN1_POWER_GATE, power_gate);
513
514 REG_WAIT(DOMAIN1_PG_STATUS,
515 DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
516 1, 1000);
517 break;
518 case 1: /* DPP1 */
519 REG_UPDATE(DOMAIN3_PG_CONFIG,
520 DOMAIN3_POWER_GATE, power_gate);
521
522 REG_WAIT(DOMAIN3_PG_STATUS,
523 DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
524 1, 1000);
525 break;
526 case 2: /* DPP2 */
527 REG_UPDATE(DOMAIN5_PG_CONFIG,
528 DOMAIN5_POWER_GATE, power_gate);
529
530 REG_WAIT(DOMAIN5_PG_STATUS,
531 DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
532 1, 1000);
533 break;
534 case 3: /* DPP3 */
535 REG_UPDATE(DOMAIN7_PG_CONFIG,
536 DOMAIN7_POWER_GATE, power_gate);
537
538 REG_WAIT(DOMAIN7_PG_STATUS,
539 DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
540 1, 1000);
541 break;
542 default:
543 BREAK_TO_DEBUGGER();
544 break;
545 }
546 }
547
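/*
 * Same power-gating sequence as dpp_pg_control(), but for the DCHUBP
 * domains (DOMAIN0/2/4/6).
 */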
548 static void hubp_pg_control(
549 struct dce_hwseq *hws,
550 unsigned int hubp_inst,
551 bool power_on)
552 {
553 uint32_t power_gate = power_on ? 0 : 1;
554 uint32_t pwr_status = power_on ? 0 : 2;
555
556 if (hws->ctx->dc->debug.disable_hubp_power_gate)
557 return;
558 if (REG(DOMAIN0_PG_CONFIG) == 0)
559 return;
560
561 switch (hubp_inst) {
562 case 0: /* DCHUBP0 */
563 REG_UPDATE(DOMAIN0_PG_CONFIG,
564 DOMAIN0_POWER_GATE, power_gate);
565
566 REG_WAIT(DOMAIN0_PG_STATUS,
567 DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
568 1, 1000);
569 break;
570 case 1: /* DCHUBP1 */
571 REG_UPDATE(DOMAIN2_PG_CONFIG,
572 DOMAIN2_POWER_GATE, power_gate);
573
574 REG_WAIT(DOMAIN2_PG_STATUS,
575 DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
576 1, 1000);
577 break;
578 case 2: /* DCHUBP2 */
579 REG_UPDATE(DOMAIN4_PG_CONFIG,
580 DOMAIN4_POWER_GATE, power_gate);
581
582 REG_WAIT(DOMAIN4_PG_STATUS,
583 DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
584 1, 1000);
585 break;
586 case 3: /* DCHUBP3 */
587 REG_UPDATE(DOMAIN6_PG_CONFIG,
588 DOMAIN6_POWER_GATE, power_gate);
589
590 REG_WAIT(DOMAIN6_PG_STATUS,
591 DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
592 1, 1000);
593 break;
594 default:
595 BREAK_TO_DEBUGGER();
596 break;
597 }
598 }
599
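/*
 * Un-gate the DPP and HUBP power domains for the given pipe index.
 * IP_REQUEST_EN is asserted around the domain power-gate writes.
 */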
600 static void power_on_plane(
601 struct dce_hwseq *hws,
602 int plane_id)
603 {
604 DC_LOGGER_INIT(hws->ctx->logger);
605 if (REG(DC_IP_REQUEST_CNTL)) {
606 REG_SET(DC_IP_REQUEST_CNTL, 0,
607 IP_REQUEST_EN, 1);
608 dpp_pg_control(hws, plane_id, true);
609 hubp_pg_control(hws, plane_id, true);
610 REG_SET(DC_IP_REQUEST_CNTL, 0,
611 IP_REQUEST_EN, 0);
612 DC_LOG_DEBUG(
613 "Un-gated front end for pipe %d\n", plane_id);
614 }
615 }
616
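/*
 * DEGVIDCN10_253 workaround: once every HUBP has been power gated,
 * apply_DEGVIDCN10_253_wa() powers HUBP0 back on and un-blanks it so that
 * stutter can be enabled; undo_DEGVIDCN10_253_wa() blanks and re-gates
 * HUBP0 before a plane is enabled again.
 */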
617 static void undo_DEGVIDCN10_253_wa(struct dc *dc)
618 {
619 struct dce_hwseq *hws = dc->hwseq;
620 struct hubp *hubp = dc->res_pool->hubps[0];
621
622 if (!hws->wa_state.DEGVIDCN10_253_applied)
623 return;
624
625 hubp->funcs->set_blank(hubp, true);
626
627 REG_SET(DC_IP_REQUEST_CNTL, 0,
628 IP_REQUEST_EN, 1);
629
630 hubp_pg_control(hws, 0, false);
631 REG_SET(DC_IP_REQUEST_CNTL, 0,
632 IP_REQUEST_EN, 0);
633
634 hws->wa_state.DEGVIDCN10_253_applied = false;
635 }
636
637 static void apply_DEGVIDCN10_253_wa(struct dc *dc)
638 {
639 struct dce_hwseq *hws = dc->hwseq;
640 struct hubp *hubp = dc->res_pool->hubps[0];
641 int i;
642
643 if (dc->debug.disable_stutter)
644 return;
645
646 if (!hws->wa.DEGVIDCN10_253)
647 return;
648
649 for (i = 0; i < dc->res_pool->pipe_count; i++) {
650 if (!dc->res_pool->hubps[i]->power_gated)
651 return;
652 }
653
654 /* all pipe power gated, apply work around to enable stutter. */
655
656 REG_SET(DC_IP_REQUEST_CNTL, 0,
657 IP_REQUEST_EN, 1);
658
659 hubp_pg_control(hws, 0, true);
660 REG_SET(DC_IP_REQUEST_CNTL, 0,
661 IP_REQUEST_EN, 0);
662
663 hubp->funcs->set_hubp_blank_en(hubp, false);
664 hws->wa_state.DEGVIDCN10_253_applied = true;
665 }
666
667 static void bios_golden_init(struct dc *dc)
668 {
669 struct dc_bios *bp = dc->ctx->dc_bios;
670 int i;
671
672 /* initialize dcn global */
673 bp->funcs->enable_disp_power_gating(bp,
674 CONTROLLER_ID_D0, ASIC_PIPE_INIT);
675
676 for (i = 0; i < dc->res_pool->pipe_count; i++) {
677 /* initialize dcn per pipe */
678 bp->funcs->enable_disp_power_gating(bp,
679 CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
680 }
681 }
682
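/*
 * Workaround for a spurious OPTC underflow indication: wait for pending
 * MPCC disconnects on the pipes driving this stream, enable double
 * buffering of the OTG blank data, and clear any underflow status raised
 * only while this workaround ran.
 */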
683 static void false_optc_underflow_wa(
684 struct dc *dc,
685 const struct dc_stream_state *stream,
686 struct timing_generator *tg)
687 {
688 int i;
689 bool underflow;
690
691 if (!dc->hwseq->wa.false_optc_underflow)
692 return;
693
694 underflow = tg->funcs->is_optc_underflow_occurred(tg);
695
696 for (i = 0; i < dc->res_pool->pipe_count; i++) {
697 struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
698
699 if (old_pipe_ctx->stream != stream)
700 continue;
701
702 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
703 }
704
705 tg->funcs->set_blank_data_double_buffer(tg, true);
706
707 if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
708 tg->funcs->clear_optc_underflow(tg);
709 }
710
711 static enum dc_status dcn10_enable_stream_timing(
712 struct pipe_ctx *pipe_ctx,
713 struct dc_state *context,
714 struct dc *dc)
715 {
716 struct dc_stream_state *stream = pipe_ctx->stream;
717 enum dc_color_space color_space;
718 struct tg_color black_color = {0};
719
720         /* The caller's loop programs pipe0, the parent pipe, first; the back
721          * end is set up for pipe0. Child pipes share the back end with pipe
722          * 0, so no programming is needed for them.
723          */
724 if (pipe_ctx->top_pipe != NULL)
725 return DC_OK;
726
727 /* TODO check if timing_changed, disable stream if timing changed */
728
729         /* The HW programming guide assumes the display has already been
730          * disabled by the unplug sequence and that the OTG is stopped.
731          */
732 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
733
734 if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
735 pipe_ctx->clock_source,
736 &pipe_ctx->stream_res.pix_clk_params,
737 &pipe_ctx->pll_settings)) {
738 BREAK_TO_DEBUGGER();
739 return DC_ERROR_UNEXPECTED;
740 }
741
742 pipe_ctx->stream_res.tg->funcs->program_timing(
743 pipe_ctx->stream_res.tg,
744 &stream->timing,
745 pipe_ctx->pipe_dlg_param.vready_offset,
746 pipe_ctx->pipe_dlg_param.vstartup_start,
747 pipe_ctx->pipe_dlg_param.vupdate_offset,
748 pipe_ctx->pipe_dlg_param.vupdate_width,
749 pipe_ctx->stream->signal,
750 true);
751
752 #if 0 /* move to after enable_crtc */
753 /* TODO: OPP FMT, ABM. etc. should be done here. */
754 /* or FPGA now. instance 0 only. TODO: move to opp.c */
755
756 inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;
757
758 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
759 pipe_ctx->stream_res.opp,
760 &stream->bit_depth_params,
761 &stream->clamping);
762 #endif
763 /* program otg blank color */
764 color_space = stream->output_color_space;
765 color_space_to_black_color(dc, color_space, &black_color);
766
767 if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
768 pipe_ctx->stream_res.tg->funcs->set_blank_color(
769 pipe_ctx->stream_res.tg,
770 &black_color);
771
772 if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
773 !pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
774 pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
775 hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
776 false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
777 }
778
779 /* VTG is within DCHUB command block. DCFCLK is always on */
780 if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
781 BREAK_TO_DEBUGGER();
782 return DC_ERROR_UNEXPECTED;
783 }
784
785 /* TODO program crtc source select for non-virtual signal*/
786 /* TODO program FMT */
787 /* TODO setup link_enc */
788 /* TODO set stream attributes */
789 /* TODO program audio */
790 /* TODO enable stream if timing changed */
791 /* TODO unblank stream if DP */
792
793 return DC_OK;
794 }
795
796 static void dcn10_reset_back_end_for_pipe(
797 struct dc *dc,
798 struct pipe_ctx *pipe_ctx,
799 struct dc_state *context)
800 {
801 int i;
802 DC_LOGGER_INIT(dc->ctx->logger);
803 if (pipe_ctx->stream_res.stream_enc == NULL) {
804 pipe_ctx->stream = NULL;
805 return;
806 }
807
808 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
809                 /* DPMS may have already disabled the stream */
810 if (!pipe_ctx->stream->dpms_off)
811 core_link_disable_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
812 else if (pipe_ctx->stream_res.audio) {
813 dc->hwss.disable_audio_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
814 }
815
816 }
817
818         /* The caller's loop resets the parent pipe, pipe0, last. The back
819          * end is shared by all pipes and is disabled only when the parent
820          * pipe is disabled.
821          */
822 if (pipe_ctx->top_pipe == NULL) {
823 pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
824
825 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
826 }
827
828 for (i = 0; i < dc->res_pool->pipe_count; i++)
829 if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
830 break;
831
832 if (i == dc->res_pool->pipe_count)
833 return;
834
835 pipe_ctx->stream = NULL;
836 DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
837 pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
838 }
839
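/*
 * Force recovery of the display pipes, following the register sequence in
 * the comment below: blank the HUBPs, soft-reset DCHUBBUB, toggle
 * HUBP_DISABLE and release the reset. Only active when
 * dc->debug.recovery_enabled is set; returns true if the sequence ran.
 */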
840 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
841 {
842         struct hubp *hubp;
843 unsigned int i;
844 bool need_recover = true;
845
846 if (!dc->debug.recovery_enabled)
847 return false;
848
849 for (i = 0; i < dc->res_pool->pipe_count; i++) {
850 struct pipe_ctx *pipe_ctx =
851 &dc->current_state->res_ctx.pipe_ctx[i];
852 if (pipe_ctx != NULL) {
853 hubp = pipe_ctx->plane_res.hubp;
854 if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
855 if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
856 /* one pipe underflow, we will reset all the pipes*/
857 need_recover = true;
858 }
859 }
860 }
861 }
862 if (!need_recover)
863 return false;
864         /*
865          * DCHUBP_CNTL:HUBP_BLANK_EN=1
866          * DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
867          * DCHUBP_CNTL:HUBP_DISABLE=1
868          * DCHUBP_CNTL:HUBP_DISABLE=0
869          * DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
870          * DCSURF_PRIMARY_SURFACE_ADDRESS
871          * DCHUBP_CNTL:HUBP_BLANK_EN=0
872          */
873
874 for (i = 0; i < dc->res_pool->pipe_count; i++) {
875 struct pipe_ctx *pipe_ctx =
876 &dc->current_state->res_ctx.pipe_ctx[i];
877 if (pipe_ctx != NULL) {
878 hubp = pipe_ctx->plane_res.hubp;
879 /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
880 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
881 hubp->funcs->set_hubp_blank_en(hubp, true);
882 }
883 }
884 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
885 hubbub1_soft_reset(dc->res_pool->hubbub, true);
886
887 for (i = 0; i < dc->res_pool->pipe_count; i++) {
888 struct pipe_ctx *pipe_ctx =
889 &dc->current_state->res_ctx.pipe_ctx[i];
890 if (pipe_ctx != NULL) {
891 hubp = pipe_ctx->plane_res.hubp;
892 /*DCHUBP_CNTL:HUBP_DISABLE=1*/
893 if (hubp != NULL && hubp->funcs->hubp_disable_control)
894 hubp->funcs->hubp_disable_control(hubp, true);
895 }
896 }
897 for (i = 0; i < dc->res_pool->pipe_count; i++) {
898 struct pipe_ctx *pipe_ctx =
899 &dc->current_state->res_ctx.pipe_ctx[i];
900 if (pipe_ctx != NULL) {
901 hubp = pipe_ctx->plane_res.hubp;
902 /*DCHUBP_CNTL:HUBP_DISABLE=0*/
903 if (hubp != NULL && hubp->funcs->hubp_disable_control)
904                                 hubp->funcs->hubp_disable_control(hubp, false);
905 }
906 }
907 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
908 hubbub1_soft_reset(dc->res_pool->hubbub, false);
909 for (i = 0; i < dc->res_pool->pipe_count; i++) {
910 struct pipe_ctx *pipe_ctx =
911 &dc->current_state->res_ctx.pipe_ctx[i];
912 if (pipe_ctx != NULL) {
913 hubp = pipe_ctx->plane_res.hubp;
914 /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
915 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
916                                 hubp->funcs->set_hubp_blank_en(hubp, false);
917 }
918 }
919 return true;
920
921 }
922
923
924 void dcn10_verify_allow_pstate_change_high(struct dc *dc)
925 {
926 static bool should_log_hw_state; /* prevent hw state log by default */
927
928 if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) {
929 if (should_log_hw_state) {
930 dcn10_log_hw_state(dc, NULL);
931 }
932 BREAK_TO_DEBUGGER();
933 if (dcn10_hw_wa_force_recovery(dc)) {
934 /*check again*/
935 if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub))
936 BREAK_TO_DEBUGGER();
937 }
938 }
939 }
940
941 /* trigger HW to start disconnect plane from stream on the next vsync */
942 void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
943 {
944 struct hubp *hubp = pipe_ctx->plane_res.hubp;
945 int dpp_id = pipe_ctx->plane_res.dpp->inst;
946 struct mpc *mpc = dc->res_pool->mpc;
947 struct mpc_tree *mpc_tree_params;
948 struct mpcc *mpcc_to_remove = NULL;
949 struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
950
951 mpc_tree_params = &(opp->mpc_tree_params);
952 mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
953
954 /*Already reset*/
955 if (mpcc_to_remove == NULL)
956 return;
957
958 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
959 if (opp != NULL)
960 opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
961
962 dc->optimized_required = true;
963
964 if (hubp->funcs->hubp_disconnect)
965 hubp->funcs->hubp_disconnect(hubp);
966
967 if (dc->debug.sanity_checks)
968 dcn10_verify_allow_pstate_change_high(dc);
969 }
970
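/*
 * Power gate the DPP and HUBP belonging to a plane and reset the DPP;
 * counterpart of power_on_plane(). IP_REQUEST_EN is asserted around the
 * domain power-gate writes.
 */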
971 static void plane_atomic_power_down(struct dc *dc,
972 struct dpp *dpp,
973 struct hubp *hubp)
974 {
975 struct dce_hwseq *hws = dc->hwseq;
976 DC_LOGGER_INIT(dc->ctx->logger);
977
978 if (REG(DC_IP_REQUEST_CNTL)) {
979 REG_SET(DC_IP_REQUEST_CNTL, 0,
980 IP_REQUEST_EN, 1);
981 dpp_pg_control(hws, dpp->inst, false);
982 hubp_pg_control(hws, hubp->inst, false);
983 dpp->funcs->dpp_reset(dpp);
984 REG_SET(DC_IP_REQUEST_CNTL, 0,
985 IP_REQUEST_EN, 0);
986 DC_LOG_DEBUG(
987 "Power gated front end %d\n", hubp->inst);
988 }
989 }
990
991 /* disable HW used by plane.
992 * note: cannot disable until disconnect is complete
993 */
994 static void plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
995 {
996 struct hubp *hubp = pipe_ctx->plane_res.hubp;
997 struct dpp *dpp = pipe_ctx->plane_res.dpp;
998 int opp_id = hubp->opp_id;
999
1000 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
1001
1002 hubp->funcs->hubp_clk_cntl(hubp, false);
1003
1004 dpp->funcs->dpp_dppclk_control(dpp, false, false);
1005
1006 if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
1007 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1008 pipe_ctx->stream_res.opp,
1009 false);
1010
1011 hubp->power_gated = true;
1012 dc->optimized_required = false; /* We're powering off, no need to optimize */
1013
1014 plane_atomic_power_down(dc,
1015 pipe_ctx->plane_res.dpp,
1016 pipe_ctx->plane_res.hubp);
1017
1018 pipe_ctx->stream = NULL;
1019 memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
1020 memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
1021 pipe_ctx->top_pipe = NULL;
1022 pipe_ctx->bottom_pipe = NULL;
1023 pipe_ctx->plane_state = NULL;
1024 }
1025
1026 static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
1027 {
1028 DC_LOGGER_INIT(dc->ctx->logger);
1029
1030 if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1031 return;
1032
1033 plane_atomic_disable(dc, pipe_ctx);
1034
1035 apply_DEGVIDCN10_253_wa(dc);
1036
1037 DC_LOG_DC("Power down front end %d\n",
1038 pipe_ctx->pipe_idx);
1039 }
1040
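/*
 * Bring all pipes into a known disabled state at init time: blank enabled
 * timing generators, reset the MPC tree, then disconnect and power down
 * each front end. Pipes that carry a stream are left alone when seamless
 * boot optimization is requested.
 */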
1041 static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
1042 {
1043 int i;
1044 bool can_apply_seamless_boot = false;
1045
1046 for (i = 0; i < context->stream_count; i++) {
1047 if (context->streams[i]->apply_seamless_boot_optimization) {
1048 can_apply_seamless_boot = true;
1049 break;
1050 }
1051 }
1052
1053 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1054 struct timing_generator *tg = dc->res_pool->timing_generators[i];
1055 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1056
1057                 /* We assume pipe_ctx is not mapped irregularly to a non-preferred
1058                  * front end. If pipe_ctx->stream is not NULL, the pipe will be
1059                  * used, so don't disable it.
1060                  */
1061 if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1062 continue;
1063
1064 /* Blank controller using driver code instead of
1065 * command table.
1066 */
1067 if (tg->funcs->is_tg_enabled(tg)) {
1068 tg->funcs->lock(tg);
1069 tg->funcs->set_blank(tg, true);
1070 hwss_wait_for_blank_complete(tg);
1071 }
1072 }
1073
1074 /* Cannot reset the MPC mux if seamless boot */
1075 if (!can_apply_seamless_boot)
1076 dc->res_pool->mpc->funcs->mpc_init(dc->res_pool->mpc);
1077
1078 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1079 struct timing_generator *tg = dc->res_pool->timing_generators[i];
1080 struct hubp *hubp = dc->res_pool->hubps[i];
1081 struct dpp *dpp = dc->res_pool->dpps[i];
1082 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1083
1084                 /* We assume pipe_ctx is not mapped irregularly to a non-preferred
1085                  * front end. If pipe_ctx->stream is not NULL, the pipe will be
1086                  * used, so don't disable it.
1087                  */
1088 if (can_apply_seamless_boot &&
1089 pipe_ctx->stream != NULL &&
1090 pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
1091 pipe_ctx->stream_res.tg))
1092 continue;
1093
1094 /* Disable on the current state so the new one isn't cleared. */
1095 pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1096
1097 dpp->funcs->dpp_reset(dpp);
1098
1099 pipe_ctx->stream_res.tg = tg;
1100 pipe_ctx->pipe_idx = i;
1101
1102 pipe_ctx->plane_res.hubp = hubp;
1103 pipe_ctx->plane_res.dpp = dpp;
1104 pipe_ctx->plane_res.mpcc_inst = dpp->inst;
1105 hubp->mpcc_id = dpp->inst;
1106 hubp->opp_id = OPP_ID_INVALID;
1107 hubp->power_gated = false;
1108
1109 dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
1110 dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
1111 dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1112 pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
1113
1114 hwss1_plane_atomic_disconnect(dc, pipe_ctx);
1115
1116 if (tg->funcs->is_tg_enabled(tg))
1117 tg->funcs->unlock(tg);
1118
1119 dcn10_disable_plane(dc, pipe_ctx);
1120
1121 pipe_ctx->stream_res.tg = NULL;
1122 pipe_ctx->plane_res.hubp = NULL;
1123
1124 tg->funcs->tg_init(tg);
1125 }
1126 }
1127
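/*
 * One-time hardware init. On FPGA (Maximus) only minimal register setup,
 * clock gating and plane power gating are programmed. On real ASICs this
 * also runs BIOS golden init and disables VGA when not in accelerated mode,
 * initializes the link encoders, optionally powers down unused front ends,
 * and initializes audio, ABM and DMCU.
 */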
1128 static void dcn10_init_hw(struct dc *dc)
1129 {
1130 int i;
1131 struct abm *abm = dc->res_pool->abm;
1132 struct dmcu *dmcu = dc->res_pool->dmcu;
1133 struct dce_hwseq *hws = dc->hwseq;
1134 struct dc_bios *dcb = dc->ctx->dc_bios;
1135
1136 if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1137 REG_WRITE(REFCLK_CNTL, 0);
1138 REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
1139 REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1140
1141 if (!dc->debug.disable_clock_gate) {
1142 /* enable all DCN clock gating */
1143 REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1144
1145 REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1146
1147 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1148 }
1149
1150 enable_power_gating_plane(dc->hwseq, true);
1151
1152 /* end of FPGA. Below if real ASIC */
1153 return;
1154 }
1155
1156 if (!dcb->funcs->is_accelerated_mode(dcb)) {
1157 bool allow_self_fresh_force_enable =
1158 hububu1_is_allow_self_refresh_enabled(
1159 dc->res_pool->hubbub);
1160
1161 bios_golden_init(dc);
1162
1163 /* WA for making DF sleep when idle after resume from S0i3.
1164 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
1165 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
1166 * before calling command table and it changed to 1 after,
1167 * it should be set back to 0.
1168 */
1169 if (allow_self_fresh_force_enable == false &&
1170 hububu1_is_allow_self_refresh_enabled(dc->res_pool->hubbub))
1171 hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, true);
1172
1173 disable_vga(dc->hwseq);
1174 }
1175
1176 for (i = 0; i < dc->link_count; i++) {
1177 /* Power up AND update implementation according to the
1178 * required signal (which may be different from the
1179 * default signal on connector).
1180 */
1181 struct dc_link *link = dc->links[i];
1182
1183 link->link_enc->funcs->hw_init(link->link_enc);
1184
1185 /* Check for enabled DIG to identify enabled display */
1186 if (link->link_enc->funcs->is_dig_enabled &&
1187 link->link_enc->funcs->is_dig_enabled(link->link_enc))
1188 link->link_status.link_active = true;
1189 }
1190
1191 /* If taking control over from VBIOS, we may want to optimize our first
1192 * mode set, so we need to skip powering down pipes until we know which
1193 * pipes we want to use.
1194 * Otherwise, if taking control is not possible, we need to power
1195 * everything down.
1196 */
1197 if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
1198 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1199 struct hubp *hubp = dc->res_pool->hubps[i];
1200 struct dpp *dpp = dc->res_pool->dpps[i];
1201
1202 hubp->funcs->hubp_init(hubp);
1203 dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
1204 plane_atomic_power_down(dc, dpp, hubp);
1205 }
1206
1207 apply_DEGVIDCN10_253_wa(dc);
1208 }
1209
1210 for (i = 0; i < dc->res_pool->audio_count; i++) {
1211 struct audio *audio = dc->res_pool->audios[i];
1212
1213 audio->funcs->hw_init(audio);
1214 }
1215
1216 if (abm != NULL) {
1217 abm->funcs->init_backlight(abm);
1218 abm->funcs->abm_init(abm);
1219 }
1220
1221 if (dmcu != NULL)
1222 dmcu->funcs->dmcu_init(dmcu);
1223
1224 if (abm != NULL && dmcu != NULL)
1225 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1226
1227         /* power AFMT HDMI memory; TODO: may move to output enable/disable to save power */
1228 REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1229
1230 if (!dc->debug.disable_clock_gate) {
1231 /* enable all DCN clock gating */
1232 REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1233
1234 REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1235
1236 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1237 }
1238
1239 enable_power_gating_plane(dc->hwseq, true);
1240
1241 memset(&dc->clk_mgr->clks, 0, sizeof(dc->clk_mgr->clks));
1242 }
1243
1244 static void dcn10_reset_hw_ctx_wrap(
1245 struct dc *dc,
1246 struct dc_state *context)
1247 {
1248 int i;
1249
1250 /* Reset Back End*/
1251 for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
1252 struct pipe_ctx *pipe_ctx_old =
1253 &dc->current_state->res_ctx.pipe_ctx[i];
1254 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1255
1256 if (!pipe_ctx_old->stream)
1257 continue;
1258
1259 if (pipe_ctx_old->top_pipe)
1260 continue;
1261
1262 if (!pipe_ctx->stream ||
1263 pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
1264 struct clock_source *old_clk = pipe_ctx_old->clock_source;
1265
1266 dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
1267 if (dc->hwss.enable_stream_gating)
1268 dc->hwss.enable_stream_gating(dc, pipe_ctx);
1269 if (old_clk)
1270 old_clk->funcs->cs_power_down(old_clk);
1271 }
1272 }
1273 }
1274
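/*
 * For side-by-side / top-and-bottom stereo the secondary split pipe scans
 * out the right-eye image: save the left address in *addr, swap the right
 * address into its place and return true so the caller can restore the
 * left address afterwards. If the stream is 3D but the plane address is
 * not a stereo address, convert it and mirror the left address into the
 * right one.
 */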
1275 static bool patch_address_for_sbs_tb_stereo(
1276 struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1277 {
1278 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1279 bool sec_split = pipe_ctx->top_pipe &&
1280 pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1281 if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1282 (pipe_ctx->stream->timing.timing_3d_format ==
1283 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1284 pipe_ctx->stream->timing.timing_3d_format ==
1285 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1286 *addr = plane_state->address.grph_stereo.left_addr;
1287 plane_state->address.grph_stereo.left_addr =
1288 plane_state->address.grph_stereo.right_addr;
1289 return true;
1290 } else {
1291 if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1292 plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1293 plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1294 plane_state->address.grph_stereo.right_addr =
1295 plane_state->address.grph_stereo.left_addr;
1296 }
1297 }
1298 return false;
1299 }
1300
1301
1302
1303 static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1304 {
1305 bool addr_patched = false;
1306 PHYSICAL_ADDRESS_LOC addr;
1307 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1308
1309 if (plane_state == NULL)
1310 return;
1311
1312 addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1313
1314 pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1315 pipe_ctx->plane_res.hubp,
1316 &plane_state->address,
1317 plane_state->flip_immediate);
1318
1319 plane_state->status.requested_address = plane_state->address;
1320
1321 if (plane_state->flip_immediate)
1322 plane_state->status.current_address = plane_state->address;
1323
1324 if (addr_patched)
1325 pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
1326 }
1327
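/*
 * Program the plane's input (degamma) transfer function on the DPP: the
 * input LUT for gamma correction where applicable, a predefined HW curve
 * for sRGB/BT709, bypass for linear, or a PWL built from a custom curve.
 * Returns false for unsupported predefined curves such as PQ.
 */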
1328 static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
1329 const struct dc_plane_state *plane_state)
1330 {
1331 struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1332 const struct dc_transfer_func *tf = NULL;
1333 bool result = true;
1334
1335 if (dpp_base == NULL)
1336 return false;
1337
1338 if (plane_state->in_transfer_func)
1339 tf = plane_state->in_transfer_func;
1340
1341 if (plane_state->gamma_correction &&
1342 !dpp_base->ctx->dc->debug.always_use_regamma
1343 && !plane_state->gamma_correction->is_identity
1344 && dce_use_lut(plane_state->format))
1345 dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
1346
1347 if (tf == NULL)
1348 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1349 else if (tf->type == TF_TYPE_PREDEFINED) {
1350 switch (tf->tf) {
1351 case TRANSFER_FUNCTION_SRGB:
1352 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1353 break;
1354 case TRANSFER_FUNCTION_BT709:
1355 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1356 break;
1357 case TRANSFER_FUNCTION_LINEAR:
1358 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1359 break;
1360 case TRANSFER_FUNCTION_PQ:
1361 default:
1362 result = false;
1363 break;
1364 }
1365 } else if (tf->type == TF_TYPE_BYPASS) {
1366 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1367 } else {
1368 cm_helper_translate_curve_to_degamma_hw_format(tf,
1369 &dpp_base->degamma_params);
1370 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1371 &dpp_base->degamma_params);
1372 result = true;
1373 }
1374
1375 return result;
1376 }
1377
1378
1379
1380
1381
1382 static bool
1383 dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
1384 const struct dc_stream_state *stream)
1385 {
1386 struct dpp *dpp = pipe_ctx->plane_res.dpp;
1387
1388 if (dpp == NULL)
1389 return false;
1390
1391 dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
1392
1393 if (stream->out_transfer_func &&
1394 stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1395 stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1396 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1397
1398 /* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
1399 * update.
1400 */
1401 else if (cm_helper_translate_curve_to_hw_format(
1402 stream->out_transfer_func,
1403 &dpp->regamma_params, false)) {
1404 dpp->funcs->dpp_program_regamma_pwl(
1405 dpp,
1406 &dpp->regamma_params, OPP_REGAMMA_USER);
1407 } else
1408 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1409
1410 return true;
1411 }
1412
1413 static void dcn10_pipe_control_lock(
1414 struct dc *dc,
1415 struct pipe_ctx *pipe,
1416 bool lock)
1417 {
1418         /* Use the TG master update lock to lock everything on the TG;
1419          * therefore only the top pipe needs to take the lock.
1420          */
1421 if (pipe->top_pipe)
1422 return;
1423
1424 if (dc->debug.sanity_checks)
1425 dcn10_verify_allow_pstate_change_high(dc);
1426
1427 if (lock)
1428 pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1429 else
1430 pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1431
1432 if (dc->debug.sanity_checks)
1433 dcn10_verify_allow_pstate_change_high(dc);
1434 }
1435
1436 static bool wait_for_reset_trigger_to_occur(
1437 struct dc_context *dc_ctx,
1438 struct timing_generator *tg)
1439 {
1440 bool rc = false;
1441
1442 /* To avoid endless loop we wait at most
1443 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
1444 const uint32_t frames_to_wait_on_triggered_reset = 10;
1445 int i;
1446
1447 for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
1448
1449 if (!tg->funcs->is_counter_moving(tg)) {
1450 DC_ERROR("TG counter is not moving!\n");
1451 break;
1452 }
1453
1454 if (tg->funcs->did_triggered_reset_occur(tg)) {
1455 rc = true;
1456 /* usually occurs at i=1 */
1457 DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
1458 i);
1459 break;
1460 }
1461
1462 /* Wait for one frame. */
1463 tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
1464 tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
1465 }
1466
1467 if (false == rc)
1468 DC_ERROR("GSL: Timeout on reset trigger!\n");
1469
1470 return rc;
1471 }
1472
1473 static void dcn10_enable_timing_synchronization(
1474 struct dc *dc,
1475 int group_index,
1476 int group_size,
1477 struct pipe_ctx *grouped_pipes[])
1478 {
1479 struct dc_context *dc_ctx = dc->ctx;
1480 int i;
1481
1482 DC_SYNC_INFO("Setting up OTG reset trigger\n");
1483
1484 for (i = 1; i < group_size; i++)
1485 grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
1486 grouped_pipes[i]->stream_res.tg,
1487 grouped_pipes[0]->stream_res.tg->inst);
1488
1489 DC_SYNC_INFO("Waiting for trigger\n");
1490
1491         /* Only one pipe needs to be checked for the reset, as all the others
1492          * are synchronized. Look at the last pipe programmed to reset.
1493          */
1494
1495 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
1496 for (i = 1; i < group_size; i++)
1497 grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
1498 grouped_pipes[i]->stream_res.tg);
1499
1500 DC_SYNC_INFO("Sync complete\n");
1501 }
1502
1503 static void dcn10_enable_per_frame_crtc_position_reset(
1504 struct dc *dc,
1505 int group_size,
1506 struct pipe_ctx *grouped_pipes[])
1507 {
1508 struct dc_context *dc_ctx = dc->ctx;
1509 int i;
1510
1511 DC_SYNC_INFO("Setting up\n");
1512 for (i = 0; i < group_size; i++)
1513 if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
1514 grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
1515 grouped_pipes[i]->stream_res.tg,
1516 0,
1517 &grouped_pipes[i]->stream->triggered_crtc_reset);
1518
1519 DC_SYNC_INFO("Waiting for trigger\n");
1520
1521 for (i = 0; i < group_size; i++)
1522 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
1523
1524 DC_SYNC_INFO("Multi-display sync is complete\n");
1525 }
1526
1527 /*static void print_rq_dlg_ttu(
1528 struct dc *core_dc,
1529 struct pipe_ctx *pipe_ctx)
1530 {
1531 DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
1532 "\n============== DML TTU Output parameters [%d] ==============\n"
1533 "qos_level_low_wm: %d, \n"
1534 "qos_level_high_wm: %d, \n"
1535 "min_ttu_vblank: %d, \n"
1536 "qos_level_flip: %d, \n"
1537 "refcyc_per_req_delivery_l: %d, \n"
1538 "qos_level_fixed_l: %d, \n"
1539 "qos_ramp_disable_l: %d, \n"
1540 "refcyc_per_req_delivery_pre_l: %d, \n"
1541 "refcyc_per_req_delivery_c: %d, \n"
1542 "qos_level_fixed_c: %d, \n"
1543 "qos_ramp_disable_c: %d, \n"
1544 "refcyc_per_req_delivery_pre_c: %d\n"
1545 "=============================================================\n",
1546 pipe_ctx->pipe_idx,
1547 pipe_ctx->ttu_regs.qos_level_low_wm,
1548 pipe_ctx->ttu_regs.qos_level_high_wm,
1549 pipe_ctx->ttu_regs.min_ttu_vblank,
1550 pipe_ctx->ttu_regs.qos_level_flip,
1551 pipe_ctx->ttu_regs.refcyc_per_req_delivery_l,
1552 pipe_ctx->ttu_regs.qos_level_fixed_l,
1553 pipe_ctx->ttu_regs.qos_ramp_disable_l,
1554 pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_l,
1555 pipe_ctx->ttu_regs.refcyc_per_req_delivery_c,
1556 pipe_ctx->ttu_regs.qos_level_fixed_c,
1557 pipe_ctx->ttu_regs.qos_ramp_disable_c,
1558 pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c
1559 );
1560
1561 DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
1562 "\n============== DML DLG Output parameters [%d] ==============\n"
1563 "refcyc_h_blank_end: %d, \n"
1564 "dlg_vblank_end: %d, \n"
1565 "min_dst_y_next_start: %d, \n"
1566 "refcyc_per_htotal: %d, \n"
1567 "refcyc_x_after_scaler: %d, \n"
1568 "dst_y_after_scaler: %d, \n"
1569 "dst_y_prefetch: %d, \n"
1570 "dst_y_per_vm_vblank: %d, \n"
1571 "dst_y_per_row_vblank: %d, \n"
1572 "ref_freq_to_pix_freq: %d, \n"
1573 "vratio_prefetch: %d, \n"
1574 "refcyc_per_pte_group_vblank_l: %d, \n"
1575 "refcyc_per_meta_chunk_vblank_l: %d, \n"
1576 "dst_y_per_pte_row_nom_l: %d, \n"
1577 "refcyc_per_pte_group_nom_l: %d, \n",
1578 pipe_ctx->pipe_idx,
1579 pipe_ctx->dlg_regs.refcyc_h_blank_end,
1580 pipe_ctx->dlg_regs.dlg_vblank_end,
1581 pipe_ctx->dlg_regs.min_dst_y_next_start,
1582 pipe_ctx->dlg_regs.refcyc_per_htotal,
1583 pipe_ctx->dlg_regs.refcyc_x_after_scaler,
1584 pipe_ctx->dlg_regs.dst_y_after_scaler,
1585 pipe_ctx->dlg_regs.dst_y_prefetch,
1586 pipe_ctx->dlg_regs.dst_y_per_vm_vblank,
1587 pipe_ctx->dlg_regs.dst_y_per_row_vblank,
1588 pipe_ctx->dlg_regs.ref_freq_to_pix_freq,
1589 pipe_ctx->dlg_regs.vratio_prefetch,
1590 pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_l,
1591 pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_l,
1592 pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_l,
1593 pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l
1594 );
1595
1596 DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
1597 "\ndst_y_per_meta_row_nom_l: %d, \n"
1598 "refcyc_per_meta_chunk_nom_l: %d, \n"
1599 "refcyc_per_line_delivery_pre_l: %d, \n"
1600 "refcyc_per_line_delivery_l: %d, \n"
1601 "vratio_prefetch_c: %d, \n"
1602 "refcyc_per_pte_group_vblank_c: %d, \n"
1603 "refcyc_per_meta_chunk_vblank_c: %d, \n"
1604 "dst_y_per_pte_row_nom_c: %d, \n"
1605 "refcyc_per_pte_group_nom_c: %d, \n"
1606 "dst_y_per_meta_row_nom_c: %d, \n"
1607 "refcyc_per_meta_chunk_nom_c: %d, \n"
1608 "refcyc_per_line_delivery_pre_c: %d, \n"
1609 "refcyc_per_line_delivery_c: %d \n"
1610 "========================================================\n",
1611 pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_l,
1612 pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_l,
1613 pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_l,
1614 pipe_ctx->dlg_regs.refcyc_per_line_delivery_l,
1615 pipe_ctx->dlg_regs.vratio_prefetch_c,
1616 pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_c,
1617 pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_c,
1618 pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_c,
1619 pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_c,
1620 pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_c,
1621 pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_c,
1622 pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_c,
1623 pipe_ctx->dlg_regs.refcyc_per_line_delivery_c
1624 );
1625
1626 DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
1627 "\n============== DML RQ Output parameters [%d] ==============\n"
1628 "chunk_size: %d \n"
1629 "min_chunk_size: %d \n"
1630 "meta_chunk_size: %d \n"
1631 "min_meta_chunk_size: %d \n"
1632 "dpte_group_size: %d \n"
1633 "mpte_group_size: %d \n"
1634 "swath_height: %d \n"
1635 "pte_row_height_linear: %d \n"
1636 "========================================================\n",
1637 pipe_ctx->pipe_idx,
1638 pipe_ctx->rq_regs.rq_regs_l.chunk_size,
1639 pipe_ctx->rq_regs.rq_regs_l.min_chunk_size,
1640 pipe_ctx->rq_regs.rq_regs_l.meta_chunk_size,
1641 pipe_ctx->rq_regs.rq_regs_l.min_meta_chunk_size,
1642 pipe_ctx->rq_regs.rq_regs_l.dpte_group_size,
1643 pipe_ctx->rq_regs.rq_regs_l.mpte_group_size,
1644 pipe_ctx->rq_regs.rq_regs_l.swath_height,
1645 pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear
1646 );
1647 }
1648 */
1649
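/*
 * Read the system aperture (default physical page and logical low/high
 * bounds) back from the MMHUB registers so dcn10_program_pte_vm() can
 * program it into the HUBP.
 */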
1650 static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
1651 struct vm_system_aperture_param *apt,
1652 struct dce_hwseq *hws)
1653 {
1654 PHYSICAL_ADDRESS_LOC physical_page_number;
1655 uint32_t logical_addr_low;
1656 uint32_t logical_addr_high;
1657
1658 REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
1659 PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
1660 REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
1661 PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
1662
1663 REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1664 LOGICAL_ADDR, &logical_addr_low);
1665
1666 REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1667 LOGICAL_ADDR, &logical_addr_high);
1668
1669 apt->sys_default.quad_part = physical_page_number.quad_part << 12;
1670 apt->sys_low.quad_part = (int64_t)logical_addr_low << 18;
1671 apt->sys_high.quad_part = (int64_t)logical_addr_high << 18;
1672 }
1673
1674 /* Temporary: read the settings from registers; in the future the values will come from the KMD directly */
1675 static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
1676 struct vm_context0_param *vm0,
1677 struct dce_hwseq *hws)
1678 {
1679 PHYSICAL_ADDRESS_LOC fb_base;
1680 PHYSICAL_ADDRESS_LOC fb_offset;
1681 uint32_t fb_base_value;
1682 uint32_t fb_offset_value;
1683
1684 REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
1685 REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
1686
1687 REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
1688 PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
1689 REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
1690 PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
1691
1692 REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
1693 LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
1694 REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
1695 LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
1696
1697 REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
1698 LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
1699 REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
1700 LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
1701
1702 REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
1703 PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
1704 REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
1705 PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
1706
1707 /*
1708 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR are in UMA space.
1709 * Therefore we need to do
1710 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
1711 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
1712 */
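/*
 * SDPIF_FB_BASE and SDPIF_FB_OFFSET appear to be expressed in 16 MiB
 * units, hence the shift by 24; this is inferred from the shift amount
 * rather than from register documentation.
 */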
1713 fb_base.quad_part = (uint64_t)fb_base_value << 24;
1714 fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
1715 vm0->pte_base.quad_part += fb_base.quad_part;
1716 vm0->pte_base.quad_part -= fb_offset.quad_part;
1717 }
1718
1719
1720 void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
1721 {
1722 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
1723 struct vm_system_aperture_param apt = { {{ 0 } } };
1724 struct vm_context0_param vm0 = { { { 0 } } };
1725
1726 mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
1727 mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
1728
1729 hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
1730 hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
1731 }
1732
1733 static void dcn10_enable_plane(
1734 struct dc *dc,
1735 struct pipe_ctx *pipe_ctx,
1736 struct dc_state *context)
1737 {
1738 struct dce_hwseq *hws = dc->hwseq;
1739
1740 if (dc->debug.sanity_checks) {
1741 dcn10_verify_allow_pstate_change_high(dc);
1742 }
1743
1744 undo_DEGVIDCN10_253_wa(dc);
1745
1746 power_on_plane(dc->hwseq,
1747 pipe_ctx->plane_res.hubp->inst);
1748
1749 /* enable DCFCLK for the current DCHUB */
1750 pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
1751
1752 /* make sure OPP_PIPE_CLOCK_EN = 1 */
1753 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1754 pipe_ctx->stream_res.opp,
1755 true);
1756
1757 /* TODO: enable/disable in dm as per update type.
1758 if (plane_state) {
1759 DC_LOG_DC(dc->ctx->logger,
1760 "Pipe:%d 0x%x: addr hi:0x%x, "
1761 "addr low:0x%x, "
1762 "src: %d, %d, %d,"
1763 " %d; dst: %d, %d, %d, %d;\n",
1764 pipe_ctx->pipe_idx,
1765 plane_state,
1766 plane_state->address.grph.addr.high_part,
1767 plane_state->address.grph.addr.low_part,
1768 plane_state->src_rect.x,
1769 plane_state->src_rect.y,
1770 plane_state->src_rect.width,
1771 plane_state->src_rect.height,
1772 plane_state->dst_rect.x,
1773 plane_state->dst_rect.y,
1774 plane_state->dst_rect.width,
1775 plane_state->dst_rect.height);
1776
1777 DC_LOG_DC(dc->ctx->logger,
1778 "Pipe %d: width, height, x, y format:%d\n"
1779 "viewport:%d, %d, %d, %d\n"
1780 "recout: %d, %d, %d, %d\n",
1781 pipe_ctx->pipe_idx,
1782 plane_state->format,
1783 pipe_ctx->plane_res.scl_data.viewport.width,
1784 pipe_ctx->plane_res.scl_data.viewport.height,
1785 pipe_ctx->plane_res.scl_data.viewport.x,
1786 pipe_ctx->plane_res.scl_data.viewport.y,
1787 pipe_ctx->plane_res.scl_data.recout.width,
1788 pipe_ctx->plane_res.scl_data.recout.height,
1789 pipe_ctx->plane_res.scl_data.recout.x,
1790 pipe_ctx->plane_res.scl_data.recout.y);
1791 print_rq_dlg_ttu(dc, pipe_ctx);
1792 }
1793 */
1794 if (dc->config.gpu_vm_support)
1795 dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
1796
1797 if (dc->debug.sanity_checks) {
1798 dcn10_verify_allow_pstate_change_high(dc);
1799 }
1800 }
1801
1802 static void program_gamut_remap(struct pipe_ctx *pipe_ctx)
1803 {
1804 int i = 0;
1805 struct dpp_grph_csc_adjustment adjust;
1806 memset(&adjust, 0, sizeof(adjust));
1807 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
1808
1809
1810 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
1811 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
1812 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
1813 adjust.temperature_matrix[i] =
1814 pipe_ctx->stream->gamut_remap_matrix.matrix[i];
1815 }
1816
1817 pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
1818 }
1819
1820 static void dcn10_program_output_csc(struct dc *dc,
1821 struct pipe_ctx *pipe_ctx,
1822 enum dc_color_space colorspace,
1823 uint16_t *matrix,
1824 int opp_id)
1825 {
1826 if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
1827 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL)
1828 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
1829 } else {
1830 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
1831 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
1832 }
1833 }
1834
1835 bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
1836 {
1837 if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
1838 return true;
1839 if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
1840 return true;
1841 return false;
1842 }
1843
1844 bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
1845 {
1846 if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
1847 return true;
1848 if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
1849 return true;
1850 return false;
1851 }
1852
1853 bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
1854 {
1855 if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
1856 return true;
1857 if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
1858 return true;
1859 if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
1860 return true;
1861 return false;
1862 }
1863
1864 bool is_rgb_cspace(enum dc_color_space output_color_space)
1865 {
1866 switch (output_color_space) {
1867 case COLOR_SPACE_SRGB:
1868 case COLOR_SPACE_SRGB_LIMITED:
1869 case COLOR_SPACE_2020_RGB_FULLRANGE:
1870 case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
1871 case COLOR_SPACE_ADOBERGB:
1872 return true;
1873 case COLOR_SPACE_YCBCR601:
1874 case COLOR_SPACE_YCBCR709:
1875 case COLOR_SPACE_YCBCR601_LIMITED:
1876 case COLOR_SPACE_YCBCR709_LIMITED:
1877 case COLOR_SPACE_2020_YCBCR:
1878 return false;
1879 default:
1880 /* Add a case to the switch */
1881 BREAK_TO_DEBUGGER();
1882 return false;
1883 }
1884 }
1885
1886 void dcn10_get_surface_visual_confirm_color(
1887 const struct pipe_ctx *pipe_ctx,
1888 struct tg_color *color)
1889 {
1890 uint32_t color_value = MAX_TG_COLOR_VALUE;
1891
1892 switch (pipe_ctx->plane_res.scl_data.format) {
1893 case PIXEL_FORMAT_ARGB8888:
1894 /* set border color to red */
1895 color->color_r_cr = color_value;
1896 break;
1897
1898 case PIXEL_FORMAT_ARGB2101010:
1899 /* set border color to blue */
1900 color->color_b_cb = color_value;
1901 break;
1902 case PIXEL_FORMAT_420BPP8:
1903 /* set border color to green */
1904 color->color_g_y = color_value;
1905 break;
1906 case PIXEL_FORMAT_420BPP10:
1907 /* set border color to yellow */
1908 color->color_g_y = color_value;
1909 color->color_r_cr = color_value;
1910 break;
1911 case PIXEL_FORMAT_FP16:
1912 /* set border color to white */
1913 color->color_r_cr = color_value;
1914 color->color_b_cb = color_value;
1915 color->color_g_y = color_value;
1916 break;
1917 default:
1918 break;
1919 }
1920 }
1921
1922 void dcn10_get_hdr_visual_confirm_color(
1923 struct pipe_ctx *pipe_ctx,
1924 struct tg_color *color)
1925 {
1926 uint32_t color_value = MAX_TG_COLOR_VALUE;
1927
1928 // Determine the overscan color based on the top-most (desktop) plane's context
1929 struct pipe_ctx *top_pipe_ctx = pipe_ctx;
1930
1931 while (top_pipe_ctx->top_pipe != NULL)
1932 top_pipe_ctx = top_pipe_ctx->top_pipe;
1933
1934 switch (top_pipe_ctx->plane_res.scl_data.format) {
1935 case PIXEL_FORMAT_ARGB2101010:
1936 if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
1937 /* HDR10, ARGB2101010 - set border color to red */
1938 color->color_r_cr = color_value;
1939 }
1940 break;
1941 case PIXEL_FORMAT_FP16:
1942 if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
1943 /* HDR10, FP16 - set border color to blue */
1944 color->color_b_cb = color_value;
1945 } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
1946 /* FreeSync 2 HDR - set border color to green */
1947 color->color_g_y = color_value;
1948 }
1949 break;
1950 default:
1951 /* SDR - set border color to gray */
1952 color->color_r_cr = color_value/2;
1953 color->color_b_cb = color_value/2;
1954 color->color_g_y = color_value/2;
1955 break;
1956 }
1957 }
1958
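/*
 * Convert a fixed31_32 value into a hardware integer.fraction format with
 * the given bit widths; negative inputs get the sign flagged in bit
 * (integer_bits + fractional_bits). For example, with integer_bits = 2 and
 * fractional_bits = 13 (the format used by the bias/scale programming
 * below), 1.0 encodes as 1 << 13 = 0x2000.
 */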
1959 static uint16_t fixed_point_to_int_frac(
1960 struct fixed31_32 arg,
1961 uint8_t integer_bits,
1962 uint8_t fractional_bits)
1963 {
1964 int32_t numerator;
1965 int32_t divisor = 1 << fractional_bits;
1966
1967 uint16_t result;
1968
1969 uint16_t d = (uint16_t)dc_fixpt_floor(
1970 dc_fixpt_abs(
1971 arg));
1972
1973 if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor))
1974 numerator = (uint16_t)dc_fixpt_floor(
1975 dc_fixpt_mul_int(
1976 arg,
1977 divisor));
1978 else {
1979 numerator = dc_fixpt_floor(
1980 dc_fixpt_sub(
1981 dc_fixpt_from_int(
1982 1LL << integer_bits),
1983 dc_fixpt_recip(
1984 dc_fixpt_from_int(
1985 divisor))));
1986 }
1987
1988 if (numerator >= 0)
1989 result = (uint16_t)numerator;
1990 else
1991 result = (uint16_t)(
1992 (1 << (integer_bits + fractional_bits + 1)) + numerator);
1993
1994 if ((result != 0) && dc_fixpt_lt(
1995 arg, dc_fixpt_zero))
1996 result |= 1 << (integer_bits + fractional_bits);
1997
1998 return result;
1999 }
2000
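/*
 * Build the DPP pre-scale bias/scale values. 0x2000 is 1.0 in the 2.13
 * format produced by fixed_point_to_int_frac(..., 2, 13), i.e. the
 * pass-through default; the 256/255 factor applied to the coefficient
 * reduction factor is presumably a full-range correction (an assumption,
 * not documented here).
 */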
2001 void dcn10_build_prescale_params(struct dc_bias_and_scale *bias_and_scale,
2002 const struct dc_plane_state *plane_state)
2003 {
2004 if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
2005 && plane_state->format != SURFACE_PIXEL_FORMAT_INVALID
2006 && plane_state->input_csc_color_matrix.enable_adjustment
2007 && plane_state->coeff_reduction_factor.value != 0) {
2008 bias_and_scale->scale_blue = fixed_point_to_int_frac(
2009 dc_fixpt_mul(plane_state->coeff_reduction_factor,
2010 dc_fixpt_from_fraction(256, 255)),
2011 2,
2012 13);
2013 bias_and_scale->scale_red = bias_and_scale->scale_blue;
2014 bias_and_scale->scale_green = bias_and_scale->scale_blue;
2015 } else {
2016 bias_and_scale->scale_blue = 0x2000;
2017 bias_and_scale->scale_red = 0x2000;
2018 bias_and_scale->scale_green = 0x2000;
2019 }
2020 }
2021
2022 static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2023 {
2024 struct dc_bias_and_scale bns_params = {0};
2025
2026 // program the input csc
2027 dpp->funcs->dpp_setup(dpp,
2028 plane_state->format,
2029 EXPANSION_MODE_ZERO,
2030 plane_state->input_csc_color_matrix,
2031 #ifdef CONFIG_DRM_AMD_DC_DCN2_0
2032 plane_state->color_space,
2033 NULL);
2034 #else
2035 plane_state->color_space);
2036 #endif
2037
2038 // set scale and bias registers
2039 dcn10_build_prescale_params(&bns_params, plane_state);
2040 if (dpp->funcs->dpp_program_bias_and_scale)
2041 dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2042 }
2043
2044 static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
2045 {
2046 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2047 struct mpcc_blnd_cfg blnd_cfg = {{0}};
2048 bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2049 int mpcc_id;
2050 struct mpcc *new_mpcc;
2051 struct mpc *mpc = dc->res_pool->mpc;
2052 struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
2053
2054 if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
2055 dcn10_get_hdr_visual_confirm_color(
2056 pipe_ctx, &blnd_cfg.black_color);
2057 } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
2058 dcn10_get_surface_visual_confirm_color(
2059 pipe_ctx, &blnd_cfg.black_color);
2060 } else {
2061 color_space_to_black_color(
2062 dc, pipe_ctx->stream->output_color_space,
2063 &blnd_cfg.black_color);
2064 }
2065
2066 if (per_pixel_alpha)
2067 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
2068 else
2069 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
2070
2071 blnd_cfg.overlap_only = false;
2072 blnd_cfg.global_gain = 0xff;
2073
2074 if (pipe_ctx->plane_state->global_alpha)
2075 blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
2076 else
2077 blnd_cfg.global_alpha = 0xff;
2078
2079 /* DCN1.0 has output CM before MPC which seems to screw with
2080 * pre-multiplied alpha.
2081 */
2082 blnd_cfg.pre_multiplied_alpha = is_rgb_cspace(
2083 pipe_ctx->stream->output_color_space)
2084 && per_pixel_alpha;
2085
2086
2087 /*
2088 * TODO: remove hack
2089 * Note: currently there is a bug in init_hw such that
2090 * on resume from hibernate, BIOS sets up MPCC0, and
2091 * we do mpcc_remove but the mpcc cannot go to idle
2092 * after remove. This causes us to pick mpcc1 here,
2093 * which causes a pstate hang for a yet unknown reason.
2094 */
2095 mpcc_id = hubp->inst;
2096
2097 /* If there is no full update, there is no need to touch the MPC tree */
2098 if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
2099 mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
2100 return;
2101 }
2102
2103 /* check if this MPCC is already being used */
2104 new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
2105 /* remove MPCC if being used */
2106 if (new_mpcc != NULL)
2107 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
2108 else
2109 if (dc->debug.sanity_checks)
2110 mpc->funcs->assert_mpcc_idle_before_connect(
2111 dc->res_pool->mpc, mpcc_id);
2112
2113 /* Call MPC to insert new plane */
2114 new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
2115 mpc_tree_params,
2116 &blnd_cfg,
2117 NULL,
2118 NULL,
2119 hubp->inst,
2120 mpcc_id);
2121
2122 ASSERT(new_mpcc != NULL);
2123
2124 hubp->opp_id = pipe_ctx->stream_res.opp->inst;
2125 hubp->mpcc_id = mpcc_id;
2126 }
2127
2128 static void update_scaler(struct pipe_ctx *pipe_ctx)
2129 {
2130 bool per_pixel_alpha =
2131 pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2132
2133 pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2134 pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
2135 /* scaler configuration */
2136 pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2137 pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2138 }
2139
2140 void update_dchubp_dpp(
2141 struct dc *dc,
2142 struct pipe_ctx *pipe_ctx,
2143 struct dc_state *context)
2144 {
2145 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2146 struct dpp *dpp = pipe_ctx->plane_res.dpp;
2147 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2148 union plane_size size = plane_state->plane_size;
2149 unsigned int compat_level = 0;
2150
2151 /* Depending on the DML calculation, the DPP clock value may change dynamically */
2152 /* Divide the DPP clock by 2 only when the requested max DPP clock is no
2153 * higher than half of the current dispclk
2154 */
2155 if (plane_state->update_flags.bits.full_update) {
2156 bool should_divided_by_2 = context->bw_ctx.bw.dcn.clk.dppclk_khz <=
2157 dc->clk_mgr->clks.dispclk_khz / 2;
2158
2159 dpp->funcs->dpp_dppclk_control(
2160 dpp,
2161 should_divided_by_2,
2162 true);
2163
2164 if (dc->res_pool->dccg)
2165 dc->res_pool->dccg->funcs->update_dpp_dto(
2166 dc->res_pool->dccg,
2167 dpp->inst,
2168 pipe_ctx->plane_res.bw.dppclk_khz);
2169 else
2170 dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
2171 dc->clk_mgr->clks.dispclk_khz / 2 :
2172 dc->clk_mgr->clks.dispclk_khz;
2173 }
2174
2175 /* TODO: Need an input parameter to tell which OTG the current DCHUB pipe ties to.
2176 * VTG is within DCHUBBUB, which is a common block shared by each pipe HUBP.
2177 * VTG has a 1:1 mapping with OTG. Each pipe HUBP selects which VTG to use.
2178 */
2179 if (plane_state->update_flags.bits.full_update) {
2180 hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
2181
2182 hubp->funcs->hubp_setup(
2183 hubp,
2184 &pipe_ctx->dlg_regs,
2185 &pipe_ctx->ttu_regs,
2186 &pipe_ctx->rq_regs,
2187 &pipe_ctx->pipe_dlg_param);
2188 hubp->funcs->hubp_setup_interdependent(
2189 hubp,
2190 &pipe_ctx->dlg_regs,
2191 &pipe_ctx->ttu_regs);
2192 }
2193
2194 size.grph.surface_size = pipe_ctx->plane_res.scl_data.viewport;
2195
2196 if (plane_state->update_flags.bits.full_update ||
2197 plane_state->update_flags.bits.bpp_change)
2198 update_dpp(dpp, plane_state);
2199
2200 if (plane_state->update_flags.bits.full_update ||
2201 plane_state->update_flags.bits.per_pixel_alpha_change ||
2202 plane_state->update_flags.bits.global_alpha_change)
2203 dc->hwss.update_mpcc(dc, pipe_ctx);
2204
2205 if (plane_state->update_flags.bits.full_update ||
2206 plane_state->update_flags.bits.per_pixel_alpha_change ||
2207 plane_state->update_flags.bits.global_alpha_change ||
2208 plane_state->update_flags.bits.scaling_change ||
2209 plane_state->update_flags.bits.position_change) {
2210 update_scaler(pipe_ctx);
2211 }
2212
2213 if (plane_state->update_flags.bits.full_update ||
2214 plane_state->update_flags.bits.scaling_change ||
2215 plane_state->update_flags.bits.position_change) {
2216 hubp->funcs->mem_program_viewport(
2217 hubp,
2218 &pipe_ctx->plane_res.scl_data.viewport,
2219 &pipe_ctx->plane_res.scl_data.viewport_c);
2220 }
2221
2222 if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
2223 dc->hwss.set_cursor_position(pipe_ctx);
2224 dc->hwss.set_cursor_attribute(pipe_ctx);
2225
2226 if (dc->hwss.set_cursor_sdr_white_level)
2227 dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
2228 }
2229
2230 if (plane_state->update_flags.bits.full_update) {
2231 /*gamut remap*/
2232 program_gamut_remap(pipe_ctx);
2233
2234 dc->hwss.program_output_csc(dc,
2235 pipe_ctx,
2236 pipe_ctx->stream->output_color_space,
2237 pipe_ctx->stream->csc_color_matrix.matrix,
2238 pipe_ctx->stream_res.opp->inst);
2239 }
2240
2241 if (plane_state->update_flags.bits.full_update ||
2242 plane_state->update_flags.bits.pixel_format_change ||
2243 plane_state->update_flags.bits.horizontal_mirror_change ||
2244 plane_state->update_flags.bits.rotation_change ||
2245 plane_state->update_flags.bits.swizzle_change ||
2246 plane_state->update_flags.bits.dcc_change ||
2247 plane_state->update_flags.bits.bpp_change ||
2248 plane_state->update_flags.bits.scaling_change ||
2249 plane_state->update_flags.bits.plane_size_change) {
2250 hubp->funcs->hubp_program_surface_config(
2251 hubp,
2252 plane_state->format,
2253 &plane_state->tiling_info,
2254 &size,
2255 plane_state->rotation,
2256 &plane_state->dcc,
2257 plane_state->horizontal_mirror,
2258 compat_level);
2259 }
2260
2261 hubp->power_gated = false;
2262
2263 dc->hwss.update_plane_addr(dc, pipe_ctx);
2264
2265 if (is_pipe_tree_visible(pipe_ctx))
2266 hubp->funcs->set_blank(hubp, false);
2267 }
2268
2269 static void dcn10_blank_pixel_data(
2270 struct dc *dc,
2271 struct pipe_ctx *pipe_ctx,
2272 bool blank)
2273 {
2274 enum dc_color_space color_space;
2275 struct tg_color black_color = {0};
2276 struct stream_resource *stream_res = &pipe_ctx->stream_res;
2277 struct dc_stream_state *stream = pipe_ctx->stream;
2278
2279 /* program otg blank color */
2280 color_space = stream->output_color_space;
2281 color_space_to_black_color(dc, color_space, &black_color);
2282
2283 /*
2284 * The way 4:2:0 is packed, 2 channels carry the Y component and 1 channel
2285 * alternates between Cb and Cr, so both channels need the pixel
2286 * value for Y
2287 */
2288 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2289 black_color.color_r_cr = black_color.color_g_y;
2290
2291
2292 if (stream_res->tg->funcs->set_blank_color)
2293 stream_res->tg->funcs->set_blank_color(
2294 stream_res->tg,
2295 &black_color);
2296
2297 if (!blank) {
2298 if (stream_res->tg->funcs->set_blank)
2299 stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2300 if (stream_res->abm) {
2301 stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1);
2302 stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
2303 }
2304 } else if (blank) {
2305 if (stream_res->abm)
2306 stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);
2307 if (stream_res->tg->funcs->set_blank)
2308 stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2309 }
2310 }
2311
2312 void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2313 {
2314 struct fixed31_32 multiplier = dc_fixpt_from_fraction(
2315 pipe_ctx->plane_state->sdr_white_level, 80);
2316 uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2317 struct custom_float_format fmt;
2318
2319 fmt.exponenta_bits = 6;
2320 fmt.mantissa_bits = 12;
2321 fmt.sign = true;
2322
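/*
 * 0x1f000 is 1.0 in this custom float format (1 sign, 6 exponent and
 * 12 mantissa bits, exponent bias 31). The multiplier only deviates
 * from 1.0 when sdr_white_level exceeds 80, which is treated here as
 * the SDR reference white level.
 */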
2323 if (pipe_ctx->plane_state->sdr_white_level > 80)
2324 convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2325
2326 pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2327 pipe_ctx->plane_res.dpp, hw_mult);
2328 }
2329
2330 void dcn10_program_pipe(
2331 struct dc *dc,
2332 struct pipe_ctx *pipe_ctx,
2333 struct dc_state *context)
2334 {
2335 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2336 dcn10_enable_plane(dc, pipe_ctx, context);
2337
2338 update_dchubp_dpp(dc, pipe_ctx, context);
2339
2340 set_hdr_multiplier(pipe_ctx);
2341
2342 if (pipe_ctx->plane_state->update_flags.bits.full_update ||
2343 pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
2344 pipe_ctx->plane_state->update_flags.bits.gamma_change)
2345 dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
2346
2347 /* dcn10_translate_regamma_to_hw_format takes 750us to finish,
2348 * so only do gamma programming for a full update.
2349 * TODO: This can be further optimized/cleaned up.
2350 * Always call this for now since it does a memcmp inside before
2351 * doing the heavy calculation and programming.
2352 */
2353 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2354 dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
2355 }
2356
2357 static void program_all_pipe_in_tree(
2358 struct dc *dc,
2359 struct pipe_ctx *pipe_ctx,
2360 struct dc_state *context)
2361 {
2362 if (pipe_ctx->top_pipe == NULL) {
2363 bool blank = !is_pipe_tree_visible(pipe_ctx);
2364
2365 pipe_ctx->stream_res.tg->funcs->program_global_sync(
2366 pipe_ctx->stream_res.tg,
2367 pipe_ctx->pipe_dlg_param.vready_offset,
2368 pipe_ctx->pipe_dlg_param.vstartup_start,
2369 pipe_ctx->pipe_dlg_param.vupdate_offset,
2370 pipe_ctx->pipe_dlg_param.vupdate_width);
2371
2372 pipe_ctx->stream_res.tg->funcs->set_vtg_params(
2373 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
2374
2375 dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
2376
2377 }
2378
2379 if (pipe_ctx->plane_state != NULL)
2380 dcn10_program_pipe(dc, pipe_ctx, context);
2381
2382 if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx)
2383 program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
2384 }
2385
2386 struct pipe_ctx *find_top_pipe_for_stream(
2387 struct dc *dc,
2388 struct dc_state *context,
2389 const struct dc_stream_state *stream)
2390 {
2391 int i;
2392
2393 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2394 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2395 struct pipe_ctx *old_pipe_ctx =
2396 &dc->current_state->res_ctx.pipe_ctx[i];
2397
2398 if (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state)
2399 continue;
2400
2401 if (pipe_ctx->stream != stream)
2402 continue;
2403
2404 if (!pipe_ctx->top_pipe)
2405 return pipe_ctx;
2406 }
2407 return NULL;
2408 }
2409
2410 static void dcn10_apply_ctx_for_surface(
2411 struct dc *dc,
2412 const struct dc_stream_state *stream,
2413 int num_planes,
2414 struct dc_state *context)
2415 {
2416 int i;
2417 struct timing_generator *tg;
2418 uint32_t underflow_check_delay_us;
2419 bool removed_pipe[4] = { false };
2420 bool interdependent_update = false;
2421 struct pipe_ctx *top_pipe_to_program =
2422 find_top_pipe_for_stream(dc, context, stream);
2423 DC_LOGGER_INIT(dc->ctx->logger);
2424
2425 if (!top_pipe_to_program)
2426 return;
2427
2428 tg = top_pipe_to_program->stream_res.tg;
2429
2430 interdependent_update = top_pipe_to_program->plane_state &&
2431 top_pipe_to_program->plane_state->update_flags.bits.full_update;
2432
2433 underflow_check_delay_us = dc->debug.underflow_assert_delay_us;
2434
2435 if (underflow_check_delay_us != 0xFFFFFFFF && dc->hwss.did_underflow_occur)
2436 ASSERT(dc->hwss.did_underflow_occur(dc, top_pipe_to_program));
2437
2438 if (interdependent_update)
2439 lock_all_pipes(dc, context, true);
2440 else
2441 dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
2442
2443 if (underflow_check_delay_us != 0xFFFFFFFF)
2444 udelay(underflow_check_delay_us);
2445
2446 if (underflow_check_delay_us != 0xFFFFFFFF && dc->hwss.did_underflow_occur)
2447 ASSERT(dc->hwss.did_underflow_occur(dc, top_pipe_to_program));
2448
2449 if (num_planes == 0) {
2450 /* OTG blank before removing all front ends */
2451 dc->hwss.blank_pixel_data(dc, top_pipe_to_program, true);
2452 }
2453
2454 /* Disconnect unused mpcc */
2455 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2456 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2457 struct pipe_ctx *old_pipe_ctx =
2458 &dc->current_state->res_ctx.pipe_ctx[i];
2459 /*
2460 * Powergate reused pipes that are not powergated.
2461 * Fairly hacky right now, using opp_id as an indicator.
2462 * TODO: After moving dc_post to dc_update, this will
2463 * be removed.
2464 */
2465 if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
2466 if (old_pipe_ctx->stream_res.tg == tg &&
2467 old_pipe_ctx->plane_res.hubp &&
2468 old_pipe_ctx->plane_res.hubp->opp_id != OPP_ID_INVALID)
2469 dcn10_disable_plane(dc, old_pipe_ctx);
2470 }
2471
2472 if ((!pipe_ctx->plane_state ||
2473 pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
2474 old_pipe_ctx->plane_state &&
2475 old_pipe_ctx->stream_res.tg == tg) {
2476
2477 dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx);
2478 removed_pipe[i] = true;
2479
2480 DC_LOG_DC("Reset mpcc for pipe %d\n",
2481 old_pipe_ctx->pipe_idx);
2482 }
2483 }
2484
2485 if (num_planes > 0)
2486 program_all_pipe_in_tree(dc, top_pipe_to_program, context);
2487
2488 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2489 /* Program secondary blending tree and writeback pipes */
2490 if ((stream->num_wb_info > 0) && (dc->hwss.program_all_writeback_pipes_in_tree))
2491 dc->hwss.program_all_writeback_pipes_in_tree(dc, stream, context);
2492 #endif
2493 if (interdependent_update)
2494 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2495 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2496 /* Skip inactive pipes and ones already updated */
2497 if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
2498 !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
2499 continue;
2500
2501 pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
2502 pipe_ctx->plane_res.hubp,
2503 &pipe_ctx->dlg_regs,
2504 &pipe_ctx->ttu_regs);
2505 }
2506
2507 if (interdependent_update)
2508 lock_all_pipes(dc, context, false);
2509 else
2510 dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
2511
2512 if (num_planes == 0)
2513 false_optc_underflow_wa(dc, stream, tg);
2514
2515 for (i = 0; i < dc->res_pool->pipe_count; i++)
2516 if (removed_pipe[i])
2517 dcn10_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
2518
2519 if (dc->hwseq->wa.DEGVIDCN10_254)
2520 hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
2521 }
2522
2523 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
2524 {
2525 uint8_t i;
2526
2527 for (i = 0; i < context->stream_count; i++) {
2528 if (context->streams[i]->timing.timing_3d_format
2529 == TIMING_3D_FORMAT_HW_FRAME_PACKING) {
2530 /*
2531 * Disable stutter
2532 */
2533 hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
2534 break;
2535 }
2536 }
2537 }
2538
2539 static void dcn10_prepare_bandwidth(
2540 struct dc *dc,
2541 struct dc_state *context)
2542 {
2543 struct hubbub *hubbub = dc->res_pool->hubbub;
2544
2545 if (dc->debug.sanity_checks)
2546 dcn10_verify_allow_pstate_change_high(dc);
2547
2548 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
2549 if (context->stream_count == 0)
2550 context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
2551
2552 dc->clk_mgr->funcs->update_clocks(
2553 dc->clk_mgr,
2554 context,
2555 false);
2556 }
2557
2558 hubbub->funcs->program_watermarks(hubbub,
2559 &context->bw_ctx.bw.dcn.watermarks,
2560 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
2561 true);
2562 dcn10_stereo_hw_frame_pack_wa(dc, context);
2563
2564 if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
2565 dcn_bw_notify_pplib_of_wm_ranges(dc);
2566
2567 if (dc->debug.sanity_checks)
2568 dcn10_verify_allow_pstate_change_high(dc);
2569 }
2570
2571 static void dcn10_optimize_bandwidth(
2572 struct dc *dc,
2573 struct dc_state *context)
2574 {
2575 struct hubbub *hubbub = dc->res_pool->hubbub;
2576
2577 if (dc->debug.sanity_checks)
2578 dcn10_verify_allow_pstate_change_high(dc);
2579
2580 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
2581 if (context->stream_count == 0)
2582 context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
2583
2584 dc->clk_mgr->funcs->update_clocks(
2585 dc->clk_mgr,
2586 context,
2587 true);
2588 }
2589
2590 hubbub->funcs->program_watermarks(hubbub,
2591 &context->bw_ctx.bw.dcn.watermarks,
2592 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
2593 true);
2594 dcn10_stereo_hw_frame_pack_wa(dc, context);
2595
2596 if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
2597 dcn_bw_notify_pplib_of_wm_ranges(dc);
2598
2599 if (dc->debug.sanity_checks)
2600 dcn10_verify_allow_pstate_change_high(dc);
2601 }
2602
2603 static void set_drr(struct pipe_ctx **pipe_ctx,
2604 int num_pipes, int vmin, int vmax)
2605 {
2606 int i = 0;
2607 struct drr_params params = {0};
2608 // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
2609 unsigned int event_triggers = 0x800;
2610
2611 params.vertical_total_max = vmax;
2612 params.vertical_total_min = vmin;
2613
2614 /* TODO: If multiple pipes are to be supported, you need
2615 * some GSL stuff. Static screen triggers may be programmed differently
2616 * as well.
2617 */
2618 for (i = 0; i < num_pipes; i++) {
2619 pipe_ctx[i]->stream_res.tg->funcs->set_drr(
2620 pipe_ctx[i]->stream_res.tg, &params);
2621 if (vmax != 0 && vmin != 0)
2622 pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
2623 pipe_ctx[i]->stream_res.tg,
2624 event_triggers);
2625 }
2626 }
2627
2628 static void get_position(struct pipe_ctx **pipe_ctx,
2629 int num_pipes,
2630 struct crtc_position *position)
2631 {
2632 int i = 0;
2633
2634 /* TODO: handle pipes > 1
2635 */
2636 for (i = 0; i < num_pipes; i++)
2637 pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
2638 }
2639
2640 static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
2641 int num_pipes, const struct dc_static_screen_events *events)
2642 {
2643 unsigned int i;
2644 unsigned int value = 0;
2645
2646 if (events->surface_update)
2647 value |= 0x80;
2648 if (events->cursor_update)
2649 value |= 0x2;
2650 if (events->force_trigger)
2651 value |= 0x1;
2652
2653 for (i = 0; i < num_pipes; i++)
2654 pipe_ctx[i]->stream_res.tg->funcs->
2655 set_static_screen_control(pipe_ctx[i]->stream_res.tg, value);
2656 }
2657
2658 static void dcn10_config_stereo_parameters(
2659 struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
2660 {
2661 enum view_3d_format view_format = stream->view_format;
2662 enum dc_timing_3d_format timing_3d_format =\
2663 stream->timing.timing_3d_format;
2664 bool non_stereo_timing = false;
2665
2666 if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
2667 timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
2668 timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
2669 non_stereo_timing = true;
2670
2671 if (non_stereo_timing == false &&
2672 view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
2673
2674 flags->PROGRAM_STEREO = 1;
2675 flags->PROGRAM_POLARITY = 1;
2676 if (timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
2677 timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
2678 timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
2679 enum display_dongle_type dongle = \
2680 stream->link->ddc->dongle_type;
2681 if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
2682 dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
2683 dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
2684 flags->DISABLE_STEREO_DP_SYNC = 1;
2685 }
2686 flags->RIGHT_EYE_POLARITY =\
2687 stream->timing.flags.RIGHT_EYE_3D_POLARITY;
2688 if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
2689 flags->FRAME_PACKED = 1;
2690 }
2691
2692 return;
2693 }
2694
2695 static void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
2696 {
2697 struct crtc_stereo_flags flags = { 0 };
2698 struct dc_stream_state *stream = pipe_ctx->stream;
2699
2700 dcn10_config_stereo_parameters(stream, &flags);
2701
2702 pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
2703 pipe_ctx->stream_res.opp,
2704 flags.PROGRAM_STEREO == 1 ? true:false,
2705 &stream->timing);
2706
2707 pipe_ctx->stream_res.tg->funcs->program_stereo(
2708 pipe_ctx->stream_res.tg,
2709 &stream->timing,
2710 &flags);
2711
2712 return;
2713 }
2714
2715 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
2716 {
2717 int i;
2718
2719 for (i = 0; i < res_pool->pipe_count; i++) {
2720 if (res_pool->hubps[i]->inst == mpcc_inst)
2721 return res_pool->hubps[i];
2722 }
2723 ASSERT(false);
2724 return NULL;
2725 }
2726
2727 static void dcn10_wait_for_mpcc_disconnect(
2728 struct dc *dc,
2729 struct resource_pool *res_pool,
2730 struct pipe_ctx *pipe_ctx)
2731 {
2732 int mpcc_inst;
2733
2734 if (dc->debug.sanity_checks) {
2735 dcn10_verify_allow_pstate_change_high(dc);
2736 }
2737
2738 if (!pipe_ctx->stream_res.opp)
2739 return;
2740
2741 for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
2742 if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
2743 struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
2744
2745 res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
2746 pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
2747 hubp->funcs->set_blank(hubp, true);
2748 }
2749 }
2750
2751 if (dc->debug.sanity_checks) {
2752 dcn10_verify_allow_pstate_change_high(dc);
2753 }
2754
2755 }
2756
2757 static bool dcn10_dummy_display_power_gating(
2758 struct dc *dc,
2759 uint8_t controller_id,
2760 struct dc_bios *dcb,
2761 enum pipe_gating_control power_gating)
2762 {
2763 return true;
2764 }
2765
2766 static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
2767 {
2768 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2769 struct timing_generator *tg = pipe_ctx->stream_res.tg;
2770 bool flip_pending;
2771
2772 if (plane_state == NULL)
2773 return;
2774
2775 flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
2776 pipe_ctx->plane_res.hubp);
2777
2778 plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
2779
2780 if (!flip_pending)
2781 plane_state->status.current_address = plane_state->status.requested_address;
2782
2783 if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
2784 tg->funcs->is_stereo_left_eye) {
2785 plane_state->status.is_right_eye =
2786 !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
2787 }
2788 }
2789
2790 static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
2791 {
2792 if (hws->ctx->dc->res_pool->hubbub != NULL) {
2793 struct hubp *hubp = hws->ctx->dc->res_pool->hubps[0];
2794
2795 if (hubp->funcs->hubp_update_dchub)
2796 hubp->funcs->hubp_update_dchub(hubp, dh_data);
2797 else
2798 hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data);
2799 }
2800 }
2801
2802 static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
2803 {
2804 struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
2805 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2806 struct dpp *dpp = pipe_ctx->plane_res.dpp;
2807 struct dc_cursor_mi_param param = {
2808 .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
2809 .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
2810 .viewport = pipe_ctx->plane_res.scl_data.viewport,
2811 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
2812 .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
2813 .rotation = pipe_ctx->plane_state->rotation,
2814 .mirror = pipe_ctx->plane_state->horizontal_mirror
2815 };
2816 uint32_t x_plane = pipe_ctx->plane_state->dst_rect.x;
2817 uint32_t y_plane = pipe_ctx->plane_state->dst_rect.y;
2818 uint32_t x_offset = min(x_plane, pos_cpy.x);
2819 uint32_t y_offset = min(y_plane, pos_cpy.y);
2820
2821 pos_cpy.x -= x_offset;
2822 pos_cpy.y -= y_offset;
2823 pos_cpy.x_hotspot += (x_plane - x_offset);
2824 pos_cpy.y_hotspot += (y_plane - y_offset);
2825
2826 if (pipe_ctx->plane_state->address.type
2827 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
2828 pos_cpy.enable = false;
2829
2830 hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
2831 dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
2832 }
2833
2834 static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
2835 {
2836 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
2837
2838 pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
2839 pipe_ctx->plane_res.hubp, attributes);
2840 pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
2841 pipe_ctx->plane_res.dpp, attributes->color_format);
2842 }
2843
2844 static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
2845 {
2846 uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
2847 struct fixed31_32 multiplier;
2848 struct dpp_cursor_attributes opt_attr = { 0 };
2849 uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
2850 struct custom_float_format fmt;
2851
2852 if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
2853 return;
2854
2855 fmt.exponenta_bits = 5;
2856 fmt.mantissa_bits = 10;
2857 fmt.sign = true;
2858
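/*
 * This is the IEEE half-precision layout (1 sign, 5 exponent and
 * 10 mantissa bits), in which 0x3c00 is 1.0, matching the default
 * hw_scale above; as in set_hdr_multiplier, 80 is used as the SDR
 * reference white level.
 */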
2859 if (sdr_white_level > 80) {
2860 multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
2861 convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
2862 }
2863
2864 opt_attr.scale = hw_scale;
2865 opt_attr.bias = 0;
2866
2867 pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
2868 pipe_ctx->plane_res.dpp, &opt_attr);
2869 }
2870
2871 /**
2872 * apply_front_porch_workaround - TODO: is this still needed on FPGA?
2873 *
2874 * This is a workaround for a bug that has existed since R5xx and has not been
2875 * fixed: keep the front porch at a minimum of 2 for interlaced mode or 1 for progressive.
2876 */
2877 static void apply_front_porch_workaround(
2878 struct dc_crtc_timing *timing)
2879 {
2880 if (timing->flags.INTERLACE == 1) {
2881 if (timing->v_front_porch < 2)
2882 timing->v_front_porch = 2;
2883 } else {
2884 if (timing->v_front_porch < 1)
2885 timing->v_front_porch = 1;
2886 }
2887 }
2888
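/*
 * Returns the VUPDATE position as a line offset from VSYNC: the end of the
 * vertical blank derived from the patched timing (doubled for interlaced
 * modes) minus the VSTARTUP offset from the DLG parameters, plus one.
 */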
2889 int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
2890 {
2891 const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
2892 struct dc_crtc_timing patched_crtc_timing;
2893 int vesa_sync_start;
2894 int asic_blank_end;
2895 int interlace_factor;
2896 int vertical_line_start;
2897
2898 patched_crtc_timing = *dc_crtc_timing;
2899 apply_front_porch_workaround(&patched_crtc_timing);
2900
2901 interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
2902
2903 vesa_sync_start = patched_crtc_timing.v_addressable +
2904 patched_crtc_timing.v_border_bottom +
2905 patched_crtc_timing.v_front_porch;
2906
2907 asic_blank_end = (patched_crtc_timing.v_total -
2908 vesa_sync_start -
2909 patched_crtc_timing.v_border_top)
2910 * interlace_factor;
2911
2912 vertical_line_start = asic_blank_end -
2913 pipe_ctx->pipe_dlg_param.vstartup_start + 1;
2914
2915 return vertical_line_start;
2916 }
2917
2918 void lock_all_pipes(struct dc *dc,
2919 struct dc_state *context,
2920 bool lock)
2921 {
2922 struct pipe_ctx *pipe_ctx;
2923 struct timing_generator *tg;
2924 int i;
2925
2926 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2927 pipe_ctx = &context->res_ctx.pipe_ctx[i];
2928 tg = pipe_ctx->stream_res.tg;
2929 /*
2930 * Only lock the top pipe's tg to prevent redundant
2931 * (un)locking. Also skip if pipe is disabled.
2932 */
2933 if (pipe_ctx->top_pipe ||
2934 !pipe_ctx->stream || !pipe_ctx->plane_state ||
2935 !tg->funcs->is_tg_enabled(tg))
2936 continue;
2937
2938 if (lock)
2939 tg->funcs->lock(tg);
2940 else
2941 tg->funcs->unlock(tg);
2942 }
2943 }
2944
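/*
 * Derives the VLINE0 interrupt window relative to VUPDATE: start_line is
 * the VUPDATE offset from VSYNC plus the requested line offset (nudged one
 * line towards zero), end_line is two lines later, and both wrap around
 * v_total.
 */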
2945 static void calc_vupdate_position(
2946 struct pipe_ctx *pipe_ctx,
2947 uint32_t *start_line,
2948 uint32_t *end_line)
2949 {
2950 const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
2951 int vline_int_offset_from_vupdate =
2952 pipe_ctx->stream->periodic_interrupt0.lines_offset;
2953 int vupdate_offset_from_vsync = get_vupdate_offset_from_vsync(pipe_ctx);
2954 int start_position;
2955
2956 if (vline_int_offset_from_vupdate > 0)
2957 vline_int_offset_from_vupdate--;
2958 else if (vline_int_offset_from_vupdate < 0)
2959 vline_int_offset_from_vupdate++;
2960
2961 start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;
2962
2963 if (start_position >= 0)
2964 *start_line = start_position;
2965 else
2966 *start_line = dc_crtc_timing->v_total + start_position - 1;
2967
2968 *end_line = *start_line + 2;
2969
2970 if (*end_line >= dc_crtc_timing->v_total)
2971 *end_line = 2;
2972 }
2973
2974 static void cal_vline_position(
2975 struct pipe_ctx *pipe_ctx,
2976 enum vline_select vline,
2977 uint32_t *start_line,
2978 uint32_t *end_line)
2979 {
2980 enum vertical_interrupt_ref_point ref_point = INVALID_POINT;
2981
2982 if (vline == VLINE0)
2983 ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
2984 else if (vline == VLINE1)
2985 ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;
2986
2987 switch (ref_point) {
2988 case START_V_UPDATE:
2989 calc_vupdate_position(
2990 pipe_ctx,
2991 start_line,
2992 end_line);
2993 break;
2994 case START_V_SYNC:
2995 // Supposed to do nothing because vsync is 0
2996 break;
2997 default:
2998 ASSERT(0);
2999 break;
3000 }
3001 }
3002
3003 static void dcn10_setup_periodic_interrupt(
3004 struct pipe_ctx *pipe_ctx,
3005 enum vline_select vline)
3006 {
3007 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3008
3009 if (vline == VLINE0) {
3010 uint32_t start_line = 0;
3011 uint32_t end_line = 0;
3012
3013 cal_vline_position(pipe_ctx, vline, &start_line, &end_line);
3014
3015 tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3016
3017 } else if (vline == VLINE1) {
3018 pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
3019 tg,
3020 pipe_ctx->stream->periodic_interrupt1.lines_offset);
3021 }
3022 }
3023
3024 static void dcn10_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx)
3025 {
3026 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3027 int start_line = get_vupdate_offset_from_vsync(pipe_ctx);
3028
3029 if (start_line < 0) {
3030 ASSERT(0);
3031 start_line = 0;
3032 }
3033
3034 if (tg->funcs->setup_vertical_interrupt2)
3035 tg->funcs->setup_vertical_interrupt2(tg, start_line);
3036 }
3037
3038 static void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3039 struct dc_link_settings *link_settings)
3040 {
3041 struct encoder_unblank_param params = { { 0 } };
3042 struct dc_stream_state *stream = pipe_ctx->stream;
3043 struct dc_link *link = stream->link;
3044
3045 /* only the parameters below are used by unblank */
3046 params.timing = pipe_ctx->stream->timing;
3047
3048 params.link_settings.link_rate = link_settings->link_rate;
3049
3050 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3051 if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3052 params.timing.pix_clk_100hz /= 2;
3053 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
3054 }
3055
3056 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3057 link->dc->hwss.edp_backlight_control(link, true);
3058 }
3059 }
3060
3061 static void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3062 const uint8_t *custom_sdp_message,
3063 unsigned int sdp_message_size)
3064 {
3065 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3066 pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3067 pipe_ctx->stream_res.stream_enc,
3068 custom_sdp_message,
3069 sdp_message_size);
3070 }
3071 }
3072
3073 static const struct hw_sequencer_funcs dcn10_funcs = {
3074 .program_gamut_remap = program_gamut_remap,
3075 .init_hw = dcn10_init_hw,
3076 .init_pipes = dcn10_init_pipes,
3077 .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
3078 .apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
3079 .update_plane_addr = dcn10_update_plane_addr,
3080 .plane_atomic_disconnect = hwss1_plane_atomic_disconnect,
3081 .update_dchub = dcn10_update_dchub,
3082 .update_mpcc = dcn10_update_mpcc,
3083 .update_pending_status = dcn10_update_pending_status,
3084 .set_input_transfer_func = dcn10_set_input_transfer_func,
3085 .set_output_transfer_func = dcn10_set_output_transfer_func,
3086 .program_output_csc = dcn10_program_output_csc,
3087 .power_down = dce110_power_down,
3088 .enable_accelerated_mode = dce110_enable_accelerated_mode,
3089 .enable_timing_synchronization = dcn10_enable_timing_synchronization,
3090 .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
3091 .update_info_frame = dce110_update_info_frame,
3092 .send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
3093 .enable_stream = dce110_enable_stream,
3094 .disable_stream = dce110_disable_stream,
3095 .unblank_stream = dcn10_unblank_stream,
3096 .blank_stream = dce110_blank_stream,
3097 .enable_audio_stream = dce110_enable_audio_stream,
3098 .disable_audio_stream = dce110_disable_audio_stream,
3099 .enable_display_power_gating = dcn10_dummy_display_power_gating,
3100 .disable_plane = dcn10_disable_plane,
3101 .blank_pixel_data = dcn10_blank_pixel_data,
3102 .pipe_control_lock = dcn10_pipe_control_lock,
3103 .prepare_bandwidth = dcn10_prepare_bandwidth,
3104 .optimize_bandwidth = dcn10_optimize_bandwidth,
3105 .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap,
3106 .enable_stream_timing = dcn10_enable_stream_timing,
3107 .set_drr = set_drr,
3108 .get_position = get_position,
3109 .set_static_screen_control = set_static_screen_control,
3110 .setup_stereo = dcn10_setup_stereo,
3111 .set_avmute = dce110_set_avmute,
3112 .log_hw_state = dcn10_log_hw_state,
3113 .get_hw_state = dcn10_get_hw_state,
3114 .clear_status_bits = dcn10_clear_status_bits,
3115 .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
3116 .edp_backlight_control = hwss_edp_backlight_control,
3117 .edp_power_control = hwss_edp_power_control,
3118 .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
3119 .set_cursor_position = dcn10_set_cursor_position,
3120 .set_cursor_attribute = dcn10_set_cursor_attribute,
3121 .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
3122 .disable_stream_gating = NULL,
3123 .enable_stream_gating = NULL,
3124 .setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
3125 .setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt,
3126 .did_underflow_occur = dcn10_did_underflow_occur
3127 };
3128
3129
3130 void dcn10_hw_sequencer_construct(struct dc *dc)
3131 {
3132 dc->hwss = dcn10_funcs;
3133 }
3134