2 * Copyright 2016 Advanced Micro Devices, Inc.
3 * Copyright 2019 Raptor Engineering, LLC
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
27 #include <linux/slab.h>
29 #include "dm_services.h"
32 #include "dcn20_init.h"
35 #include "include/irq_service_interface.h"
36 #include "dcn20/dcn20_resource.h"
38 #include "dcn10/dcn10_hubp.h"
39 #include "dcn10/dcn10_ipp.h"
40 #include "dcn20_hubbub.h"
41 #include "dcn20_mpc.h"
42 #include "dcn20_hubp.h"
43 #include "irq/dcn20/irq_service_dcn20.h"
44 #include "dcn20_dpp.h"
45 #include "dcn20_optc.h"
46 #include "dcn20_hwseq.h"
47 #include "dce110/dce110_hw_sequencer.h"
48 #include "dcn10/dcn10_resource.h"
49 #include "dcn20_opp.h"
51 #include "dcn20_dsc.h"
53 #include "dcn20_link_encoder.h"
54 #include "dcn20_stream_encoder.h"
55 #include "dce/dce_clock_source.h"
56 #include "dce/dce_audio.h"
57 #include "dce/dce_hwseq.h"
58 #include "virtual/virtual_stream_encoder.h"
59 #include "dce110/dce110_resource.h"
60 #include "dml/display_mode_vba.h"
61 #include "dcn20_dccg.h"
62 #include "dcn20_vmid.h"
63 #include "dc_link_ddc.h"
65 #include "navi10_ip_offset.h"
67 #include "dcn/dcn_2_0_0_offset.h"
68 #include "dcn/dcn_2_0_0_sh_mask.h"
69 #include "dpcs/dpcs_2_0_0_offset.h"
70 #include "dpcs/dpcs_2_0_0_sh_mask.h"
72 #include "nbio/nbio_2_3_offset.h"
74 #include "dcn20/dcn20_dwb.h"
75 #include "dcn20/dcn20_mmhubbub.h"
77 #include "mmhub/mmhub_2_0_0_offset.h"
78 #include "mmhub/mmhub_2_0_0_sh_mask.h"
80 #include "reg_helper.h"
81 #include "dce/dce_abm.h"
82 #include "dce/dce_dmcu.h"
83 #include "dce/dce_aux.h"
84 #include "dce/dce_i2c.h"
85 #include "vm_helper.h"
87 #include "amdgpu_socbb.h"
89 #define DC_LOGGER_INIT(logger)
91 struct _vcs_dpi_ip_params_st dcn2_0_ip
= {
95 .gpuvm_max_page_table_levels
= 4,
96 .hostvm_max_page_table_levels
= 4,
97 .hostvm_cached_page_table_levels
= 0,
98 .pte_group_size_bytes
= 2048,
100 .rob_buffer_size_kbytes
= 168,
101 .det_buffer_size_kbytes
= 164,
102 .dpte_buffer_size_in_pte_reqs_luma
= 84,
103 .pde_proc_buffer_size_64k_reqs
= 48,
104 .dpp_output_buffer_pixels
= 2560,
105 .opp_output_buffer_lines
= 1,
106 .pixel_chunk_size_kbytes
= 8,
107 .pte_chunk_size_kbytes
= 2,
108 .meta_chunk_size_kbytes
= 2,
109 .writeback_chunk_size_kbytes
= 2,
110 .line_buffer_size_bits
= 789504,
111 .is_line_buffer_bpp_fixed
= 0,
112 .line_buffer_fixed_bpp
= 0,
113 .dcc_supported
= true,
114 .max_line_buffer_lines
= 12,
115 .writeback_luma_buffer_size_kbytes
= 12,
116 .writeback_chroma_buffer_size_kbytes
= 8,
117 .writeback_chroma_line_buffer_width_pixels
= 4,
118 .writeback_max_hscl_ratio
= 1,
119 .writeback_max_vscl_ratio
= 1,
120 .writeback_min_hscl_ratio
= 1,
121 .writeback_min_vscl_ratio
= 1,
122 .writeback_max_hscl_taps
= 12,
123 .writeback_max_vscl_taps
= 12,
124 .writeback_line_buffer_luma_buffer_size
= 0,
125 .writeback_line_buffer_chroma_buffer_size
= 14643,
126 .cursor_buffer_size
= 8,
127 .cursor_chunk_size
= 2,
131 .max_dchub_pscl_bw_pix_per_clk
= 4,
132 .max_pscl_lb_bw_pix_per_clk
= 2,
133 .max_lb_vscl_bw_pix_per_clk
= 4,
134 .max_vscl_hscl_bw_pix_per_clk
= 4,
141 .dispclk_ramp_margin_percent
= 1,
142 .underscan_factor
= 1.10,
143 .min_vblank_lines
= 32, //
144 .dppclk_delay_subtotal
= 77, //
145 .dppclk_delay_scl_lb_only
= 16,
146 .dppclk_delay_scl
= 50,
147 .dppclk_delay_cnvc_formatter
= 8,
148 .dppclk_delay_cnvc_cursor
= 6,
149 .dispclk_delay_subtotal
= 87, //
150 .dcfclk_cstate_latency
= 10, // SRExitTime
151 .max_inter_dcn_tile_repeaters
= 8,
153 .xfc_supported
= true,
154 .xfc_fill_bw_overhead_percent
= 10.0,
155 .xfc_fill_constant_bytes
= 0,
156 .number_of_cursors
= 1,
159 struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip
= {
163 .gpuvm_max_page_table_levels
= 4,
164 .hostvm_max_page_table_levels
= 4,
165 .hostvm_cached_page_table_levels
= 0,
167 .rob_buffer_size_kbytes
= 168,
168 .det_buffer_size_kbytes
= 164,
169 .dpte_buffer_size_in_pte_reqs_luma
= 84,
170 .dpte_buffer_size_in_pte_reqs_chroma
= 42,//todo
171 .dpp_output_buffer_pixels
= 2560,
172 .opp_output_buffer_lines
= 1,
173 .pixel_chunk_size_kbytes
= 8,
175 .max_page_table_levels
= 4,
176 .pte_chunk_size_kbytes
= 2,
177 .meta_chunk_size_kbytes
= 2,
178 .writeback_chunk_size_kbytes
= 2,
179 .line_buffer_size_bits
= 789504,
180 .is_line_buffer_bpp_fixed
= 0,
181 .line_buffer_fixed_bpp
= 0,
182 .dcc_supported
= true,
183 .max_line_buffer_lines
= 12,
184 .writeback_luma_buffer_size_kbytes
= 12,
185 .writeback_chroma_buffer_size_kbytes
= 8,
186 .writeback_chroma_line_buffer_width_pixels
= 4,
187 .writeback_max_hscl_ratio
= 1,
188 .writeback_max_vscl_ratio
= 1,
189 .writeback_min_hscl_ratio
= 1,
190 .writeback_min_vscl_ratio
= 1,
191 .writeback_max_hscl_taps
= 12,
192 .writeback_max_vscl_taps
= 12,
193 .writeback_line_buffer_luma_buffer_size
= 0,
194 .writeback_line_buffer_chroma_buffer_size
= 14643,
195 .cursor_buffer_size
= 8,
196 .cursor_chunk_size
= 2,
200 .max_dchub_pscl_bw_pix_per_clk
= 4,
201 .max_pscl_lb_bw_pix_per_clk
= 2,
202 .max_lb_vscl_bw_pix_per_clk
= 4,
203 .max_vscl_hscl_bw_pix_per_clk
= 4,
210 .dispclk_ramp_margin_percent
= 1,
211 .underscan_factor
= 1.10,
212 .min_vblank_lines
= 32, //
213 .dppclk_delay_subtotal
= 77, //
214 .dppclk_delay_scl_lb_only
= 16,
215 .dppclk_delay_scl
= 50,
216 .dppclk_delay_cnvc_formatter
= 8,
217 .dppclk_delay_cnvc_cursor
= 6,
218 .dispclk_delay_subtotal
= 87, //
219 .dcfclk_cstate_latency
= 10, // SRExitTime
220 .max_inter_dcn_tile_repeaters
= 8,
221 .xfc_supported
= true,
222 .xfc_fill_bw_overhead_percent
= 10.0,
223 .xfc_fill_constant_bytes
= 0,
225 .number_of_cursors
= 1,
228 struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc
= {
229 /* Defaults that get patched on driver load from firmware. */
234 .fabricclk_mhz
= 560.0,
235 .dispclk_mhz
= 513.0,
240 .dram_speed_mts
= 8960.0,
245 .fabricclk_mhz
= 694.0,
246 .dispclk_mhz
= 642.0,
251 .dram_speed_mts
= 11104.0,
256 .fabricclk_mhz
= 875.0,
257 .dispclk_mhz
= 734.0,
262 .dram_speed_mts
= 14000.0,
266 .dcfclk_mhz
= 1000.0,
267 .fabricclk_mhz
= 1000.0,
268 .dispclk_mhz
= 1100.0,
269 .dppclk_mhz
= 1100.0,
271 .socclk_mhz
= 1000.0,
273 .dram_speed_mts
= 16000.0,
277 .dcfclk_mhz
= 1200.0,
278 .fabricclk_mhz
= 1200.0,
279 .dispclk_mhz
= 1284.0,
280 .dppclk_mhz
= 1284.0,
282 .socclk_mhz
= 1200.0,
284 .dram_speed_mts
= 16000.0,
286 /*Extra state, no dispclk ramping*/
289 .dcfclk_mhz
= 1200.0,
290 .fabricclk_mhz
= 1200.0,
291 .dispclk_mhz
= 1284.0,
292 .dppclk_mhz
= 1284.0,
294 .socclk_mhz
= 1200.0,
296 .dram_speed_mts
= 16000.0,
300 .sr_exit_time_us
= 8.6,
301 .sr_enter_plus_exit_time_us
= 10.9,
302 .urgent_latency_us
= 4.0,
303 .urgent_latency_pixel_data_only_us
= 4.0,
304 .urgent_latency_pixel_mixed_with_vm_data_us
= 4.0,
305 .urgent_latency_vm_data_only_us
= 4.0,
306 .urgent_out_of_order_return_per_channel_pixel_only_bytes
= 4096,
307 .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes
= 4096,
308 .urgent_out_of_order_return_per_channel_vm_only_bytes
= 4096,
309 .pct_ideal_dram_sdp_bw_after_urgent_pixel_only
= 40.0,
310 .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm
= 40.0,
311 .pct_ideal_dram_sdp_bw_after_urgent_vm_only
= 40.0,
312 .max_avg_sdp_bw_use_normal_percent
= 40.0,
313 .max_avg_dram_bw_use_normal_percent
= 40.0,
314 .writeback_latency_us
= 12.0,
315 .ideal_dram_bw_after_urgent_percent
= 40.0,
316 .max_request_size_bytes
= 256,
317 .dram_channel_width_bytes
= 2,
318 .fabric_datapath_to_dcn_data_return_bytes
= 64,
319 .dcn_downspread_percent
= 0.5,
320 .downspread_percent
= 0.38,
321 .dram_page_open_time_ns
= 50.0,
322 .dram_rw_turnaround_time_ns
= 17.5,
323 .dram_return_buffer_per_channel_bytes
= 8192,
324 .round_trip_ping_latency_dcfclk_cycles
= 131,
325 .urgent_out_of_order_return_per_channel_bytes
= 256,
326 .channel_interleave_bytes
= 256,
329 .vmm_page_size_bytes
= 4096,
330 .dram_clock_change_latency_us
= 404.0,
331 .dummy_pstate_latency_us
= 5.0,
332 .writeback_dram_clock_change_latency_us
= 23.0,
333 .return_bus_width_bytes
= 64,
334 .dispclk_dppclk_vco_speed_mhz
= 3850,
335 .xfc_bus_transport_time_us
= 20,
336 .xfc_xbuf_latency_tolerance_us
= 4,
337 .use_urgent_burst_bw
= 0
340 struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc
= {
345 .fabricclk_mhz
= 560.0,
346 .dispclk_mhz
= 513.0,
351 .dram_speed_mts
= 8960.0,
356 .fabricclk_mhz
= 694.0,
357 .dispclk_mhz
= 642.0,
362 .dram_speed_mts
= 11104.0,
367 .fabricclk_mhz
= 875.0,
368 .dispclk_mhz
= 734.0,
373 .dram_speed_mts
= 14000.0,
377 .dcfclk_mhz
= 1000.0,
378 .fabricclk_mhz
= 1000.0,
379 .dispclk_mhz
= 1100.0,
380 .dppclk_mhz
= 1100.0,
382 .socclk_mhz
= 1000.0,
384 .dram_speed_mts
= 16000.0,
388 .dcfclk_mhz
= 1200.0,
389 .fabricclk_mhz
= 1200.0,
390 .dispclk_mhz
= 1284.0,
391 .dppclk_mhz
= 1284.0,
393 .socclk_mhz
= 1200.0,
395 .dram_speed_mts
= 16000.0,
397 /*Extra state, no dispclk ramping*/
400 .dcfclk_mhz
= 1200.0,
401 .fabricclk_mhz
= 1200.0,
402 .dispclk_mhz
= 1284.0,
403 .dppclk_mhz
= 1284.0,
405 .socclk_mhz
= 1200.0,
407 .dram_speed_mts
= 16000.0,
411 .sr_exit_time_us
= 8.6,
412 .sr_enter_plus_exit_time_us
= 10.9,
413 .urgent_latency_us
= 4.0,
414 .urgent_latency_pixel_data_only_us
= 4.0,
415 .urgent_latency_pixel_mixed_with_vm_data_us
= 4.0,
416 .urgent_latency_vm_data_only_us
= 4.0,
417 .urgent_out_of_order_return_per_channel_pixel_only_bytes
= 4096,
418 .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes
= 4096,
419 .urgent_out_of_order_return_per_channel_vm_only_bytes
= 4096,
420 .pct_ideal_dram_sdp_bw_after_urgent_pixel_only
= 40.0,
421 .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm
= 40.0,
422 .pct_ideal_dram_sdp_bw_after_urgent_vm_only
= 40.0,
423 .max_avg_sdp_bw_use_normal_percent
= 40.0,
424 .max_avg_dram_bw_use_normal_percent
= 40.0,
425 .writeback_latency_us
= 12.0,
426 .ideal_dram_bw_after_urgent_percent
= 40.0,
427 .max_request_size_bytes
= 256,
428 .dram_channel_width_bytes
= 2,
429 .fabric_datapath_to_dcn_data_return_bytes
= 64,
430 .dcn_downspread_percent
= 0.5,
431 .downspread_percent
= 0.38,
432 .dram_page_open_time_ns
= 50.0,
433 .dram_rw_turnaround_time_ns
= 17.5,
434 .dram_return_buffer_per_channel_bytes
= 8192,
435 .round_trip_ping_latency_dcfclk_cycles
= 131,
436 .urgent_out_of_order_return_per_channel_bytes
= 256,
437 .channel_interleave_bytes
= 256,
440 .vmm_page_size_bytes
= 4096,
441 .dram_clock_change_latency_us
= 404.0,
442 .dummy_pstate_latency_us
= 5.0,
443 .writeback_dram_clock_change_latency_us
= 23.0,
444 .return_bus_width_bytes
= 64,
445 .dispclk_dppclk_vco_speed_mhz
= 3850,
446 .xfc_bus_transport_time_us
= 20,
447 .xfc_xbuf_latency_tolerance_us
= 4,
448 .use_urgent_burst_bw
= 0
/* NV12 SoC bounding box starts out all-zero; presumably it is populated at
 * runtime (like the patched defaults for the other bounding boxes above) —
 * NOTE(review): confirm where dcn2_0_nv12_soc is filled in before first use.
 */
451 struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc
= { 0 };
453 #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
454 #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
455 #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
456 #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f
457 #define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
458 #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f
459 #define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
460 #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f
461 #define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
462 #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f
463 #define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
464 #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x260f
465 #define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
466 #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x270f
467 #define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
471 enum dcn20_clk_src_array_id
{
481 /* begin *********************
482 * macros to expand register list macro defined in HW object header file */
485 /* TODO awful hack. fixup dcn20_dwb.h */
/* Resolve a DCN register-bank segment index to its base address constant.
 * BASE_INNER pastes the segment number onto DCN_BASE__INST0_SEG; the BASE
 * wrapper forces macro expansion of `seg` before the token paste. */
487 #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
489 #define BASE(seg) BASE_INNER(seg)
491 #define SR(reg_name)\
492 .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
/* SRI: initialize .reg_name for one block instance — segment base (looked up
 * via the register's _BASE_IDX constant) plus the register offset. */
495 #define SRI(reg_name, block, id)\
496 .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
497 mm ## block ## id ## _ ## reg_name
/* SRIR: same as SRI but the struct field name differs from the register name. */
499 #define SRIR(var_name, reg_name, block, id)\
500 .var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
501 mm ## block ## id ## _ ## reg_name
/* SRII: same as SRI but the struct field is an array indexed by instance id. */
503 #define SRII(reg_name, block, id)\
504 .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
505 mm ## block ## id ## _ ## reg_name
/* DCCG_SRII: SRII variant whose field name is prefixed with the block name
 * (e.g. .block_reg_name[id]) as used by the DCCG register struct. */
507 #define DCCG_SRII(reg_name, block, id)\
508 .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
509 mm ## block ## id ## _ ## reg_name
/* Resolve an NBIO register-bank segment index to its base address constant
 * (token-paste onto NBIO_BASE__INST0_SEG, mirroring BASE_INNER above). */
512 #define NBIO_BASE_INNER(seg) \
513 NBIO_BASE__INST0_SEG ## seg
515 #define NBIO_BASE(seg) \
518 #define NBIO_SR(reg_name)\
519 .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
/* Resolve an MMHUB register-bank segment index to its base address constant;
 * MMHUB_BASE forces macro expansion of `seg` before the token paste. */
523 #define MMHUB_BASE_INNER(seg) \
524 MMHUB_BASE__INST0_SEG ## seg
526 #define MMHUB_BASE(seg) \
527 MMHUB_BASE_INNER(seg)
529 #define MMHUB_SR(reg_name)\
530 .reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \
533 static const struct bios_registers bios_regs
= {
534 NBIO_SR(BIOS_SCRATCH_3
),
535 NBIO_SR(BIOS_SCRATCH_6
)
538 #define clk_src_regs(index, pllid)\
540 CS_COMMON_REG_LIST_DCN2_0(index, pllid),\
543 static const struct dce110_clk_src_regs clk_src_regs
[] = {
552 static const struct dce110_clk_src_shift cs_shift
= {
553 CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT
)
556 static const struct dce110_clk_src_mask cs_mask
= {
557 CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK
)
560 static const struct dce_dmcu_registers dmcu_regs
= {
561 DMCU_DCN10_REG_LIST()
564 static const struct dce_dmcu_shift dmcu_shift
= {
565 DMCU_MASK_SH_LIST_DCN10(__SHIFT
)
568 static const struct dce_dmcu_mask dmcu_mask
= {
569 DMCU_MASK_SH_LIST_DCN10(_MASK
)
572 static const struct dce_abm_registers abm_regs
= {
576 static const struct dce_abm_shift abm_shift
= {
577 ABM_MASK_SH_LIST_DCN20(__SHIFT
)
580 static const struct dce_abm_mask abm_mask
= {
581 ABM_MASK_SH_LIST_DCN20(_MASK
)
584 #define audio_regs(id)\
586 AUD_COMMON_REG_LIST(id)\
589 static const struct dce_audio_registers audio_regs
[] = {
/* Shift/mask list for the audio block: the two Azalia endpoint index/data
 * fields plus the common audio shift/mask base list. Expanded with __SHIFT
 * and _MASK below to build audio_shift and audio_mask. */
599 #define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
600 SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
601 SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
602 AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
604 static const struct dce_audio_shift audio_shift
= {
605 DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT
)
608 static const struct dce_audio_mask audio_mask
= {
609 DCE120_AUD_COMMON_MASK_SH_LIST(_MASK
)
612 #define stream_enc_regs(id)\
614 SE_DCN2_REG_LIST(id)\
617 static const struct dcn10_stream_enc_registers stream_enc_regs
[] = {
626 static const struct dcn10_stream_encoder_shift se_shift
= {
627 SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT
)
630 static const struct dcn10_stream_encoder_mask se_mask
= {
631 SE_COMMON_MASK_SH_LIST_DCN20(_MASK
)
635 #define aux_regs(id)\
637 DCN2_AUX_REG_LIST(id)\
640 static const struct dcn10_link_enc_aux_registers link_enc_aux_regs
[] = {
649 #define hpd_regs(id)\
654 static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs
[] = {
663 #define link_regs(id, phyid)\
665 LE_DCN10_REG_LIST(id), \
666 UNIPHY_DCN2_REG_LIST(phyid), \
667 DPCS_DCN2_REG_LIST(id), \
668 SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
671 static const struct dcn10_link_enc_registers link_enc_regs
[] = {
680 static const struct dcn10_link_enc_shift le_shift
= {
681 LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT
),\
682 DPCS_DCN2_MASK_SH_LIST(__SHIFT
)
685 static const struct dcn10_link_enc_mask le_mask
= {
686 LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK
),\
687 DPCS_DCN2_MASK_SH_LIST(_MASK
)
690 #define ipp_regs(id)\
692 IPP_REG_LIST_DCN20(id),\
695 static const struct dcn10_ipp_registers ipp_regs
[] = {
704 static const struct dcn10_ipp_shift ipp_shift
= {
705 IPP_MASK_SH_LIST_DCN20(__SHIFT
)
708 static const struct dcn10_ipp_mask ipp_mask
= {
709 IPP_MASK_SH_LIST_DCN20(_MASK
),
712 #define opp_regs(id)\
714 OPP_REG_LIST_DCN20(id),\
717 static const struct dcn20_opp_registers opp_regs
[] = {
726 static const struct dcn20_opp_shift opp_shift
= {
727 OPP_MASK_SH_LIST_DCN20(__SHIFT
)
730 static const struct dcn20_opp_mask opp_mask
= {
731 OPP_MASK_SH_LIST_DCN20(_MASK
)
734 #define aux_engine_regs(id)\
736 AUX_COMMON_REG_LIST0(id), \
739 .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \
742 static const struct dce110_aux_registers aux_engine_regs
[] = {
753 TF_REG_LIST_DCN20(id),\
754 TF_REG_LIST_DCN20_COMMON_APPEND(id),\
757 static const struct dcn2_dpp_registers tf_regs
[] = {
766 static const struct dcn2_dpp_shift tf_shift
= {
767 TF_REG_LIST_SH_MASK_DCN20(__SHIFT
),
768 TF_DEBUG_REG_LIST_SH_DCN20
771 static const struct dcn2_dpp_mask tf_mask
= {
772 TF_REG_LIST_SH_MASK_DCN20(_MASK
),
773 TF_DEBUG_REG_LIST_MASK_DCN20
776 #define dwbc_regs_dcn2(id)\
778 DWBC_COMMON_REG_LIST_DCN2_0(id),\
781 static const struct dcn20_dwbc_registers dwbc20_regs
[] = {
785 static const struct dcn20_dwbc_shift dwbc20_shift
= {
786 DWBC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT
)
789 static const struct dcn20_dwbc_mask dwbc20_mask
= {
790 DWBC_COMMON_MASK_SH_LIST_DCN2_0(_MASK
)
793 #define mcif_wb_regs_dcn2(id)\
795 MCIF_WB_COMMON_REG_LIST_DCN2_0(id),\
798 static const struct dcn20_mmhubbub_registers mcif_wb20_regs
[] = {
799 mcif_wb_regs_dcn2(0),
802 static const struct dcn20_mmhubbub_shift mcif_wb20_shift
= {
803 MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT
)
806 static const struct dcn20_mmhubbub_mask mcif_wb20_mask
= {
807 MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(_MASK
)
810 static const struct dcn20_mpc_registers mpc_regs
= {
811 MPC_REG_LIST_DCN2_0(0),
812 MPC_REG_LIST_DCN2_0(1),
813 MPC_REG_LIST_DCN2_0(2),
814 MPC_REG_LIST_DCN2_0(3),
815 MPC_REG_LIST_DCN2_0(4),
816 MPC_REG_LIST_DCN2_0(5),
817 MPC_OUT_MUX_REG_LIST_DCN2_0(0),
818 MPC_OUT_MUX_REG_LIST_DCN2_0(1),
819 MPC_OUT_MUX_REG_LIST_DCN2_0(2),
820 MPC_OUT_MUX_REG_LIST_DCN2_0(3),
821 MPC_OUT_MUX_REG_LIST_DCN2_0(4),
822 MPC_OUT_MUX_REG_LIST_DCN2_0(5),
823 MPC_DBG_REG_LIST_DCN2_0()
826 static const struct dcn20_mpc_shift mpc_shift
= {
827 MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT
),
828 MPC_DEBUG_REG_LIST_SH_DCN20
831 static const struct dcn20_mpc_mask mpc_mask
= {
832 MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK
),
833 MPC_DEBUG_REG_LIST_MASK_DCN20
837 [id] = {TG_COMMON_REG_LIST_DCN2_0(id)}
840 static const struct dcn_optc_registers tg_regs
[] = {
849 static const struct dcn_optc_shift tg_shift
= {
850 TG_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT
)
853 static const struct dcn_optc_mask tg_mask
= {
854 TG_COMMON_MASK_SH_LIST_DCN2_0(_MASK
)
857 #define hubp_regs(id)\
859 HUBP_REG_LIST_DCN20(id)\
862 static const struct dcn_hubp2_registers hubp_regs
[] = {
871 static const struct dcn_hubp2_shift hubp_shift
= {
872 HUBP_MASK_SH_LIST_DCN20(__SHIFT
)
875 static const struct dcn_hubp2_mask hubp_mask
= {
876 HUBP_MASK_SH_LIST_DCN20(_MASK
)
879 static const struct dcn_hubbub_registers hubbub_reg
= {
880 HUBBUB_REG_LIST_DCN20(0)
883 static const struct dcn_hubbub_shift hubbub_shift
= {
884 HUBBUB_MASK_SH_LIST_DCN20(__SHIFT
)
887 static const struct dcn_hubbub_mask hubbub_mask
= {
888 HUBBUB_MASK_SH_LIST_DCN20(_MASK
)
891 #define vmid_regs(id)\
893 DCN20_VMID_REG_LIST(id)\
896 static const struct dcn_vmid_registers vmid_regs
[] = {
915 static const struct dcn20_vmid_shift vmid_shifts
= {
916 DCN20_VMID_MASK_SH_LIST(__SHIFT
)
919 static const struct dcn20_vmid_mask vmid_masks
= {
920 DCN20_VMID_MASK_SH_LIST(_MASK
)
923 static const struct dce110_aux_registers_shift aux_shift
= {
924 DCN_AUX_MASK_SH_LIST(__SHIFT
)
927 static const struct dce110_aux_registers_mask aux_mask
= {
928 DCN_AUX_MASK_SH_LIST(_MASK
)
931 static int map_transmitter_id_to_phy_instance(
932 enum transmitter transmitter
)
934 switch (transmitter
) {
935 case TRANSMITTER_UNIPHY_A
:
938 case TRANSMITTER_UNIPHY_B
:
941 case TRANSMITTER_UNIPHY_C
:
944 case TRANSMITTER_UNIPHY_D
:
947 case TRANSMITTER_UNIPHY_E
:
950 case TRANSMITTER_UNIPHY_F
:
959 #define dsc_regsDCN20(id)\
961 DSC_REG_LIST_DCN20(id)\
964 static const struct dcn20_dsc_registers dsc_regs
[] = {
973 static const struct dcn20_dsc_shift dsc_shift
= {
974 DSC_REG_LIST_SH_MASK_DCN20(__SHIFT
)
977 static const struct dcn20_dsc_mask dsc_mask
= {
978 DSC_REG_LIST_SH_MASK_DCN20(_MASK
)
981 static const struct dccg_registers dccg_regs
= {
985 static const struct dccg_shift dccg_shift
= {
986 DCCG_MASK_SH_LIST_DCN2(__SHIFT
)
989 static const struct dccg_mask dccg_mask
= {
990 DCCG_MASK_SH_LIST_DCN2(_MASK
)
993 static const struct resource_caps res_cap_nv10
= {
994 .num_timing_generator
= 6,
996 .num_video_plane
= 6,
998 .num_stream_encoder
= 6,
1006 static const struct dc_plane_cap plane_cap
= {
1007 .type
= DC_PLANE_TYPE_DCN_UNIVERSAL
,
1008 .blends_with_above
= true,
1009 .blends_with_below
= true,
1010 .per_pixel_alpha
= true,
1012 .pixel_format_support
= {
1019 .max_upscale_factor
= {
1025 .max_downscale_factor
= {
1031 static const struct resource_caps res_cap_nv14
= {
1032 .num_timing_generator
= 5,
1034 .num_video_plane
= 5,
1036 .num_stream_encoder
= 5,
1044 static const struct dc_debug_options debug_defaults_drv
= {
1045 .disable_dmcu
= false,
1046 .force_abm_enable
= false,
1047 .timing_trace
= false,
1048 .clock_trace
= true,
1049 .disable_pplib_clock_request
= true,
1050 .pipe_split_policy
= MPC_SPLIT_DYNAMIC
,
1051 .force_single_disp_pipe_split
= false,
1052 .disable_dcc
= DCC_ENABLE
,
1053 .vsr_support
= true,
1054 .performance_trace
= false,
1055 .max_downscale_src_width
= 5120,/*upto 5K*/
1056 .disable_pplib_wm_range
= false,
1057 .scl_reset_length10
= true,
1058 .sanity_checks
= false,
1059 .disable_tri_buf
= true,
1060 .underflow_assert_delay_us
= 0xFFFFFFFF,
1063 static const struct dc_debug_options debug_defaults_diags
= {
1064 .disable_dmcu
= false,
1065 .force_abm_enable
= false,
1066 .timing_trace
= true,
1067 .clock_trace
= true,
1068 .disable_dpp_power_gate
= true,
1069 .disable_hubp_power_gate
= true,
1070 .disable_clock_gate
= true,
1071 .disable_pplib_clock_request
= true,
1072 .disable_pplib_wm_range
= true,
1073 .disable_stutter
= true,
1074 .scl_reset_length10
= true,
1075 .underflow_assert_delay_us
= 0xFFFFFFFF,
1078 void dcn20_dpp_destroy(struct dpp
**dpp
)
1080 kfree(TO_DCN20_DPP(*dpp
));
1084 struct dpp
*dcn20_dpp_create(
1085 struct dc_context
*ctx
,
1088 struct dcn20_dpp
*dpp
=
1089 kzalloc(sizeof(struct dcn20_dpp
), GFP_KERNEL
);
1094 if (dpp2_construct(dpp
, ctx
, inst
,
1095 &tf_regs
[inst
], &tf_shift
, &tf_mask
))
1098 BREAK_TO_DEBUGGER();
1103 struct input_pixel_processor
*dcn20_ipp_create(
1104 struct dc_context
*ctx
, uint32_t inst
)
1106 struct dcn10_ipp
*ipp
=
1107 kzalloc(sizeof(struct dcn10_ipp
), GFP_KERNEL
);
1110 BREAK_TO_DEBUGGER();
1114 dcn20_ipp_construct(ipp
, ctx
, inst
,
1115 &ipp_regs
[inst
], &ipp_shift
, &ipp_mask
);
1120 struct output_pixel_processor
*dcn20_opp_create(
1121 struct dc_context
*ctx
, uint32_t inst
)
1123 struct dcn20_opp
*opp
=
1124 kzalloc(sizeof(struct dcn20_opp
), GFP_KERNEL
);
1127 BREAK_TO_DEBUGGER();
1131 dcn20_opp_construct(opp
, ctx
, inst
,
1132 &opp_regs
[inst
], &opp_shift
, &opp_mask
);
1136 struct dce_aux
*dcn20_aux_engine_create(
1137 struct dc_context
*ctx
,
1140 struct aux_engine_dce110
*aux_engine
=
1141 kzalloc(sizeof(struct aux_engine_dce110
), GFP_KERNEL
);
1146 dce110_aux_engine_construct(aux_engine
, ctx
, inst
,
1147 SW_AUX_TIMEOUT_PERIOD_MULTIPLIER
* AUX_TIMEOUT_PERIOD
,
1148 &aux_engine_regs
[inst
],
1151 ctx
->dc
->caps
.extended_aux_timeout_support
);
1153 return &aux_engine
->base
;
/* Build one i2c_hw_regs[] array entry: the common HW I2C engine register
 * list for instance `id`, wrapped in an initializer brace pair. */
1155 #define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) }
1157 static const struct dce_i2c_registers i2c_hw_regs
[] = {
1166 static const struct dce_i2c_shift i2c_shifts
= {
1167 I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT
)
1170 static const struct dce_i2c_mask i2c_masks
= {
1171 I2C_COMMON_MASK_SH_LIST_DCN2(_MASK
)
1174 struct dce_i2c_hw
*dcn20_i2c_hw_create(
1175 struct dc_context
*ctx
,
1178 struct dce_i2c_hw
*dce_i2c_hw
=
1179 kzalloc(sizeof(struct dce_i2c_hw
), GFP_KERNEL
);
1184 dcn2_i2c_hw_construct(dce_i2c_hw
, ctx
, inst
,
1185 &i2c_hw_regs
[inst
], &i2c_shifts
, &i2c_masks
);
1189 struct mpc
*dcn20_mpc_create(struct dc_context
*ctx
)
1191 struct dcn20_mpc
*mpc20
= kzalloc(sizeof(struct dcn20_mpc
),
1197 dcn20_mpc_construct(mpc20
, ctx
,
1203 return &mpc20
->base
;
1206 struct hubbub
*dcn20_hubbub_create(struct dc_context
*ctx
)
1209 struct dcn20_hubbub
*hubbub
= kzalloc(sizeof(struct dcn20_hubbub
),
1215 hubbub2_construct(hubbub
, ctx
,
1220 for (i
= 0; i
< res_cap_nv10
.num_vmid
; i
++) {
1221 struct dcn20_vmid
*vmid
= &hubbub
->vmid
[i
];
1225 vmid
->regs
= &vmid_regs
[i
];
1226 vmid
->shifts
= &vmid_shifts
;
1227 vmid
->masks
= &vmid_masks
;
1230 return &hubbub
->base
;
1233 struct timing_generator
*dcn20_timing_generator_create(
1234 struct dc_context
*ctx
,
1237 struct optc
*tgn10
=
1238 kzalloc(sizeof(struct optc
), GFP_KERNEL
);
1243 tgn10
->base
.inst
= instance
;
1244 tgn10
->base
.ctx
= ctx
;
1246 tgn10
->tg_regs
= &tg_regs
[instance
];
1247 tgn10
->tg_shift
= &tg_shift
;
1248 tgn10
->tg_mask
= &tg_mask
;
1250 dcn20_timing_generator_init(tgn10
);
1252 return &tgn10
->base
;
1255 static const struct encoder_feature_support link_enc_feature
= {
1256 .max_hdmi_deep_color
= COLOR_DEPTH_121212
,
1257 .max_hdmi_pixel_clock
= 600000,
1258 .hdmi_ycbcr420_supported
= true,
1259 .dp_ycbcr420_supported
= true,
1260 .fec_supported
= true,
1261 .flags
.bits
.IS_HBR2_CAPABLE
= true,
1262 .flags
.bits
.IS_HBR3_CAPABLE
= true,
1263 .flags
.bits
.IS_TPS3_CAPABLE
= true,
1264 .flags
.bits
.IS_TPS4_CAPABLE
= true
1267 struct link_encoder
*dcn20_link_encoder_create(
1268 const struct encoder_init_data
*enc_init_data
)
1270 struct dcn20_link_encoder
*enc20
=
1271 kzalloc(sizeof(struct dcn20_link_encoder
), GFP_KERNEL
);
1278 map_transmitter_id_to_phy_instance(enc_init_data
->transmitter
);
1280 dcn20_link_encoder_construct(enc20
,
1283 &link_enc_regs
[link_regs_id
],
1284 &link_enc_aux_regs
[enc_init_data
->channel
- 1],
1285 &link_enc_hpd_regs
[enc_init_data
->hpd_source
],
1289 return &enc20
->enc10
.base
;
1292 struct clock_source
*dcn20_clock_source_create(
1293 struct dc_context
*ctx
,
1294 struct dc_bios
*bios
,
1295 enum clock_source_id id
,
1296 const struct dce110_clk_src_regs
*regs
,
1299 struct dce110_clk_src
*clk_src
=
1300 kzalloc(sizeof(struct dce110_clk_src
), GFP_KERNEL
);
1305 if (dcn20_clk_src_construct(clk_src
, ctx
, bios
, id
,
1306 regs
, &cs_shift
, &cs_mask
)) {
1307 clk_src
->base
.dp_clk_src
= dp_clk_src
;
1308 return &clk_src
->base
;
1312 BREAK_TO_DEBUGGER();
1316 static void read_dce_straps(
1317 struct dc_context
*ctx
,
1318 struct resource_straps
*straps
)
1320 generic_reg_get(ctx
, mmDC_PINSTRAPS
+ BASE(mmDC_PINSTRAPS_BASE_IDX
),
1321 FN(DC_PINSTRAPS
, DC_PINSTRAPS_AUDIO
), &straps
->dc_pinstraps_audio
);
1324 static struct audio
*dcn20_create_audio(
1325 struct dc_context
*ctx
, unsigned int inst
)
1327 return dce_audio_create(ctx
, inst
,
1328 &audio_regs
[inst
], &audio_shift
, &audio_mask
);
1331 struct stream_encoder
*dcn20_stream_encoder_create(
1332 enum engine_id eng_id
,
1333 struct dc_context
*ctx
)
1335 struct dcn10_stream_encoder
*enc1
=
1336 kzalloc(sizeof(struct dcn10_stream_encoder
), GFP_KERNEL
);
1341 if (ASICREV_IS_NAVI14_M(ctx
->asic_id
.hw_internal_rev
)) {
1342 if (eng_id
>= ENGINE_ID_DIGD
)
1346 dcn20_stream_encoder_construct(enc1
, ctx
, ctx
->dc_bios
, eng_id
,
1347 &stream_enc_regs
[eng_id
],
1348 &se_shift
, &se_mask
);
1353 static const struct dce_hwseq_registers hwseq_reg
= {
1354 HWSEQ_DCN2_REG_LIST()
1357 static const struct dce_hwseq_shift hwseq_shift
= {
1358 HWSEQ_DCN2_MASK_SH_LIST(__SHIFT
)
1361 static const struct dce_hwseq_mask hwseq_mask
= {
1362 HWSEQ_DCN2_MASK_SH_LIST(_MASK
)
1365 struct dce_hwseq
*dcn20_hwseq_create(
1366 struct dc_context
*ctx
)
1368 struct dce_hwseq
*hws
= kzalloc(sizeof(struct dce_hwseq
), GFP_KERNEL
);
1372 hws
->regs
= &hwseq_reg
;
1373 hws
->shifts
= &hwseq_shift
;
1374 hws
->masks
= &hwseq_mask
;
1379 static const struct resource_create_funcs res_create_funcs
= {
1380 .read_dce_straps
= read_dce_straps
,
1381 .create_audio
= dcn20_create_audio
,
1382 .create_stream_encoder
= dcn20_stream_encoder_create
,
1383 .create_hwseq
= dcn20_hwseq_create
,
1386 static const struct resource_create_funcs res_create_maximus_funcs
= {
1387 .read_dce_straps
= NULL
,
1388 .create_audio
= NULL
,
1389 .create_stream_encoder
= NULL
,
1390 .create_hwseq
= dcn20_hwseq_create
,
1393 static void dcn20_pp_smu_destroy(struct pp_smu_funcs
**pp_smu
);
1395 void dcn20_clock_source_destroy(struct clock_source
**clk_src
)
1397 kfree(TO_DCE110_CLK_SRC(*clk_src
));
1402 struct display_stream_compressor
*dcn20_dsc_create(
1403 struct dc_context
*ctx
, uint32_t inst
)
1405 struct dcn20_dsc
*dsc
=
1406 kzalloc(sizeof(struct dcn20_dsc
), GFP_KERNEL
);
1409 BREAK_TO_DEBUGGER();
1413 dsc2_construct(dsc
, ctx
, inst
, &dsc_regs
[inst
], &dsc_shift
, &dsc_mask
);
1417 void dcn20_dsc_destroy(struct display_stream_compressor
**dsc
)
1419 kfree(container_of(*dsc
, struct dcn20_dsc
, base
));
1424 static void dcn20_resource_destruct(struct dcn20_resource_pool
*pool
)
1428 for (i
= 0; i
< pool
->base
.stream_enc_count
; i
++) {
1429 if (pool
->base
.stream_enc
[i
] != NULL
) {
1430 kfree(DCN10STRENC_FROM_STRENC(pool
->base
.stream_enc
[i
]));
1431 pool
->base
.stream_enc
[i
] = NULL
;
1435 for (i
= 0; i
< pool
->base
.res_cap
->num_dsc
; i
++) {
1436 if (pool
->base
.dscs
[i
] != NULL
)
1437 dcn20_dsc_destroy(&pool
->base
.dscs
[i
]);
1440 if (pool
->base
.mpc
!= NULL
) {
1441 kfree(TO_DCN20_MPC(pool
->base
.mpc
));
1442 pool
->base
.mpc
= NULL
;
1444 if (pool
->base
.hubbub
!= NULL
) {
1445 kfree(pool
->base
.hubbub
);
1446 pool
->base
.hubbub
= NULL
;
1448 for (i
= 0; i
< pool
->base
.pipe_count
; i
++) {
1449 if (pool
->base
.dpps
[i
] != NULL
)
1450 dcn20_dpp_destroy(&pool
->base
.dpps
[i
]);
1452 if (pool
->base
.ipps
[i
] != NULL
)
1453 pool
->base
.ipps
[i
]->funcs
->ipp_destroy(&pool
->base
.ipps
[i
]);
1455 if (pool
->base
.hubps
[i
] != NULL
) {
1456 kfree(TO_DCN20_HUBP(pool
->base
.hubps
[i
]));
1457 pool
->base
.hubps
[i
] = NULL
;
1460 if (pool
->base
.irqs
!= NULL
) {
1461 dal_irq_service_destroy(&pool
->base
.irqs
);
1465 for (i
= 0; i
< pool
->base
.res_cap
->num_ddc
; i
++) {
1466 if (pool
->base
.engines
[i
] != NULL
)
1467 dce110_engine_destroy(&pool
->base
.engines
[i
]);
1468 if (pool
->base
.hw_i2cs
[i
] != NULL
) {
1469 kfree(pool
->base
.hw_i2cs
[i
]);
1470 pool
->base
.hw_i2cs
[i
] = NULL
;
1472 if (pool
->base
.sw_i2cs
[i
] != NULL
) {
1473 kfree(pool
->base
.sw_i2cs
[i
]);
1474 pool
->base
.sw_i2cs
[i
] = NULL
;
1478 for (i
= 0; i
< pool
->base
.res_cap
->num_opp
; i
++) {
1479 if (pool
->base
.opps
[i
] != NULL
)
1480 pool
->base
.opps
[i
]->funcs
->opp_destroy(&pool
->base
.opps
[i
]);
1483 for (i
= 0; i
< pool
->base
.res_cap
->num_timing_generator
; i
++) {
1484 if (pool
->base
.timing_generators
[i
] != NULL
) {
1485 kfree(DCN10TG_FROM_TG(pool
->base
.timing_generators
[i
]));
1486 pool
->base
.timing_generators
[i
] = NULL
;
1490 for (i
= 0; i
< pool
->base
.res_cap
->num_dwb
; i
++) {
1491 if (pool
->base
.dwbc
[i
] != NULL
) {
1492 kfree(TO_DCN20_DWBC(pool
->base
.dwbc
[i
]));
1493 pool
->base
.dwbc
[i
] = NULL
;
1495 if (pool
->base
.mcif_wb
[i
] != NULL
) {
1496 kfree(TO_DCN20_MMHUBBUB(pool
->base
.mcif_wb
[i
]));
1497 pool
->base
.mcif_wb
[i
] = NULL
;
1501 for (i
= 0; i
< pool
->base
.audio_count
; i
++) {
1502 if (pool
->base
.audios
[i
])
1503 dce_aud_destroy(&pool
->base
.audios
[i
]);
1506 for (i
= 0; i
< pool
->base
.clk_src_count
; i
++) {
1507 if (pool
->base
.clock_sources
[i
] != NULL
) {
1508 dcn20_clock_source_destroy(&pool
->base
.clock_sources
[i
]);
1509 pool
->base
.clock_sources
[i
] = NULL
;
1513 if (pool
->base
.dp_clock_source
!= NULL
) {
1514 dcn20_clock_source_destroy(&pool
->base
.dp_clock_source
);
1515 pool
->base
.dp_clock_source
= NULL
;
1519 if (pool
->base
.abm
!= NULL
)
1520 dce_abm_destroy(&pool
->base
.abm
);
1522 if (pool
->base
.dmcu
!= NULL
)
1523 dce_dmcu_destroy(&pool
->base
.dmcu
);
1525 if (pool
->base
.dccg
!= NULL
)
1526 dcn_dccg_destroy(&pool
->base
.dccg
);
1528 if (pool
->base
.pp_smu
!= NULL
)
1529 dcn20_pp_smu_destroy(&pool
->base
.pp_smu
);
1531 if (pool
->base
.oem_device
!= NULL
)
1532 dal_ddc_service_destroy(&pool
->base
.oem_device
);
1535 struct hubp
*dcn20_hubp_create(
1536 struct dc_context
*ctx
,
1539 struct dcn20_hubp
*hubp2
=
1540 kzalloc(sizeof(struct dcn20_hubp
), GFP_KERNEL
);
1545 if (hubp2_construct(hubp2
, ctx
, inst
,
1546 &hubp_regs
[inst
], &hubp_shift
, &hubp_mask
))
1547 return &hubp2
->base
;
1549 BREAK_TO_DEBUGGER();
1554 static void get_pixel_clock_parameters(
1555 struct pipe_ctx
*pipe_ctx
,
1556 struct pixel_clk_params
*pixel_clk_params
)
1558 const struct dc_stream_state
*stream
= pipe_ctx
->stream
;
1559 struct pipe_ctx
*odm_pipe
;
1562 for (odm_pipe
= pipe_ctx
->next_odm_pipe
; odm_pipe
; odm_pipe
= odm_pipe
->next_odm_pipe
)
1565 pixel_clk_params
->requested_pix_clk_100hz
= stream
->timing
.pix_clk_100hz
;
1566 pixel_clk_params
->encoder_object_id
= stream
->link
->link_enc
->id
;
1567 pixel_clk_params
->signal_type
= pipe_ctx
->stream
->signal
;
1568 pixel_clk_params
->controller_id
= pipe_ctx
->stream_res
.tg
->inst
+ 1;
1569 /* TODO: un-hardcode*/
1570 pixel_clk_params
->requested_sym_clk
= LINK_RATE_LOW
*
1571 LINK_RATE_REF_FREQ_IN_KHZ
;
1572 pixel_clk_params
->flags
.ENABLE_SS
= 0;
1573 pixel_clk_params
->color_depth
=
1574 stream
->timing
.display_color_depth
;
1575 pixel_clk_params
->flags
.DISPLAY_BLANKED
= 1;
1576 pixel_clk_params
->pixel_encoding
= stream
->timing
.pixel_encoding
;
1578 if (stream
->timing
.pixel_encoding
== PIXEL_ENCODING_YCBCR422
)
1579 pixel_clk_params
->color_depth
= COLOR_DEPTH_888
;
1582 pixel_clk_params
->requested_pix_clk_100hz
/= 4;
1583 else if (optc2_is_two_pixels_per_containter(&stream
->timing
) || opp_cnt
== 2)
1584 pixel_clk_params
->requested_pix_clk_100hz
/= 2;
1586 if (stream
->timing
.timing_3d_format
== TIMING_3D_FORMAT_HW_FRAME_PACKING
)
1587 pixel_clk_params
->requested_pix_clk_100hz
*= 2;
1591 static void build_clamping_params(struct dc_stream_state
*stream
)
1593 stream
->clamping
.clamping_level
= CLAMPING_FULL_RANGE
;
1594 stream
->clamping
.c_depth
= stream
->timing
.display_color_depth
;
1595 stream
->clamping
.pixel_encoding
= stream
->timing
.pixel_encoding
;
1598 static enum dc_status
build_pipe_hw_param(struct pipe_ctx
*pipe_ctx
)
1601 get_pixel_clock_parameters(pipe_ctx
, &pipe_ctx
->stream_res
.pix_clk_params
);
1603 pipe_ctx
->clock_source
->funcs
->get_pix_clk_dividers(
1604 pipe_ctx
->clock_source
,
1605 &pipe_ctx
->stream_res
.pix_clk_params
,
1606 &pipe_ctx
->pll_settings
);
1608 pipe_ctx
->stream
->clamping
.pixel_encoding
= pipe_ctx
->stream
->timing
.pixel_encoding
;
1610 resource_build_bit_depth_reduction_params(pipe_ctx
->stream
,
1611 &pipe_ctx
->stream
->bit_depth_params
);
1612 build_clamping_params(pipe_ctx
->stream
);
1617 enum dc_status
dcn20_build_mapped_resource(const struct dc
*dc
, struct dc_state
*context
, struct dc_stream_state
*stream
)
1619 enum dc_status status
= DC_OK
;
1620 struct pipe_ctx
*pipe_ctx
= resource_get_head_pipe_for_stream(&context
->res_ctx
, stream
);
1622 /*TODO Seems unneeded anymore */
1623 /* if (old_context && resource_is_stream_unchanged(old_context, stream)) {
1624 if (stream != NULL && old_context->streams[i] != NULL) {
1625 todo: shouldn't have to copy missing parameter here
1626 resource_build_bit_depth_reduction_params(stream,
1627 &stream->bit_depth_params);
1628 stream->clamping.pixel_encoding =
1629 stream->timing.pixel_encoding;
1631 resource_build_bit_depth_reduction_params(stream,
1632 &stream->bit_depth_params);
1633 build_clamping_params(stream);
1641 return DC_ERROR_UNEXPECTED
;
1644 status
= build_pipe_hw_param(pipe_ctx
);
1650 static void acquire_dsc(struct resource_context
*res_ctx
,
1651 const struct resource_pool
*pool
,
1652 struct display_stream_compressor
**dsc
,
1657 ASSERT(*dsc
== NULL
);
1660 if (pool
->res_cap
->num_dsc
== pool
->res_cap
->num_opp
) {
1661 *dsc
= pool
->dscs
[pipe_idx
];
1662 res_ctx
->is_dsc_acquired
[pipe_idx
] = true;
1666 /* Find first free DSC */
1667 for (i
= 0; i
< pool
->res_cap
->num_dsc
; i
++)
1668 if (!res_ctx
->is_dsc_acquired
[i
]) {
1669 *dsc
= pool
->dscs
[i
];
1670 res_ctx
->is_dsc_acquired
[i
] = true;
1675 void dcn20_release_dsc(struct resource_context
*res_ctx
,
1676 const struct resource_pool
*pool
,
1677 struct display_stream_compressor
**dsc
)
1681 for (i
= 0; i
< pool
->res_cap
->num_dsc
; i
++)
1682 if (pool
->dscs
[i
] == *dsc
) {
1683 res_ctx
->is_dsc_acquired
[i
] = false;
1691 enum dc_status
dcn20_add_dsc_to_stream_resource(struct dc
*dc
,
1692 struct dc_state
*dc_ctx
,
1693 struct dc_stream_state
*dc_stream
)
1695 enum dc_status result
= DC_OK
;
1697 const struct resource_pool
*pool
= dc
->res_pool
;
1699 /* Get a DSC if required and available */
1700 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
1701 struct pipe_ctx
*pipe_ctx
= &dc_ctx
->res_ctx
.pipe_ctx
[i
];
1703 if (pipe_ctx
->stream
!= dc_stream
)
1706 if (pipe_ctx
->stream_res
.dsc
)
1709 acquire_dsc(&dc_ctx
->res_ctx
, pool
, &pipe_ctx
->stream_res
.dsc
, i
);
1711 /* The number of DSCs can be less than the number of pipes */
1712 if (!pipe_ctx
->stream_res
.dsc
) {
1713 result
= DC_NO_DSC_RESOURCE
;
1723 static enum dc_status
remove_dsc_from_stream_resource(struct dc
*dc
,
1724 struct dc_state
*new_ctx
,
1725 struct dc_stream_state
*dc_stream
)
1727 struct pipe_ctx
*pipe_ctx
= NULL
;
1730 for (i
= 0; i
< MAX_PIPES
; i
++) {
1731 if (new_ctx
->res_ctx
.pipe_ctx
[i
].stream
== dc_stream
&& !new_ctx
->res_ctx
.pipe_ctx
[i
].top_pipe
) {
1732 pipe_ctx
= &new_ctx
->res_ctx
.pipe_ctx
[i
];
1734 if (pipe_ctx
->stream_res
.dsc
)
1735 dcn20_release_dsc(&new_ctx
->res_ctx
, dc
->res_pool
, &pipe_ctx
->stream_res
.dsc
);
1740 return DC_ERROR_UNEXPECTED
;
1746 enum dc_status
dcn20_add_stream_to_ctx(struct dc
*dc
, struct dc_state
*new_ctx
, struct dc_stream_state
*dc_stream
)
1748 enum dc_status result
= DC_ERROR_UNEXPECTED
;
1750 result
= resource_map_pool_resources(dc
, new_ctx
, dc_stream
);
1752 if (result
== DC_OK
)
1753 result
= resource_map_phy_clock_resources(dc
, new_ctx
, dc_stream
);
1755 /* Get a DSC if required and available */
1756 if (result
== DC_OK
&& dc_stream
->timing
.flags
.DSC
)
1757 result
= dcn20_add_dsc_to_stream_resource(dc
, new_ctx
, dc_stream
);
1759 if (result
== DC_OK
)
1760 result
= dcn20_build_mapped_resource(dc
, new_ctx
, dc_stream
);
1766 enum dc_status
dcn20_remove_stream_from_ctx(struct dc
*dc
, struct dc_state
*new_ctx
, struct dc_stream_state
*dc_stream
)
1768 enum dc_status result
= DC_OK
;
1770 result
= remove_dsc_from_stream_resource(dc
, new_ctx
, dc_stream
);
1776 static void swizzle_to_dml_params(
1777 enum swizzle_mode_values swizzle
,
1778 unsigned int *sw_mode
)
1782 *sw_mode
= dm_sw_linear
;
1785 *sw_mode
= dm_sw_4kb_s
;
1788 *sw_mode
= dm_sw_4kb_s_x
;
1791 *sw_mode
= dm_sw_4kb_d
;
1794 *sw_mode
= dm_sw_4kb_d_x
;
1797 *sw_mode
= dm_sw_64kb_s
;
1799 case DC_SW_64KB_S_X
:
1800 *sw_mode
= dm_sw_64kb_s_x
;
1802 case DC_SW_64KB_S_T
:
1803 *sw_mode
= dm_sw_64kb_s_t
;
1806 *sw_mode
= dm_sw_64kb_d
;
1808 case DC_SW_64KB_D_X
:
1809 *sw_mode
= dm_sw_64kb_d_x
;
1811 case DC_SW_64KB_D_T
:
1812 *sw_mode
= dm_sw_64kb_d_t
;
1814 case DC_SW_64KB_R_X
:
1815 *sw_mode
= dm_sw_64kb_r_x
;
1818 *sw_mode
= dm_sw_var_s
;
1821 *sw_mode
= dm_sw_var_s_x
;
1824 *sw_mode
= dm_sw_var_d
;
1827 *sw_mode
= dm_sw_var_d_x
;
1831 ASSERT(0); /* Not supported */
1836 bool dcn20_split_stream_for_odm(
1837 struct resource_context
*res_ctx
,
1838 const struct resource_pool
*pool
,
1839 struct pipe_ctx
*prev_odm_pipe
,
1840 struct pipe_ctx
*next_odm_pipe
)
1842 int pipe_idx
= next_odm_pipe
->pipe_idx
;
1844 *next_odm_pipe
= *prev_odm_pipe
;
1846 next_odm_pipe
->pipe_idx
= pipe_idx
;
1847 next_odm_pipe
->plane_res
.mi
= pool
->mis
[next_odm_pipe
->pipe_idx
];
1848 next_odm_pipe
->plane_res
.hubp
= pool
->hubps
[next_odm_pipe
->pipe_idx
];
1849 next_odm_pipe
->plane_res
.ipp
= pool
->ipps
[next_odm_pipe
->pipe_idx
];
1850 next_odm_pipe
->plane_res
.xfm
= pool
->transforms
[next_odm_pipe
->pipe_idx
];
1851 next_odm_pipe
->plane_res
.dpp
= pool
->dpps
[next_odm_pipe
->pipe_idx
];
1852 next_odm_pipe
->plane_res
.mpcc_inst
= pool
->dpps
[next_odm_pipe
->pipe_idx
]->inst
;
1853 next_odm_pipe
->stream_res
.dsc
= NULL
;
1854 if (prev_odm_pipe
->next_odm_pipe
&& prev_odm_pipe
->next_odm_pipe
!= next_odm_pipe
) {
1855 next_odm_pipe
->next_odm_pipe
= prev_odm_pipe
->next_odm_pipe
;
1856 next_odm_pipe
->next_odm_pipe
->prev_odm_pipe
= next_odm_pipe
;
1858 prev_odm_pipe
->next_odm_pipe
= next_odm_pipe
;
1859 next_odm_pipe
->prev_odm_pipe
= prev_odm_pipe
;
1860 ASSERT(next_odm_pipe
->top_pipe
== NULL
);
1862 if (prev_odm_pipe
->plane_state
) {
1863 struct scaler_data
*sd
= &prev_odm_pipe
->plane_res
.scl_data
;
1866 /* HACTIVE halved for odm combine */
1868 /* Calculate new vp and recout for left pipe */
1869 /* Need at least 16 pixels width per side */
1870 if (sd
->recout
.x
+ 16 >= sd
->h_active
)
1872 new_width
= sd
->h_active
- sd
->recout
.x
;
1873 sd
->viewport
.width
-= dc_fixpt_floor(dc_fixpt_mul_int(
1874 sd
->ratios
.horz
, sd
->recout
.width
- new_width
));
1875 sd
->viewport_c
.width
-= dc_fixpt_floor(dc_fixpt_mul_int(
1876 sd
->ratios
.horz_c
, sd
->recout
.width
- new_width
));
1877 sd
->recout
.width
= new_width
;
1879 /* Calculate new vp and recout for right pipe */
1880 sd
= &next_odm_pipe
->plane_res
.scl_data
;
1881 /* HACTIVE halved for odm combine */
1883 /* Need at least 16 pixels width per side */
1884 if (new_width
<= 16)
1886 new_width
= sd
->recout
.width
+ sd
->recout
.x
- sd
->h_active
;
1887 sd
->viewport
.width
-= dc_fixpt_floor(dc_fixpt_mul_int(
1888 sd
->ratios
.horz
, sd
->recout
.width
- new_width
));
1889 sd
->viewport_c
.width
-= dc_fixpt_floor(dc_fixpt_mul_int(
1890 sd
->ratios
.horz_c
, sd
->recout
.width
- new_width
));
1891 sd
->recout
.width
= new_width
;
1892 sd
->viewport
.x
+= dc_fixpt_floor(dc_fixpt_mul_int(
1893 sd
->ratios
.horz
, sd
->h_active
- sd
->recout
.x
));
1894 sd
->viewport_c
.x
+= dc_fixpt_floor(dc_fixpt_mul_int(
1895 sd
->ratios
.horz_c
, sd
->h_active
- sd
->recout
.x
));
1898 next_odm_pipe
->stream_res
.opp
= pool
->opps
[next_odm_pipe
->pipe_idx
];
1899 if (next_odm_pipe
->stream
->timing
.flags
.DSC
== 1) {
1900 acquire_dsc(res_ctx
, pool
, &next_odm_pipe
->stream_res
.dsc
, next_odm_pipe
->pipe_idx
);
1901 ASSERT(next_odm_pipe
->stream_res
.dsc
);
1902 if (next_odm_pipe
->stream_res
.dsc
== NULL
)
1909 void dcn20_split_stream_for_mpc(
1910 struct resource_context
*res_ctx
,
1911 const struct resource_pool
*pool
,
1912 struct pipe_ctx
*primary_pipe
,
1913 struct pipe_ctx
*secondary_pipe
)
1915 int pipe_idx
= secondary_pipe
->pipe_idx
;
1916 struct pipe_ctx
*sec_bot_pipe
= secondary_pipe
->bottom_pipe
;
1918 *secondary_pipe
= *primary_pipe
;
1919 secondary_pipe
->bottom_pipe
= sec_bot_pipe
;
1921 secondary_pipe
->pipe_idx
= pipe_idx
;
1922 secondary_pipe
->plane_res
.mi
= pool
->mis
[secondary_pipe
->pipe_idx
];
1923 secondary_pipe
->plane_res
.hubp
= pool
->hubps
[secondary_pipe
->pipe_idx
];
1924 secondary_pipe
->plane_res
.ipp
= pool
->ipps
[secondary_pipe
->pipe_idx
];
1925 secondary_pipe
->plane_res
.xfm
= pool
->transforms
[secondary_pipe
->pipe_idx
];
1926 secondary_pipe
->plane_res
.dpp
= pool
->dpps
[secondary_pipe
->pipe_idx
];
1927 secondary_pipe
->plane_res
.mpcc_inst
= pool
->dpps
[secondary_pipe
->pipe_idx
]->inst
;
1928 secondary_pipe
->stream_res
.dsc
= NULL
;
1929 if (primary_pipe
->bottom_pipe
&& primary_pipe
->bottom_pipe
!= secondary_pipe
) {
1930 ASSERT(!secondary_pipe
->bottom_pipe
);
1931 secondary_pipe
->bottom_pipe
= primary_pipe
->bottom_pipe
;
1932 secondary_pipe
->bottom_pipe
->top_pipe
= secondary_pipe
;
1934 primary_pipe
->bottom_pipe
= secondary_pipe
;
1935 secondary_pipe
->top_pipe
= primary_pipe
;
1937 ASSERT(primary_pipe
->plane_state
);
1938 resource_build_scaling_params(primary_pipe
);
1939 resource_build_scaling_params(secondary_pipe
);
1942 void dcn20_populate_dml_writeback_from_context(
1943 struct dc
*dc
, struct resource_context
*res_ctx
, display_e2e_pipe_params_st
*pipes
)
1947 for (i
= 0, pipe_cnt
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
1948 struct dc_writeback_info
*wb_info
= &res_ctx
->pipe_ctx
[i
].stream
->writeback_info
[0];
1950 if (!res_ctx
->pipe_ctx
[i
].stream
)
1953 /* Set writeback information */
1954 pipes
[pipe_cnt
].dout
.wb_enable
= (wb_info
->wb_enabled
== true) ? 1 : 0;
1955 pipes
[pipe_cnt
].dout
.num_active_wb
++;
1956 pipes
[pipe_cnt
].dout
.wb
.wb_src_height
= wb_info
->dwb_params
.cnv_params
.crop_height
;
1957 pipes
[pipe_cnt
].dout
.wb
.wb_src_width
= wb_info
->dwb_params
.cnv_params
.crop_width
;
1958 pipes
[pipe_cnt
].dout
.wb
.wb_dst_width
= wb_info
->dwb_params
.dest_width
;
1959 pipes
[pipe_cnt
].dout
.wb
.wb_dst_height
= wb_info
->dwb_params
.dest_height
;
1960 pipes
[pipe_cnt
].dout
.wb
.wb_htaps_luma
= 1;
1961 pipes
[pipe_cnt
].dout
.wb
.wb_vtaps_luma
= 1;
1962 pipes
[pipe_cnt
].dout
.wb
.wb_htaps_chroma
= wb_info
->dwb_params
.scaler_taps
.h_taps_c
;
1963 pipes
[pipe_cnt
].dout
.wb
.wb_vtaps_chroma
= wb_info
->dwb_params
.scaler_taps
.v_taps_c
;
1964 pipes
[pipe_cnt
].dout
.wb
.wb_hratio
= 1.0;
1965 pipes
[pipe_cnt
].dout
.wb
.wb_vratio
= 1.0;
1966 if (wb_info
->dwb_params
.out_format
== dwb_scaler_mode_yuv420
) {
1967 if (wb_info
->dwb_params
.output_depth
== DWB_OUTPUT_PIXEL_DEPTH_8BPC
)
1968 pipes
[pipe_cnt
].dout
.wb
.wb_pixel_format
= dm_420_8
;
1970 pipes
[pipe_cnt
].dout
.wb
.wb_pixel_format
= dm_420_10
;
1972 pipes
[pipe_cnt
].dout
.wb
.wb_pixel_format
= dm_444_32
;
1979 int dcn20_populate_dml_pipes_from_context(
1980 struct dc
*dc
, struct dc_state
*context
, display_e2e_pipe_params_st
*pipes
)
1983 bool synchronized_vblank
= true;
1984 struct resource_context
*res_ctx
= &context
->res_ctx
;
1986 for (i
= 0, pipe_cnt
= -1; i
< dc
->res_pool
->pipe_count
; i
++) {
1987 if (!res_ctx
->pipe_ctx
[i
].stream
)
1994 if (dc
->debug
.disable_timing_sync
|| !resource_are_streams_timing_synchronizable(
1995 res_ctx
->pipe_ctx
[pipe_cnt
].stream
,
1996 res_ctx
->pipe_ctx
[i
].stream
)) {
1997 synchronized_vblank
= false;
2002 for (i
= 0, pipe_cnt
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
2003 struct dc_crtc_timing
*timing
= &res_ctx
->pipe_ctx
[i
].stream
->timing
;
2004 unsigned int v_total
;
2005 unsigned int front_porch
;
2008 if (!res_ctx
->pipe_ctx
[i
].stream
)
2011 v_total
= timing
->v_total
;
2012 front_porch
= timing
->v_front_porch
;
2014 pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0;
2015 pipes[pipe_cnt].pipe.src.dcc = 0;
2016 pipes[pipe_cnt].pipe.src.vm = 0;*/
2018 pipes
[pipe_cnt
].clks_cfg
.refclk_mhz
= dc
->res_pool
->ref_clocks
.dchub_ref_clock_inKhz
/ 1000.0;
2020 pipes
[pipe_cnt
].dout
.dsc_enable
= res_ctx
->pipe_ctx
[i
].stream
->timing
.flags
.DSC
;
2021 /* todo: rotation?*/
2022 pipes
[pipe_cnt
].dout
.dsc_slices
= res_ctx
->pipe_ctx
[i
].stream
->timing
.dsc_cfg
.num_slices_h
;
2023 if (res_ctx
->pipe_ctx
[i
].stream
->use_dynamic_meta
) {
2024 pipes
[pipe_cnt
].pipe
.src
.dynamic_metadata_enable
= true;
2026 pipes
[pipe_cnt
].pipe
.src
.dynamic_metadata_lines_before_active
=
2027 (v_total
- timing
->v_addressable
2028 - timing
->v_border_top
- timing
->v_border_bottom
) / 2;
2029 /* 36 bytes dp, 32 hdmi */
2030 pipes
[pipe_cnt
].pipe
.src
.dynamic_metadata_xmit_bytes
=
2031 dc_is_dp_signal(res_ctx
->pipe_ctx
[i
].stream
->signal
) ? 36 : 32;
2033 pipes
[pipe_cnt
].pipe
.src
.dcc
= false;
2034 pipes
[pipe_cnt
].pipe
.src
.dcc_rate
= 1;
2035 pipes
[pipe_cnt
].pipe
.dest
.synchronized_vblank_all_planes
= synchronized_vblank
;
2036 pipes
[pipe_cnt
].pipe
.dest
.hblank_start
= timing
->h_total
- timing
->h_front_porch
;
2037 pipes
[pipe_cnt
].pipe
.dest
.hblank_end
= pipes
[pipe_cnt
].pipe
.dest
.hblank_start
2038 - timing
->h_addressable
2039 - timing
->h_border_left
2040 - timing
->h_border_right
;
2041 pipes
[pipe_cnt
].pipe
.dest
.vblank_start
= v_total
- front_porch
;
2042 pipes
[pipe_cnt
].pipe
.dest
.vblank_end
= pipes
[pipe_cnt
].pipe
.dest
.vblank_start
2043 - timing
->v_addressable
2044 - timing
->v_border_top
2045 - timing
->v_border_bottom
;
2046 pipes
[pipe_cnt
].pipe
.dest
.htotal
= timing
->h_total
;
2047 pipes
[pipe_cnt
].pipe
.dest
.vtotal
= v_total
;
2048 pipes
[pipe_cnt
].pipe
.dest
.hactive
= timing
->h_addressable
;
2049 pipes
[pipe_cnt
].pipe
.dest
.vactive
= timing
->v_addressable
;
2050 pipes
[pipe_cnt
].pipe
.dest
.interlaced
= timing
->flags
.INTERLACE
;
2051 pipes
[pipe_cnt
].pipe
.dest
.pixel_rate_mhz
= timing
->pix_clk_100hz
/10000.0;
2052 if (timing
->timing_3d_format
== TIMING_3D_FORMAT_HW_FRAME_PACKING
)
2053 pipes
[pipe_cnt
].pipe
.dest
.pixel_rate_mhz
*= 2;
2054 pipes
[pipe_cnt
].pipe
.dest
.otg_inst
= res_ctx
->pipe_ctx
[i
].stream_res
.tg
->inst
;
2055 pipes
[pipe_cnt
].dout
.dp_lanes
= 4;
2056 pipes
[pipe_cnt
].pipe
.dest
.vtotal_min
= res_ctx
->pipe_ctx
[i
].stream
->adjust
.v_total_min
;
2057 pipes
[pipe_cnt
].pipe
.dest
.vtotal_max
= res_ctx
->pipe_ctx
[i
].stream
->adjust
.v_total_max
;
2058 switch (get_num_odm_splits(&res_ctx
->pipe_ctx
[i
])) {
2060 pipes
[pipe_cnt
].pipe
.dest
.odm_combine
= dm_odm_combine_mode_2to1
;
2063 pipes
[pipe_cnt
].pipe
.dest
.odm_combine
= dm_odm_combine_mode_disabled
;
2065 pipes
[pipe_cnt
].pipe
.src
.hsplit_grp
= res_ctx
->pipe_ctx
[i
].pipe_idx
;
2066 if (res_ctx
->pipe_ctx
[i
].top_pipe
&& res_ctx
->pipe_ctx
[i
].top_pipe
->plane_state
2067 == res_ctx
->pipe_ctx
[i
].plane_state
) {
2068 struct pipe_ctx
*first_pipe
= res_ctx
->pipe_ctx
[i
].top_pipe
;
2070 while (first_pipe
->top_pipe
&& first_pipe
->top_pipe
->plane_state
2071 == res_ctx
->pipe_ctx
[i
].plane_state
)
2072 first_pipe
= first_pipe
->top_pipe
;
2073 pipes
[pipe_cnt
].pipe
.src
.hsplit_grp
= first_pipe
->pipe_idx
;
2074 } else if (res_ctx
->pipe_ctx
[i
].prev_odm_pipe
) {
2075 struct pipe_ctx
*first_pipe
= res_ctx
->pipe_ctx
[i
].prev_odm_pipe
;
2077 while (first_pipe
->prev_odm_pipe
)
2078 first_pipe
= first_pipe
->prev_odm_pipe
;
2079 pipes
[pipe_cnt
].pipe
.src
.hsplit_grp
= first_pipe
->pipe_idx
;
2082 switch (res_ctx
->pipe_ctx
[i
].stream
->signal
) {
2083 case SIGNAL_TYPE_DISPLAY_PORT_MST
:
2084 case SIGNAL_TYPE_DISPLAY_PORT
:
2085 pipes
[pipe_cnt
].dout
.output_type
= dm_dp
;
2087 case SIGNAL_TYPE_EDP
:
2088 pipes
[pipe_cnt
].dout
.output_type
= dm_edp
;
2090 case SIGNAL_TYPE_HDMI_TYPE_A
:
2091 case SIGNAL_TYPE_DVI_SINGLE_LINK
:
2092 case SIGNAL_TYPE_DVI_DUAL_LINK
:
2093 pipes
[pipe_cnt
].dout
.output_type
= dm_hdmi
;
2096 /* In case there is no signal, set dp with 4 lanes to allow max config */
2097 pipes
[pipe_cnt
].dout
.output_type
= dm_dp
;
2098 pipes
[pipe_cnt
].dout
.dp_lanes
= 4;
2101 switch (res_ctx
->pipe_ctx
[i
].stream
->timing
.display_color_depth
) {
2102 case COLOR_DEPTH_666
:
2105 case COLOR_DEPTH_888
:
2108 case COLOR_DEPTH_101010
:
2111 case COLOR_DEPTH_121212
:
2114 case COLOR_DEPTH_141414
:
2117 case COLOR_DEPTH_161616
:
2120 case COLOR_DEPTH_999
:
2123 case COLOR_DEPTH_111111
:
2131 switch (res_ctx
->pipe_ctx
[i
].stream
->timing
.pixel_encoding
) {
2132 case PIXEL_ENCODING_RGB
:
2133 case PIXEL_ENCODING_YCBCR444
:
2134 pipes
[pipe_cnt
].dout
.output_format
= dm_444
;
2135 pipes
[pipe_cnt
].dout
.output_bpp
= output_bpc
* 3;
2137 case PIXEL_ENCODING_YCBCR420
:
2138 pipes
[pipe_cnt
].dout
.output_format
= dm_420
;
2139 pipes
[pipe_cnt
].dout
.output_bpp
= (output_bpc
* 3.0) / 2;
2141 case PIXEL_ENCODING_YCBCR422
:
2142 if (true) /* todo */
2143 pipes
[pipe_cnt
].dout
.output_format
= dm_s422
;
2145 pipes
[pipe_cnt
].dout
.output_format
= dm_n422
;
2146 pipes
[pipe_cnt
].dout
.output_bpp
= output_bpc
* 2;
2149 pipes
[pipe_cnt
].dout
.output_format
= dm_444
;
2150 pipes
[pipe_cnt
].dout
.output_bpp
= output_bpc
* 3;
2153 if (res_ctx
->pipe_ctx
[i
].stream
->timing
.flags
.DSC
)
2154 pipes
[pipe_cnt
].dout
.output_bpp
= res_ctx
->pipe_ctx
[i
].stream
->timing
.dsc_cfg
.bits_per_pixel
/ 16.0;
2156 /* todo: default max for now, until there is logic reflecting this in dc*/
2157 pipes
[pipe_cnt
].dout
.output_bpc
= 12;
2159 * For graphic plane, cursor number is 1, nv12 is 0
2160 * bw calculations due to cursor on/off
2162 if (res_ctx
->pipe_ctx
[i
].plane_state
&&
2163 res_ctx
->pipe_ctx
[i
].plane_state
->address
.type
== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE
)
2164 pipes
[pipe_cnt
].pipe
.src
.num_cursors
= 0;
2166 pipes
[pipe_cnt
].pipe
.src
.num_cursors
= dc
->dml
.ip
.number_of_cursors
;
2168 pipes
[pipe_cnt
].pipe
.src
.cur0_src_width
= 256;
2169 pipes
[pipe_cnt
].pipe
.src
.cur0_bpp
= dm_cur_32bit
;
2171 if (!res_ctx
->pipe_ctx
[i
].plane_state
) {
2172 pipes
[pipe_cnt
].pipe
.src
.is_hsplit
= pipes
[pipe_cnt
].pipe
.dest
.odm_combine
!= dm_odm_combine_mode_disabled
;
2173 pipes
[pipe_cnt
].pipe
.src
.source_scan
= dm_horz
;
2174 pipes
[pipe_cnt
].pipe
.src
.sw_mode
= dm_sw_linear
;
2175 pipes
[pipe_cnt
].pipe
.src
.macro_tile_size
= dm_64k_tile
;
2176 pipes
[pipe_cnt
].pipe
.src
.viewport_width
= timing
->h_addressable
;
2177 if (pipes
[pipe_cnt
].pipe
.src
.viewport_width
> 1920)
2178 pipes
[pipe_cnt
].pipe
.src
.viewport_width
= 1920;
2179 pipes
[pipe_cnt
].pipe
.src
.viewport_height
= timing
->v_addressable
;
2180 if (pipes
[pipe_cnt
].pipe
.src
.viewport_height
> 1080)
2181 pipes
[pipe_cnt
].pipe
.src
.viewport_height
= 1080;
2182 pipes
[pipe_cnt
].pipe
.src
.surface_height_y
= pipes
[pipe_cnt
].pipe
.src
.viewport_height
;
2183 pipes
[pipe_cnt
].pipe
.src
.surface_width_y
= pipes
[pipe_cnt
].pipe
.src
.viewport_width
;
2184 pipes
[pipe_cnt
].pipe
.src
.surface_height_c
= pipes
[pipe_cnt
].pipe
.src
.viewport_height
;
2185 pipes
[pipe_cnt
].pipe
.src
.surface_width_c
= pipes
[pipe_cnt
].pipe
.src
.viewport_width
;
2186 pipes
[pipe_cnt
].pipe
.src
.data_pitch
= ((pipes
[pipe_cnt
].pipe
.src
.viewport_width
+ 63) / 64) * 64; /* linear sw only */
2187 pipes
[pipe_cnt
].pipe
.src
.source_format
= dm_444_32
;
2188 pipes
[pipe_cnt
].pipe
.dest
.recout_width
= pipes
[pipe_cnt
].pipe
.src
.viewport_width
; /*vp_width/hratio*/
2189 pipes
[pipe_cnt
].pipe
.dest
.recout_height
= pipes
[pipe_cnt
].pipe
.src
.viewport_height
; /*vp_height/vratio*/
2190 pipes
[pipe_cnt
].pipe
.dest
.full_recout_width
= pipes
[pipe_cnt
].pipe
.dest
.recout_width
; /*when is_hsplit != 1*/
2191 pipes
[pipe_cnt
].pipe
.dest
.full_recout_height
= pipes
[pipe_cnt
].pipe
.dest
.recout_height
; /*when is_hsplit != 1*/
2192 pipes
[pipe_cnt
].pipe
.scale_ratio_depth
.lb_depth
= dm_lb_16
;
2193 pipes
[pipe_cnt
].pipe
.scale_ratio_depth
.hscl_ratio
= 1.0;
2194 pipes
[pipe_cnt
].pipe
.scale_ratio_depth
.vscl_ratio
= 1.0;
2195 pipes
[pipe_cnt
].pipe
.scale_ratio_depth
.scl_enable
= 0; /*Lb only or Full scl*/
2196 pipes
[pipe_cnt
].pipe
.scale_taps
.htaps
= 1;
2197 pipes
[pipe_cnt
].pipe
.scale_taps
.vtaps
= 1;
2198 pipes
[pipe_cnt
].pipe
.dest
.vtotal_min
= v_total
;
2199 pipes
[pipe_cnt
].pipe
.dest
.vtotal_max
= v_total
;
2201 if (pipes
[pipe_cnt
].pipe
.dest
.odm_combine
== dm_odm_combine_mode_2to1
) {
2202 pipes
[pipe_cnt
].pipe
.src
.viewport_width
/= 2;
2203 pipes
[pipe_cnt
].pipe
.dest
.recout_width
/= 2;
2206 struct dc_plane_state
*pln
= res_ctx
->pipe_ctx
[i
].plane_state
;
2207 struct scaler_data
*scl
= &res_ctx
->pipe_ctx
[i
].plane_res
.scl_data
;
2209 pipes
[pipe_cnt
].pipe
.src
.immediate_flip
= pln
->flip_immediate
;
2210 pipes
[pipe_cnt
].pipe
.src
.is_hsplit
= (res_ctx
->pipe_ctx
[i
].bottom_pipe
&& res_ctx
->pipe_ctx
[i
].bottom_pipe
->plane_state
== pln
)
2211 || (res_ctx
->pipe_ctx
[i
].top_pipe
&& res_ctx
->pipe_ctx
[i
].top_pipe
->plane_state
== pln
)
2212 || pipes
[pipe_cnt
].pipe
.dest
.odm_combine
!= dm_odm_combine_mode_disabled
;
2213 pipes
[pipe_cnt
].pipe
.src
.source_scan
= pln
->rotation
== ROTATION_ANGLE_90
2214 || pln
->rotation
== ROTATION_ANGLE_270
? dm_vert
: dm_horz
;
2215 pipes
[pipe_cnt
].pipe
.src
.viewport_y_y
= scl
->viewport
.y
;
2216 pipes
[pipe_cnt
].pipe
.src
.viewport_y_c
= scl
->viewport_c
.y
;
2217 pipes
[pipe_cnt
].pipe
.src
.viewport_width
= scl
->viewport
.width
;
2218 pipes
[pipe_cnt
].pipe
.src
.viewport_width_c
= scl
->viewport_c
.width
;
2219 pipes
[pipe_cnt
].pipe
.src
.viewport_height
= scl
->viewport
.height
;
2220 pipes
[pipe_cnt
].pipe
.src
.viewport_height_c
= scl
->viewport_c
.height
;
2221 pipes
[pipe_cnt
].pipe
.src
.surface_width_y
= pln
->plane_size
.surface_size
.width
;
2222 pipes
[pipe_cnt
].pipe
.src
.surface_height_y
= pln
->plane_size
.surface_size
.height
;
2223 pipes
[pipe_cnt
].pipe
.src
.surface_width_c
= pln
->plane_size
.chroma_size
.width
;
2224 pipes
[pipe_cnt
].pipe
.src
.surface_height_c
= pln
->plane_size
.chroma_size
.height
;
2225 if (pln
->format
>= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
2226 pipes
[pipe_cnt
].pipe
.src
.data_pitch
= pln
->plane_size
.surface_pitch
;
2227 pipes
[pipe_cnt
].pipe
.src
.data_pitch_c
= pln
->plane_size
.chroma_pitch
;
2228 pipes
[pipe_cnt
].pipe
.src
.meta_pitch
= pln
->dcc
.meta_pitch
;
2229 pipes
[pipe_cnt
].pipe
.src
.meta_pitch_c
= pln
->dcc
.meta_pitch_c
;
2231 pipes
[pipe_cnt
].pipe
.src
.data_pitch
= pln
->plane_size
.surface_pitch
;
2232 pipes
[pipe_cnt
].pipe
.src
.meta_pitch
= pln
->dcc
.meta_pitch
;
2234 pipes
[pipe_cnt
].pipe
.src
.dcc
= pln
->dcc
.enable
;
2235 pipes
[pipe_cnt
].pipe
.dest
.recout_width
= scl
->recout
.width
;
2236 pipes
[pipe_cnt
].pipe
.dest
.recout_height
= scl
->recout
.height
;
2237 pipes
[pipe_cnt
].pipe
.dest
.full_recout_height
= scl
->recout
.height
;
2238 pipes
[pipe_cnt
].pipe
.dest
.full_recout_width
= scl
->recout
.width
;
2239 if (pipes
[pipe_cnt
].pipe
.dest
.odm_combine
== dm_odm_combine_mode_2to1
)
2240 pipes
[pipe_cnt
].pipe
.dest
.full_recout_width
*= 2;
2242 struct pipe_ctx
*split_pipe
= res_ctx
->pipe_ctx
[i
].bottom_pipe
;
2244 while (split_pipe
&& split_pipe
->plane_state
== pln
) {
2245 pipes
[pipe_cnt
].pipe
.dest
.full_recout_width
+= split_pipe
->plane_res
.scl_data
.recout
.width
;
2246 split_pipe
= split_pipe
->bottom_pipe
;
2248 split_pipe
= res_ctx
->pipe_ctx
[i
].top_pipe
;
2249 while (split_pipe
&& split_pipe
->plane_state
== pln
) {
2250 pipes
[pipe_cnt
].pipe
.dest
.full_recout_width
+= split_pipe
->plane_res
.scl_data
.recout
.width
;
2251 split_pipe
= split_pipe
->top_pipe
;
2255 pipes
[pipe_cnt
].pipe
.scale_ratio_depth
.lb_depth
= dm_lb_16
;
2256 pipes
[pipe_cnt
].pipe
.scale_ratio_depth
.hscl_ratio
= (double) scl
->ratios
.horz
.value
/ (1ULL<<32);
2257 pipes
[pipe_cnt
].pipe
.scale_ratio_depth
.hscl_ratio_c
= (double) scl
->ratios
.horz_c
.value
/ (1ULL<<32);
2258 pipes
[pipe_cnt
].pipe
.scale_ratio_depth
.vscl_ratio
= (double) scl
->ratios
.vert
.value
/ (1ULL<<32);
2259 pipes
[pipe_cnt
].pipe
.scale_ratio_depth
.vscl_ratio_c
= (double) scl
->ratios
.vert_c
.value
/ (1ULL<<32);
2260 pipes
[pipe_cnt
].pipe
.scale_ratio_depth
.scl_enable
=
2261 scl
->ratios
.vert
.value
!= dc_fixpt_one
.value
2262 || scl
->ratios
.horz
.value
!= dc_fixpt_one
.value
2263 || scl
->ratios
.vert_c
.value
!= dc_fixpt_one
.value
2264 || scl
->ratios
.horz_c
.value
!= dc_fixpt_one
.value
/*Lb only or Full scl*/
2265 || dc
->debug
.always_scale
; /*support always scale*/
2266 pipes
[pipe_cnt
].pipe
.scale_taps
.htaps
= scl
->taps
.h_taps
;
2267 pipes
[pipe_cnt
].pipe
.scale_taps
.htaps_c
= scl
->taps
.h_taps_c
;
2268 pipes
[pipe_cnt
].pipe
.scale_taps
.vtaps
= scl
->taps
.v_taps
;
2269 pipes
[pipe_cnt
].pipe
.scale_taps
.vtaps_c
= scl
->taps
.v_taps_c
;
2271 pipes
[pipe_cnt
].pipe
.src
.macro_tile_size
=
2272 swizzle_mode_to_macro_tile_size(pln
->tiling_info
.gfx9
.swizzle
);
2273 swizzle_to_dml_params(pln
->tiling_info
.gfx9
.swizzle
,
2274 &pipes
[pipe_cnt
].pipe
.src
.sw_mode
);
2276 switch (pln
->format
) {
2277 case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr
:
2278 case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb
:
2279 pipes
[pipe_cnt
].pipe
.src
.source_format
= dm_420_8
;
2281 case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr
:
2282 case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb
:
2283 pipes
[pipe_cnt
].pipe
.src
.source_format
= dm_420_10
;
2285 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616
:
2286 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F
:
2287 case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F
:
2288 pipes
[pipe_cnt
].pipe
.src
.source_format
= dm_444_64
;
2290 case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555
:
2291 case SURFACE_PIXEL_FORMAT_GRPH_RGB565
:
2292 pipes
[pipe_cnt
].pipe
.src
.source_format
= dm_444_16
;
2294 case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS
:
2295 pipes
[pipe_cnt
].pipe
.src
.source_format
= dm_444_8
;
2298 pipes
[pipe_cnt
].pipe
.src
.source_format
= dm_444_32
;
2306 /* populate writeback information */
2307 dc
->res_pool
->funcs
->populate_dml_writeback_from_context(dc
, res_ctx
, pipes
);
2312 unsigned int dcn20_calc_max_scaled_time(
2313 unsigned int time_per_pixel
,
2314 enum mmhubbub_wbif_mode mode
,
2315 unsigned int urgent_watermark
)
2317 unsigned int time_per_byte
= 0;
2318 unsigned int total_y_free_entry
= 0x200; /* two memory piece for luma */
2319 unsigned int total_c_free_entry
= 0x140; /* two memory piece for chroma */
2320 unsigned int small_free_entry
, max_free_entry
;
2321 unsigned int buf_lh_capability
;
2322 unsigned int max_scaled_time
;
2324 if (mode
== PACKED_444
) /* packed mode */
2325 time_per_byte
= time_per_pixel
/4;
2326 else if (mode
== PLANAR_420_8BPC
)
2327 time_per_byte
= time_per_pixel
;
2328 else if (mode
== PLANAR_420_10BPC
) /* p010 */
2329 time_per_byte
= time_per_pixel
* 819/1024;
2331 if (time_per_byte
== 0)
2334 small_free_entry
= (total_y_free_entry
> total_c_free_entry
) ? total_c_free_entry
: total_y_free_entry
;
2335 max_free_entry
= (mode
== PACKED_444
) ? total_y_free_entry
+ total_c_free_entry
: small_free_entry
;
2336 buf_lh_capability
= max_free_entry
*time_per_byte
*32/16; /* there is 4bit fraction */
2337 max_scaled_time
= buf_lh_capability
- urgent_watermark
;
2338 return max_scaled_time
;
2341 void dcn20_set_mcif_arb_params(
2343 struct dc_state
*context
,
2344 display_e2e_pipe_params_st
*pipes
,
2347 enum mmhubbub_wbif_mode wbif_mode
;
2348 struct mcif_arb_params
*wb_arb_params
;
2349 int i
, j
, k
, dwb_pipe
;
2351 /* Writeback MCIF_WB arbitration parameters */
2353 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
2355 if (!context
->res_ctx
.pipe_ctx
[i
].stream
)
2358 for (j
= 0; j
< MAX_DWB_PIPES
; j
++) {
2359 if (context
->res_ctx
.pipe_ctx
[i
].stream
->writeback_info
[j
].wb_enabled
== false)
2362 //wb_arb_params = &context->res_ctx.pipe_ctx[i].stream->writeback_info[j].mcif_arb_params;
2363 wb_arb_params
= &context
->bw_ctx
.bw
.dcn
.bw_writeback
.mcif_wb_arb
[dwb_pipe
];
2365 if (context
->res_ctx
.pipe_ctx
[i
].stream
->writeback_info
[j
].dwb_params
.out_format
== dwb_scaler_mode_yuv420
) {
2366 if (context
->res_ctx
.pipe_ctx
[i
].stream
->writeback_info
[j
].dwb_params
.output_depth
== DWB_OUTPUT_PIXEL_DEPTH_8BPC
)
2367 wbif_mode
= PLANAR_420_8BPC
;
2369 wbif_mode
= PLANAR_420_10BPC
;
2371 wbif_mode
= PACKED_444
;
2373 for (k
= 0; k
< sizeof(wb_arb_params
->cli_watermark
)/sizeof(wb_arb_params
->cli_watermark
[0]); k
++) {
2374 wb_arb_params
->cli_watermark
[k
] = get_wm_writeback_urgent(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2375 wb_arb_params
->pstate_watermark
[k
] = get_wm_writeback_dram_clock_change(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2377 wb_arb_params
->time_per_pixel
= 16.0 / context
->res_ctx
.pipe_ctx
[i
].stream
->phy_pix_clk
; /* 4 bit fraction, ms */
2378 wb_arb_params
->slice_lines
= 32;
2379 wb_arb_params
->arbitration_slice
= 2;
2380 wb_arb_params
->max_scaled_time
= dcn20_calc_max_scaled_time(wb_arb_params
->time_per_pixel
,
2382 wb_arb_params
->cli_watermark
[0]); /* assume 4 watermark sets have the same value */
2386 if (dwb_pipe
>= MAX_DWB_PIPES
)
2389 if (dwb_pipe
>= MAX_DWB_PIPES
)
2394 bool dcn20_validate_dsc(struct dc
*dc
, struct dc_state
*new_ctx
)
2398 /* Validate DSC config, dsc count validation is already done */
2399 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
2400 struct pipe_ctx
*pipe_ctx
= &new_ctx
->res_ctx
.pipe_ctx
[i
];
2401 struct dc_stream_state
*stream
= pipe_ctx
->stream
;
2402 struct dsc_config dsc_cfg
;
2403 struct pipe_ctx
*odm_pipe
;
2406 for (odm_pipe
= pipe_ctx
->next_odm_pipe
; odm_pipe
; odm_pipe
= odm_pipe
->next_odm_pipe
)
2409 /* Only need to validate top pipe */
2410 if (pipe_ctx
->top_pipe
|| pipe_ctx
->prev_odm_pipe
|| !stream
|| !stream
->timing
.flags
.DSC
)
2413 dsc_cfg
.pic_width
= (stream
->timing
.h_addressable
+ stream
->timing
.h_border_left
2414 + stream
->timing
.h_border_right
) / opp_cnt
;
2415 dsc_cfg
.pic_height
= stream
->timing
.v_addressable
+ stream
->timing
.v_border_top
2416 + stream
->timing
.v_border_bottom
;
2417 dsc_cfg
.pixel_encoding
= stream
->timing
.pixel_encoding
;
2418 dsc_cfg
.color_depth
= stream
->timing
.display_color_depth
;
2419 dsc_cfg
.is_odm
= pipe_ctx
->next_odm_pipe
? true : false;
2420 dsc_cfg
.dc_dsc_cfg
= stream
->timing
.dsc_cfg
;
2421 dsc_cfg
.dc_dsc_cfg
.num_slices_h
/= opp_cnt
;
2423 if (!pipe_ctx
->stream_res
.dsc
->funcs
->dsc_validate_stream(pipe_ctx
->stream_res
.dsc
, &dsc_cfg
))
2429 struct pipe_ctx
*dcn20_find_secondary_pipe(struct dc
*dc
,
2430 struct resource_context
*res_ctx
,
2431 const struct resource_pool
*pool
,
2432 const struct pipe_ctx
*primary_pipe
)
2434 struct pipe_ctx
*secondary_pipe
= NULL
;
2436 if (dc
&& primary_pipe
) {
2438 int preferred_pipe_idx
= 0;
2440 /* first check the prev dc state:
2441 * if this primary pipe has a bottom pipe in prev. state
2442 * and if the bottom pipe is still available (which it should be),
2443 * pick that pipe as secondary
2444 * Same logic applies for ODM pipes. Since mpo is not allowed with odm
2445 * check in else case.
2447 if (dc
->current_state
->res_ctx
.pipe_ctx
[primary_pipe
->pipe_idx
].bottom_pipe
) {
2448 preferred_pipe_idx
= dc
->current_state
->res_ctx
.pipe_ctx
[primary_pipe
->pipe_idx
].bottom_pipe
->pipe_idx
;
2449 if (res_ctx
->pipe_ctx
[preferred_pipe_idx
].stream
== NULL
) {
2450 secondary_pipe
= &res_ctx
->pipe_ctx
[preferred_pipe_idx
];
2451 secondary_pipe
->pipe_idx
= preferred_pipe_idx
;
2453 } else if (dc
->current_state
->res_ctx
.pipe_ctx
[primary_pipe
->pipe_idx
].next_odm_pipe
) {
2454 preferred_pipe_idx
= dc
->current_state
->res_ctx
.pipe_ctx
[primary_pipe
->pipe_idx
].next_odm_pipe
->pipe_idx
;
2455 if (res_ctx
->pipe_ctx
[preferred_pipe_idx
].stream
== NULL
) {
2456 secondary_pipe
= &res_ctx
->pipe_ctx
[preferred_pipe_idx
];
2457 secondary_pipe
->pipe_idx
= preferred_pipe_idx
;
2462 * if this primary pipe does not have a bottom pipe in prev. state
2463 * start backward and find a pipe that did not used to be a bottom pipe in
2464 * prev. dc state. This way we make sure we keep the same assignment as
2465 * last state and will not have to reprogram every pipe
2467 if (secondary_pipe
== NULL
) {
2468 for (j
= dc
->res_pool
->pipe_count
- 1; j
>= 0; j
--) {
2469 if (dc
->current_state
->res_ctx
.pipe_ctx
[j
].top_pipe
== NULL
2470 && dc
->current_state
->res_ctx
.pipe_ctx
[j
].prev_odm_pipe
== NULL
) {
2471 preferred_pipe_idx
= j
;
2473 if (res_ctx
->pipe_ctx
[preferred_pipe_idx
].stream
== NULL
) {
2474 secondary_pipe
= &res_ctx
->pipe_ctx
[preferred_pipe_idx
];
2475 secondary_pipe
->pipe_idx
= preferred_pipe_idx
;
2482 * We should never hit this assert unless assignments are shuffled around
2483 * if this happens we will prob. hit a vsync tdr
2485 ASSERT(secondary_pipe
);
2487 * search backwards for the second pipe to keep pipe
2488 * assignment more consistent
2490 if (secondary_pipe
== NULL
) {
2491 for (j
= dc
->res_pool
->pipe_count
- 1; j
>= 0; j
--) {
2492 preferred_pipe_idx
= j
;
2494 if (res_ctx
->pipe_ctx
[preferred_pipe_idx
].stream
== NULL
) {
2495 secondary_pipe
= &res_ctx
->pipe_ctx
[preferred_pipe_idx
];
2496 secondary_pipe
->pipe_idx
= preferred_pipe_idx
;
2503 return secondary_pipe
;
2506 static void dcn20_merge_pipes_for_validate(
2508 struct dc_state
*context
)
2512 /* merge previously split odm pipes since mode support needs to make the decision */
2513 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
2514 struct pipe_ctx
*pipe
= &context
->res_ctx
.pipe_ctx
[i
];
2515 struct pipe_ctx
*odm_pipe
= pipe
->next_odm_pipe
;
2517 if (pipe
->prev_odm_pipe
)
2520 pipe
->next_odm_pipe
= NULL
;
2522 struct pipe_ctx
*next_odm_pipe
= odm_pipe
->next_odm_pipe
;
2524 odm_pipe
->plane_state
= NULL
;
2525 odm_pipe
->stream
= NULL
;
2526 odm_pipe
->top_pipe
= NULL
;
2527 odm_pipe
->bottom_pipe
= NULL
;
2528 odm_pipe
->prev_odm_pipe
= NULL
;
2529 odm_pipe
->next_odm_pipe
= NULL
;
2530 if (odm_pipe
->stream_res
.dsc
)
2531 dcn20_release_dsc(&context
->res_ctx
, dc
->res_pool
, &odm_pipe
->stream_res
.dsc
);
2532 /* Clear plane_res and stream_res */
2533 memset(&odm_pipe
->plane_res
, 0, sizeof(odm_pipe
->plane_res
));
2534 memset(&odm_pipe
->stream_res
, 0, sizeof(odm_pipe
->stream_res
));
2535 odm_pipe
= next_odm_pipe
;
2537 if (pipe
->plane_state
)
2538 resource_build_scaling_params(pipe
);
2541 /* merge previously mpc split pipes since mode support needs to make the decision */
2542 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
2543 struct pipe_ctx
*pipe
= &context
->res_ctx
.pipe_ctx
[i
];
2544 struct pipe_ctx
*hsplit_pipe
= pipe
->bottom_pipe
;
2546 if (!hsplit_pipe
|| hsplit_pipe
->plane_state
!= pipe
->plane_state
)
2549 pipe
->bottom_pipe
= hsplit_pipe
->bottom_pipe
;
2550 if (hsplit_pipe
->bottom_pipe
)
2551 hsplit_pipe
->bottom_pipe
->top_pipe
= pipe
;
2552 hsplit_pipe
->plane_state
= NULL
;
2553 hsplit_pipe
->stream
= NULL
;
2554 hsplit_pipe
->top_pipe
= NULL
;
2555 hsplit_pipe
->bottom_pipe
= NULL
;
2557 /* Clear plane_res and stream_res */
2558 memset(&hsplit_pipe
->plane_res
, 0, sizeof(hsplit_pipe
->plane_res
));
2559 memset(&hsplit_pipe
->stream_res
, 0, sizeof(hsplit_pipe
->stream_res
));
2560 if (pipe
->plane_state
)
2561 resource_build_scaling_params(pipe
);
2565 int dcn20_validate_apply_pipe_split_flags(
2567 struct dc_state
*context
,
2572 int i
, pipe_idx
, vlevel_split
;
2573 int plane_count
= 0;
2574 bool force_split
= false;
2575 bool avoid_split
= dc
->debug
.pipe_split_policy
== MPC_SPLIT_AVOID
;
2577 if (context
->stream_count
> 1) {
2578 if (dc
->debug
.pipe_split_policy
== MPC_SPLIT_AVOID_MULT_DISP
)
2580 } else if (dc
->debug
.force_single_disp_pipe_split
)
2583 /* TODO: fix dc bugs and remove this split threshold thing */
2584 for (i
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
2585 struct pipe_ctx
*pipe
= &context
->res_ctx
.pipe_ctx
[i
];
2587 if (pipe
->stream
&& !pipe
->prev_odm_pipe
&&
2588 (!pipe
->top_pipe
|| pipe
->top_pipe
->plane_state
!= pipe
->plane_state
))
2591 if (plane_count
> dc
->res_pool
->pipe_count
/ 2)
2594 /* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */
2596 for (i
= 0, pipe_idx
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
2597 if (!context
->res_ctx
.pipe_ctx
[i
].stream
)
2600 for (vlevel_split
= vlevel
; vlevel
<= context
->bw_ctx
.dml
.soc
.num_states
; vlevel
++)
2601 if (context
->bw_ctx
.dml
.vba
.NoOfDPP
[vlevel
][0][pipe_idx
] == 1)
2603 /* Impossible to not split this pipe */
2604 if (vlevel
> context
->bw_ctx
.dml
.soc
.num_states
)
2605 vlevel
= vlevel_split
;
2608 context
->bw_ctx
.dml
.vba
.maxMpcComb
= 0;
2611 /* Split loop sets which pipe should be split based on dml outputs and dc flags */
2612 for (i
= 0, pipe_idx
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
2613 struct pipe_ctx
*pipe
= &context
->res_ctx
.pipe_ctx
[i
];
2614 int pipe_plane
= context
->bw_ctx
.dml
.vba
.pipe_plane
[pipe_idx
];
2616 if (!context
->res_ctx
.pipe_ctx
[i
].stream
)
2619 if (force_split
|| context
->bw_ctx
.dml
.vba
.NoOfDPP
[vlevel
][context
->bw_ctx
.dml
.vba
.maxMpcComb
][pipe_plane
] > 1)
2621 if ((pipe
->stream
->view_format
==
2622 VIEW_3D_FORMAT_SIDE_BY_SIDE
||
2623 pipe
->stream
->view_format
==
2624 VIEW_3D_FORMAT_TOP_AND_BOTTOM
) &&
2625 (pipe
->stream
->timing
.timing_3d_format
==
2626 TIMING_3D_FORMAT_TOP_AND_BOTTOM
||
2627 pipe
->stream
->timing
.timing_3d_format
==
2628 TIMING_3D_FORMAT_SIDE_BY_SIDE
))
2630 if (dc
->debug
.force_odm_combine
& (1 << pipe
->stream_res
.tg
->inst
)) {
2632 context
->bw_ctx
.dml
.vba
.ODMCombineEnablePerState
[vlevel
][pipe_plane
] = dm_odm_combine_mode_2to1
;
2634 context
->bw_ctx
.dml
.vba
.ODMCombineEnabled
[pipe_plane
] =
2635 context
->bw_ctx
.dml
.vba
.ODMCombineEnablePerState
[vlevel
][pipe_plane
];
2637 if (pipe
->prev_odm_pipe
&& context
->bw_ctx
.dml
.vba
.ODMCombineEnabled
[pipe_plane
] != dm_odm_combine_mode_disabled
) {
2638 /*Already split odm pipe tree, don't try to split again*/
2640 split
[pipe
->prev_odm_pipe
->pipe_idx
] = false;
2641 } else if (pipe
->top_pipe
&& pipe
->plane_state
== pipe
->top_pipe
->plane_state
2642 && context
->bw_ctx
.dml
.vba
.ODMCombineEnabled
[pipe_plane
] == dm_odm_combine_mode_disabled
) {
2643 /*Already split mpc tree, don't try to split again, assumes only 2x mpc combine*/
2645 split
[pipe
->top_pipe
->pipe_idx
] = false;
2646 } else if (pipe
->prev_odm_pipe
|| (pipe
->top_pipe
&& pipe
->plane_state
== pipe
->top_pipe
->plane_state
)) {
2647 if (split
[i
] == false) {
2648 /*Exiting mpc/odm combine*/
2650 if (pipe
->prev_odm_pipe
) {
2651 ASSERT(0); /*should not actually happen yet*/
2652 merge
[pipe
->prev_odm_pipe
->pipe_idx
] = true;
2654 merge
[pipe
->top_pipe
->pipe_idx
] = true;
2656 /*Transition from mpc combine to odm combine or vice versa*/
2657 ASSERT(0); /*should not actually happen yet*/
2660 if (pipe
->prev_odm_pipe
) {
2661 split
[pipe
->prev_odm_pipe
->pipe_idx
] = true;
2662 merge
[pipe
->prev_odm_pipe
->pipe_idx
] = true;
2664 split
[pipe
->top_pipe
->pipe_idx
] = true;
2665 merge
[pipe
->top_pipe
->pipe_idx
] = true;
2670 /* Adjust dppclk when split is forced, do not bother with dispclk */
2671 if (split
[i
] && context
->bw_ctx
.dml
.vba
.NoOfDPP
[vlevel
][context
->bw_ctx
.dml
.vba
.maxMpcComb
][pipe_idx
] == 1)
2672 context
->bw_ctx
.dml
.vba
.RequiredDPPCLK
[vlevel
][context
->bw_ctx
.dml
.vba
.maxMpcComb
][pipe_idx
] /= 2;
2679 bool dcn20_fast_validate_bw(
2681 struct dc_state
*context
,
2682 display_e2e_pipe_params_st
*pipes
,
2684 int *pipe_split_from
,
2688 bool split
[MAX_PIPES
] = { false };
2689 int pipe_cnt
, i
, pipe_idx
, vlevel
;
2695 dcn20_merge_pipes_for_validate(dc
, context
);
2697 pipe_cnt
= dc
->res_pool
->funcs
->populate_dml_pipes(dc
, context
, pipes
);
2699 *pipe_cnt_out
= pipe_cnt
;
2706 vlevel
= dml_get_voltage_level(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
);
2708 if (vlevel
> context
->bw_ctx
.dml
.soc
.num_states
)
2711 vlevel
= dcn20_validate_apply_pipe_split_flags(dc
, context
, vlevel
, split
, NULL
);
2713 /*initialize pipe_just_split_from to invalid idx*/
2714 for (i
= 0; i
< MAX_PIPES
; i
++)
2715 pipe_split_from
[i
] = -1;
2717 for (i
= 0, pipe_idx
= -1; i
< dc
->res_pool
->pipe_count
; i
++) {
2718 struct pipe_ctx
*pipe
= &context
->res_ctx
.pipe_ctx
[i
];
2719 struct pipe_ctx
*hsplit_pipe
= pipe
->bottom_pipe
;
2721 if (!pipe
->stream
|| pipe_split_from
[i
] >= 0)
2726 if (!pipe
->top_pipe
&& !pipe
->plane_state
&& context
->bw_ctx
.dml
.vba
.ODMCombineEnabled
[pipe_idx
]) {
2727 hsplit_pipe
= dcn20_find_secondary_pipe(dc
, &context
->res_ctx
, dc
->res_pool
, pipe
);
2728 ASSERT(hsplit_pipe
);
2729 if (!dcn20_split_stream_for_odm(
2730 &context
->res_ctx
, dc
->res_pool
,
2733 pipe_split_from
[hsplit_pipe
->pipe_idx
] = pipe_idx
;
2734 dcn20_build_mapped_resource(dc
, context
, pipe
->stream
);
2737 if (!pipe
->plane_state
)
2739 /* Skip 2nd half of already split pipe */
2740 if (pipe
->top_pipe
&& pipe
->plane_state
== pipe
->top_pipe
->plane_state
)
2743 /* We do not support mpo + odm at the moment */
2744 if (hsplit_pipe
&& hsplit_pipe
->plane_state
!= pipe
->plane_state
2745 && context
->bw_ctx
.dml
.vba
.ODMCombineEnabled
[pipe_idx
])
2749 if (!hsplit_pipe
|| hsplit_pipe
->plane_state
!= pipe
->plane_state
) {
2750 /* pipe not split previously needs split */
2751 hsplit_pipe
= dcn20_find_secondary_pipe(dc
, &context
->res_ctx
, dc
->res_pool
, pipe
);
2752 ASSERT(hsplit_pipe
);
2754 context
->bw_ctx
.dml
.vba
.RequiredDPPCLK
[vlevel
][context
->bw_ctx
.dml
.vba
.maxMpcComb
][pipe_idx
] *= 2;
2757 if (context
->bw_ctx
.dml
.vba
.ODMCombineEnabled
[pipe_idx
]) {
2758 if (!dcn20_split_stream_for_odm(
2759 &context
->res_ctx
, dc
->res_pool
,
2762 dcn20_build_mapped_resource(dc
, context
, pipe
->stream
);
2764 dcn20_split_stream_for_mpc(
2765 &context
->res_ctx
, dc
->res_pool
,
2767 pipe_split_from
[hsplit_pipe
->pipe_idx
] = pipe_idx
;
2769 } else if (hsplit_pipe
&& hsplit_pipe
->plane_state
== pipe
->plane_state
) {
2770 /* merge should already have been done */
2774 /* Actual dsc count per stream dsc validation*/
2775 if (!dcn20_validate_dsc(dc
, context
)) {
2776 context
->bw_ctx
.dml
.vba
.ValidationStatus
[context
->bw_ctx
.dml
.vba
.soc
.num_states
] =
2777 DML_FAIL_DSC_VALIDATION_FAILURE
;
2781 *vlevel_out
= vlevel
;
2793 static void dcn20_calculate_wm(
2794 struct dc
*dc
, struct dc_state
*context
,
2795 display_e2e_pipe_params_st
*pipes
,
2797 int *pipe_split_from
,
2800 int pipe_cnt
, i
, pipe_idx
;
2802 for (i
= 0, pipe_idx
= 0, pipe_cnt
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
2803 if (!context
->res_ctx
.pipe_ctx
[i
].stream
)
2806 pipes
[pipe_cnt
].clks_cfg
.refclk_mhz
= dc
->res_pool
->ref_clocks
.dchub_ref_clock_inKhz
/ 1000.0;
2807 pipes
[pipe_cnt
].clks_cfg
.dispclk_mhz
= context
->bw_ctx
.dml
.vba
.RequiredDISPCLK
[vlevel
][context
->bw_ctx
.dml
.vba
.maxMpcComb
];
2809 if (pipe_split_from
[i
] < 0) {
2810 pipes
[pipe_cnt
].clks_cfg
.dppclk_mhz
=
2811 context
->bw_ctx
.dml
.vba
.RequiredDPPCLK
[vlevel
][context
->bw_ctx
.dml
.vba
.maxMpcComb
][pipe_idx
];
2812 if (context
->bw_ctx
.dml
.vba
.BlendingAndTiming
[pipe_idx
] == pipe_idx
)
2813 pipes
[pipe_cnt
].pipe
.dest
.odm_combine
=
2814 context
->bw_ctx
.dml
.vba
.ODMCombineEnabled
[pipe_idx
];
2816 pipes
[pipe_cnt
].pipe
.dest
.odm_combine
= 0;
2819 pipes
[pipe_cnt
].clks_cfg
.dppclk_mhz
=
2820 context
->bw_ctx
.dml
.vba
.RequiredDPPCLK
[vlevel
][context
->bw_ctx
.dml
.vba
.maxMpcComb
][pipe_split_from
[i
]];
2821 if (context
->bw_ctx
.dml
.vba
.BlendingAndTiming
[pipe_split_from
[i
]] == pipe_split_from
[i
])
2822 pipes
[pipe_cnt
].pipe
.dest
.odm_combine
=
2823 context
->bw_ctx
.dml
.vba
.ODMCombineEnabled
[pipe_split_from
[i
]];
2825 pipes
[pipe_cnt
].pipe
.dest
.odm_combine
= 0;
2828 if (dc
->config
.forced_clocks
) {
2829 pipes
[pipe_cnt
].clks_cfg
.dispclk_mhz
= context
->bw_ctx
.dml
.soc
.clock_limits
[0].dispclk_mhz
;
2830 pipes
[pipe_cnt
].clks_cfg
.dppclk_mhz
= context
->bw_ctx
.dml
.soc
.clock_limits
[0].dppclk_mhz
;
2832 if (dc
->debug
.min_disp_clk_khz
> pipes
[pipe_cnt
].clks_cfg
.dispclk_mhz
* 1000)
2833 pipes
[pipe_cnt
].clks_cfg
.dispclk_mhz
= dc
->debug
.min_disp_clk_khz
/ 1000.0;
2834 if (dc
->debug
.min_dpp_clk_khz
> pipes
[pipe_cnt
].clks_cfg
.dppclk_mhz
* 1000)
2835 pipes
[pipe_cnt
].clks_cfg
.dppclk_mhz
= dc
->debug
.min_dpp_clk_khz
/ 1000.0;
2840 if (pipe_cnt
!= pipe_idx
) {
2841 if (dc
->res_pool
->funcs
->populate_dml_pipes
)
2842 pipe_cnt
= dc
->res_pool
->funcs
->populate_dml_pipes(dc
,
2845 pipe_cnt
= dcn20_populate_dml_pipes_from_context(dc
,
2849 *out_pipe_cnt
= pipe_cnt
;
2851 pipes
[0].clks_cfg
.voltage
= vlevel
;
2852 pipes
[0].clks_cfg
.dcfclk_mhz
= context
->bw_ctx
.dml
.soc
.clock_limits
[vlevel
].dcfclk_mhz
;
2853 pipes
[0].clks_cfg
.socclk_mhz
= context
->bw_ctx
.dml
.soc
.clock_limits
[vlevel
].socclk_mhz
;
2855 /* only pipe 0 is read for voltage and dcf/soc clocks */
2857 pipes
[0].clks_cfg
.voltage
= 1;
2858 pipes
[0].clks_cfg
.dcfclk_mhz
= context
->bw_ctx
.dml
.soc
.clock_limits
[1].dcfclk_mhz
;
2859 pipes
[0].clks_cfg
.socclk_mhz
= context
->bw_ctx
.dml
.soc
.clock_limits
[1].socclk_mhz
;
2861 context
->bw_ctx
.bw
.dcn
.watermarks
.b
.urgent_ns
= get_wm_urgent(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2862 context
->bw_ctx
.bw
.dcn
.watermarks
.b
.cstate_pstate
.cstate_enter_plus_exit_ns
= get_wm_stutter_enter_exit(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2863 context
->bw_ctx
.bw
.dcn
.watermarks
.b
.cstate_pstate
.cstate_exit_ns
= get_wm_stutter_exit(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2864 context
->bw_ctx
.bw
.dcn
.watermarks
.b
.cstate_pstate
.pstate_change_ns
= get_wm_dram_clock_change(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2865 context
->bw_ctx
.bw
.dcn
.watermarks
.b
.pte_meta_urgent_ns
= get_wm_memory_trip(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2866 context
->bw_ctx
.bw
.dcn
.watermarks
.b
.frac_urg_bw_nom
= get_fraction_of_urgent_bandwidth(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2867 context
->bw_ctx
.bw
.dcn
.watermarks
.b
.frac_urg_bw_flip
= get_fraction_of_urgent_bandwidth_imm_flip(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2868 context
->bw_ctx
.bw
.dcn
.watermarks
.b
.urgent_latency_ns
= get_urgent_latency(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2871 pipes
[0].clks_cfg
.voltage
= 2;
2872 pipes
[0].clks_cfg
.dcfclk_mhz
= context
->bw_ctx
.dml
.soc
.clock_limits
[2].dcfclk_mhz
;
2873 pipes
[0].clks_cfg
.socclk_mhz
= context
->bw_ctx
.dml
.soc
.clock_limits
[2].socclk_mhz
;
2875 context
->bw_ctx
.bw
.dcn
.watermarks
.c
.urgent_ns
= get_wm_urgent(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2876 context
->bw_ctx
.bw
.dcn
.watermarks
.c
.cstate_pstate
.cstate_enter_plus_exit_ns
= get_wm_stutter_enter_exit(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2877 context
->bw_ctx
.bw
.dcn
.watermarks
.c
.cstate_pstate
.cstate_exit_ns
= get_wm_stutter_exit(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2878 context
->bw_ctx
.bw
.dcn
.watermarks
.c
.cstate_pstate
.pstate_change_ns
= get_wm_dram_clock_change(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2879 context
->bw_ctx
.bw
.dcn
.watermarks
.c
.pte_meta_urgent_ns
= get_wm_memory_trip(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2880 context
->bw_ctx
.bw
.dcn
.watermarks
.c
.frac_urg_bw_nom
= get_fraction_of_urgent_bandwidth(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2881 context
->bw_ctx
.bw
.dcn
.watermarks
.c
.frac_urg_bw_flip
= get_fraction_of_urgent_bandwidth_imm_flip(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2884 pipes
[0].clks_cfg
.voltage
= 3;
2885 pipes
[0].clks_cfg
.dcfclk_mhz
= context
->bw_ctx
.dml
.soc
.clock_limits
[2].dcfclk_mhz
;
2886 pipes
[0].clks_cfg
.socclk_mhz
= context
->bw_ctx
.dml
.soc
.clock_limits
[2].socclk_mhz
;
2888 context
->bw_ctx
.bw
.dcn
.watermarks
.d
.urgent_ns
= get_wm_urgent(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2889 context
->bw_ctx
.bw
.dcn
.watermarks
.d
.cstate_pstate
.cstate_enter_plus_exit_ns
= get_wm_stutter_enter_exit(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2890 context
->bw_ctx
.bw
.dcn
.watermarks
.d
.cstate_pstate
.cstate_exit_ns
= get_wm_stutter_exit(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2891 context
->bw_ctx
.bw
.dcn
.watermarks
.d
.cstate_pstate
.pstate_change_ns
= get_wm_dram_clock_change(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2892 context
->bw_ctx
.bw
.dcn
.watermarks
.d
.pte_meta_urgent_ns
= get_wm_memory_trip(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2893 context
->bw_ctx
.bw
.dcn
.watermarks
.d
.frac_urg_bw_nom
= get_fraction_of_urgent_bandwidth(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2894 context
->bw_ctx
.bw
.dcn
.watermarks
.d
.frac_urg_bw_flip
= get_fraction_of_urgent_bandwidth_imm_flip(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2896 pipes
[0].clks_cfg
.voltage
= vlevel
;
2897 pipes
[0].clks_cfg
.dcfclk_mhz
= context
->bw_ctx
.dml
.soc
.clock_limits
[vlevel
].dcfclk_mhz
;
2898 pipes
[0].clks_cfg
.socclk_mhz
= context
->bw_ctx
.dml
.soc
.clock_limits
[vlevel
].socclk_mhz
;
2899 context
->bw_ctx
.bw
.dcn
.watermarks
.a
.urgent_ns
= get_wm_urgent(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2900 context
->bw_ctx
.bw
.dcn
.watermarks
.a
.cstate_pstate
.cstate_enter_plus_exit_ns
= get_wm_stutter_enter_exit(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2901 context
->bw_ctx
.bw
.dcn
.watermarks
.a
.cstate_pstate
.cstate_exit_ns
= get_wm_stutter_exit(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2902 context
->bw_ctx
.bw
.dcn
.watermarks
.a
.cstate_pstate
.pstate_change_ns
= get_wm_dram_clock_change(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2903 context
->bw_ctx
.bw
.dcn
.watermarks
.a
.pte_meta_urgent_ns
= get_wm_memory_trip(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2904 context
->bw_ctx
.bw
.dcn
.watermarks
.a
.frac_urg_bw_nom
= get_fraction_of_urgent_bandwidth(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2905 context
->bw_ctx
.bw
.dcn
.watermarks
.a
.frac_urg_bw_flip
= get_fraction_of_urgent_bandwidth_imm_flip(&context
->bw_ctx
.dml
, pipes
, pipe_cnt
) * 1000;
2908 void dcn20_calculate_dlg_params(
2909 struct dc
*dc
, struct dc_state
*context
,
2910 display_e2e_pipe_params_st
*pipes
,
2914 int i
, j
, pipe_idx
, pipe_idx_unsplit
;
2915 bool visited
[MAX_PIPES
] = { 0 };
2917 /* Writeback MCIF_WB arbitration parameters */
2918 dc
->res_pool
->funcs
->set_mcif_arb_params(dc
, context
, pipes
, pipe_cnt
);
2920 context
->bw_ctx
.bw
.dcn
.clk
.dispclk_khz
= context
->bw_ctx
.dml
.vba
.DISPCLK
* 1000;
2921 context
->bw_ctx
.bw
.dcn
.clk
.dcfclk_khz
= context
->bw_ctx
.dml
.vba
.DCFCLK
* 1000;
2922 context
->bw_ctx
.bw
.dcn
.clk
.socclk_khz
= context
->bw_ctx
.dml
.vba
.SOCCLK
* 1000;
2923 context
->bw_ctx
.bw
.dcn
.clk
.dramclk_khz
= context
->bw_ctx
.dml
.vba
.DRAMSpeed
* 1000 / 16;
2924 context
->bw_ctx
.bw
.dcn
.clk
.dcfclk_deep_sleep_khz
= context
->bw_ctx
.dml
.vba
.DCFCLKDeepSleep
* 1000;
2925 context
->bw_ctx
.bw
.dcn
.clk
.fclk_khz
= context
->bw_ctx
.dml
.vba
.FabricClock
* 1000;
2926 context
->bw_ctx
.bw
.dcn
.clk
.p_state_change_support
=
2927 context
->bw_ctx
.dml
.vba
.DRAMClockChangeSupport
[vlevel
][context
->bw_ctx
.dml
.vba
.maxMpcComb
]
2928 != dm_dram_clock_change_unsupported
;
2929 context
->bw_ctx
.bw
.dcn
.clk
.dppclk_khz
= 0;
2931 if (context
->bw_ctx
.bw
.dcn
.clk
.dispclk_khz
< dc
->debug
.min_disp_clk_khz
)
2932 context
->bw_ctx
.bw
.dcn
.clk
.dispclk_khz
= dc
->debug
.min_disp_clk_khz
;
2935 * An artifact of dml pipe split/odm is that pipes get merged back together for
2936 * calculation. Therefore we need to only extract for first pipe in ascending index order
2937 * and copy into the other split half.
2939 for (i
= 0, pipe_idx
= 0, pipe_idx_unsplit
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
2940 if (!context
->res_ctx
.pipe_ctx
[i
].stream
)
2943 if (!visited
[pipe_idx
]) {
2944 display_pipe_source_params_st
*src
= &pipes
[pipe_idx
].pipe
.src
;
2945 display_pipe_dest_params_st
*dst
= &pipes
[pipe_idx
].pipe
.dest
;
2947 dst
->vstartup_start
= context
->bw_ctx
.dml
.vba
.VStartup
[pipe_idx_unsplit
];
2948 dst
->vupdate_offset
= context
->bw_ctx
.dml
.vba
.VUpdateOffsetPix
[pipe_idx_unsplit
];
2949 dst
->vupdate_width
= context
->bw_ctx
.dml
.vba
.VUpdateWidthPix
[pipe_idx_unsplit
];
2950 dst
->vready_offset
= context
->bw_ctx
.dml
.vba
.VReadyOffsetPix
[pipe_idx_unsplit
];
2952 * j iterates inside pipes array, unlike i which iterates inside
2956 for (j
= pipe_idx
+ 1; j
< pipe_cnt
; j
++) {
2957 display_pipe_source_params_st
*src_j
= &pipes
[j
].pipe
.src
;
2958 display_pipe_dest_params_st
*dst_j
= &pipes
[j
].pipe
.dest
;
2960 if (src_j
->is_hsplit
&& !visited
[j
]
2961 && src
->hsplit_grp
== src_j
->hsplit_grp
) {
2962 dst_j
->vstartup_start
= context
->bw_ctx
.dml
.vba
.VStartup
[pipe_idx_unsplit
];
2963 dst_j
->vupdate_offset
= context
->bw_ctx
.dml
.vba
.VUpdateOffsetPix
[pipe_idx_unsplit
];
2964 dst_j
->vupdate_width
= context
->bw_ctx
.dml
.vba
.VUpdateWidthPix
[pipe_idx_unsplit
];
2965 dst_j
->vready_offset
= context
->bw_ctx
.dml
.vba
.VReadyOffsetPix
[pipe_idx_unsplit
];
2969 visited
[pipe_idx
] = true;
2975 for (i
= 0, pipe_idx
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
2976 if (!context
->res_ctx
.pipe_ctx
[i
].stream
)
2978 if (context
->bw_ctx
.bw
.dcn
.clk
.dppclk_khz
< pipes
[pipe_idx
].clks_cfg
.dppclk_mhz
* 1000)
2979 context
->bw_ctx
.bw
.dcn
.clk
.dppclk_khz
= pipes
[pipe_idx
].clks_cfg
.dppclk_mhz
* 1000;
2980 context
->res_ctx
.pipe_ctx
[i
].plane_res
.bw
.dppclk_khz
=
2981 pipes
[pipe_idx
].clks_cfg
.dppclk_mhz
* 1000;
2982 ASSERT(visited
[pipe_idx
]);
2983 context
->res_ctx
.pipe_ctx
[i
].pipe_dlg_param
= pipes
[pipe_idx
].pipe
.dest
;
2986 /*save a original dppclock copy*/
2987 context
->bw_ctx
.bw
.dcn
.clk
.bw_dppclk_khz
= context
->bw_ctx
.bw
.dcn
.clk
.dppclk_khz
;
2988 context
->bw_ctx
.bw
.dcn
.clk
.bw_dispclk_khz
= context
->bw_ctx
.bw
.dcn
.clk
.dispclk_khz
;
2989 context
->bw_ctx
.bw
.dcn
.clk
.max_supported_dppclk_khz
= context
->bw_ctx
.dml
.soc
.clock_limits
[vlevel
].dppclk_mhz
* 1000;
2990 context
->bw_ctx
.bw
.dcn
.clk
.max_supported_dispclk_khz
= context
->bw_ctx
.dml
.soc
.clock_limits
[vlevel
].dispclk_mhz
* 1000;
2992 for (i
= 0, pipe_idx
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
2993 bool cstate_en
= context
->bw_ctx
.dml
.vba
.PrefetchMode
[vlevel
][context
->bw_ctx
.dml
.vba
.maxMpcComb
] != 2;
2995 if (!context
->res_ctx
.pipe_ctx
[i
].stream
)
2998 context
->bw_ctx
.dml
.funcs
.rq_dlg_get_dlg_reg(&context
->bw_ctx
.dml
,
2999 &context
->res_ctx
.pipe_ctx
[i
].dlg_regs
,
3000 &context
->res_ctx
.pipe_ctx
[i
].ttu_regs
,
3005 context
->bw_ctx
.bw
.dcn
.clk
.p_state_change_support
,
3006 false, false, false);
3008 context
->bw_ctx
.dml
.funcs
.rq_dlg_get_rq_reg(&context
->bw_ctx
.dml
,
3009 &context
->res_ctx
.pipe_ctx
[i
].rq_regs
,
3010 pipes
[pipe_idx
].pipe
);
3015 static bool dcn20_validate_bandwidth_internal(struct dc
*dc
, struct dc_state
*context
,
3020 BW_VAL_TRACE_SETUP();
3023 int pipe_split_from
[MAX_PIPES
];
3025 display_e2e_pipe_params_st
*pipes
= kzalloc(dc
->res_pool
->pipe_count
* sizeof(display_e2e_pipe_params_st
), GFP_KERNEL
);
3026 DC_LOGGER_INIT(dc
->ctx
->logger
);
3028 BW_VAL_TRACE_COUNT();
3030 out
= dcn20_fast_validate_bw(dc
, context
, pipes
, &pipe_cnt
, pipe_split_from
, &vlevel
);
3038 BW_VAL_TRACE_END_VOLTAGE_LEVEL();
3040 if (fast_validate
) {
3041 BW_VAL_TRACE_SKIP(fast
);
3045 dcn20_calculate_wm(dc
, context
, pipes
, &pipe_cnt
, pipe_split_from
, vlevel
);
3046 dcn20_calculate_dlg_params(dc
, context
, pipes
, pipe_cnt
, vlevel
);
3048 BW_VAL_TRACE_END_WATERMARKS();
3053 DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
3054 dml_get_status_message(context
->bw_ctx
.dml
.vba
.ValidationStatus
[context
->bw_ctx
.dml
.vba
.soc
.num_states
]));
3056 BW_VAL_TRACE_SKIP(fail
);
3062 BW_VAL_TRACE_FINISH();
3068 bool dcn20_validate_bandwidth(struct dc
*dc
, struct dc_state
*context
,
3071 bool voltage_supported
= false;
3072 bool full_pstate_supported
= false;
3073 bool dummy_pstate_supported
= false;
3074 double p_state_latency_us
;
3077 p_state_latency_us
= context
->bw_ctx
.dml
.soc
.dram_clock_change_latency_us
;
3078 context
->bw_ctx
.dml
.soc
.disable_dram_clock_change_vactive_support
=
3079 dc
->debug
.disable_dram_clock_change_vactive_support
;
3081 if (fast_validate
) {
3082 voltage_supported
= dcn20_validate_bandwidth_internal(dc
, context
, true);
3085 return voltage_supported
;
3088 // Best case, we support full UCLK switch latency
3089 voltage_supported
= dcn20_validate_bandwidth_internal(dc
, context
, false);
3090 full_pstate_supported
= context
->bw_ctx
.bw
.dcn
.clk
.p_state_change_support
;
3092 if (context
->bw_ctx
.dml
.soc
.dummy_pstate_latency_us
== 0 ||
3093 (voltage_supported
&& full_pstate_supported
)) {
3094 context
->bw_ctx
.bw
.dcn
.clk
.p_state_change_support
= full_pstate_supported
;
3095 goto restore_dml_state
;
3098 // Fallback: Try to only support G6 temperature read latency
3099 context
->bw_ctx
.dml
.soc
.dram_clock_change_latency_us
= context
->bw_ctx
.dml
.soc
.dummy_pstate_latency_us
;
3101 voltage_supported
= dcn20_validate_bandwidth_internal(dc
, context
, false);
3102 dummy_pstate_supported
= context
->bw_ctx
.bw
.dcn
.clk
.p_state_change_support
;
3104 if (voltage_supported
&& dummy_pstate_supported
) {
3105 context
->bw_ctx
.bw
.dcn
.clk
.p_state_change_support
= false;
3106 goto restore_dml_state
;
3109 // ERROR: fallback is supposed to always work.
3113 context
->bw_ctx
.dml
.soc
.dram_clock_change_latency_us
= p_state_latency_us
;
3116 return voltage_supported
;
3119 struct pipe_ctx
*dcn20_acquire_idle_pipe_for_layer(
3120 struct dc_state
*state
,
3121 const struct resource_pool
*pool
,
3122 struct dc_stream_state
*stream
)
3124 struct resource_context
*res_ctx
= &state
->res_ctx
;
3125 struct pipe_ctx
*head_pipe
= resource_get_head_pipe_for_stream(res_ctx
, stream
);
3126 struct pipe_ctx
*idle_pipe
= find_idle_secondary_pipe(res_ctx
, pool
, head_pipe
);
3134 idle_pipe
->stream
= head_pipe
->stream
;
3135 idle_pipe
->stream_res
.tg
= head_pipe
->stream_res
.tg
;
3136 idle_pipe
->stream_res
.opp
= head_pipe
->stream_res
.opp
;
3138 idle_pipe
->plane_res
.hubp
= pool
->hubps
[idle_pipe
->pipe_idx
];
3139 idle_pipe
->plane_res
.ipp
= pool
->ipps
[idle_pipe
->pipe_idx
];
3140 idle_pipe
->plane_res
.dpp
= pool
->dpps
[idle_pipe
->pipe_idx
];
3141 idle_pipe
->plane_res
.mpcc_inst
= pool
->dpps
[idle_pipe
->pipe_idx
]->inst
;
3146 bool dcn20_get_dcc_compression_cap(const struct dc
*dc
,
3147 const struct dc_dcc_surface_param
*input
,
3148 struct dc_surface_dcc_cap
*output
)
3150 return dc
->res_pool
->hubbub
->funcs
->get_dcc_compression_cap(
3151 dc
->res_pool
->hubbub
,
3156 static void dcn20_destroy_resource_pool(struct resource_pool
**pool
)
3158 struct dcn20_resource_pool
*dcn20_pool
= TO_DCN20_RES_POOL(*pool
);
3160 dcn20_resource_destruct(dcn20_pool
);
3166 static struct dc_cap_funcs cap_funcs
= {
3167 .get_dcc_compression_cap
= dcn20_get_dcc_compression_cap
3171 enum dc_status
dcn20_patch_unknown_plane_state(struct dc_plane_state
*plane_state
)
3173 enum dc_status result
= DC_OK
;
3175 enum surface_pixel_format surf_pix_format
= plane_state
->format
;
3176 unsigned int bpp
= resource_pixel_format_to_bpp(surf_pix_format
);
3178 enum swizzle_mode_values swizzle
= DC_SW_LINEAR
;
3181 swizzle
= DC_SW_64KB_D
;
3183 swizzle
= DC_SW_64KB_S
;
3185 plane_state
->tiling_info
.gfx9
.swizzle
= swizzle
;
3189 static struct resource_funcs dcn20_res_pool_funcs
= {
3190 .destroy
= dcn20_destroy_resource_pool
,
3191 .link_enc_create
= dcn20_link_encoder_create
,
3192 .validate_bandwidth
= dcn20_validate_bandwidth
,
3193 .acquire_idle_pipe_for_layer
= dcn20_acquire_idle_pipe_for_layer
,
3194 .add_stream_to_ctx
= dcn20_add_stream_to_ctx
,
3195 .remove_stream_from_ctx
= dcn20_remove_stream_from_ctx
,
3196 .populate_dml_writeback_from_context
= dcn20_populate_dml_writeback_from_context
,
3197 .patch_unknown_plane_state
= dcn20_patch_unknown_plane_state
,
3198 .set_mcif_arb_params
= dcn20_set_mcif_arb_params
,
3199 .populate_dml_pipes
= dcn20_populate_dml_pipes_from_context
,
3200 .find_first_free_match_stream_enc_for_link
= dcn10_find_first_free_match_stream_enc_for_link
3203 bool dcn20_dwbc_create(struct dc_context
*ctx
, struct resource_pool
*pool
)
3206 uint32_t pipe_count
= pool
->res_cap
->num_dwb
;
3208 for (i
= 0; i
< pipe_count
; i
++) {
3209 struct dcn20_dwbc
*dwbc20
= kzalloc(sizeof(struct dcn20_dwbc
),
3213 dm_error("DC: failed to create dwbc20!\n");
3216 dcn20_dwbc_construct(dwbc20
, ctx
,
3221 pool
->dwbc
[i
] = &dwbc20
->base
;
3226 bool dcn20_mmhubbub_create(struct dc_context
*ctx
, struct resource_pool
*pool
)
3229 uint32_t pipe_count
= pool
->res_cap
->num_dwb
;
3231 ASSERT(pipe_count
> 0);
3233 for (i
= 0; i
< pipe_count
; i
++) {
3234 struct dcn20_mmhubbub
*mcif_wb20
= kzalloc(sizeof(struct dcn20_mmhubbub
),
3238 dm_error("DC: failed to create mcif_wb20!\n");
3242 dcn20_mmhubbub_construct(mcif_wb20
, ctx
,
3248 pool
->mcif_wb
[i
] = &mcif_wb20
->base
;
3253 static struct pp_smu_funcs
*dcn20_pp_smu_create(struct dc_context
*ctx
)
3255 struct pp_smu_funcs
*pp_smu
= kzalloc(sizeof(*pp_smu
), GFP_KERNEL
);
3260 dm_pp_get_funcs(ctx
, pp_smu
);
3262 if (pp_smu
->ctx
.ver
!= PP_SMU_VER_NV
)
3263 pp_smu
= memset(pp_smu
, 0, sizeof(struct pp_smu_funcs
));
3268 static void dcn20_pp_smu_destroy(struct pp_smu_funcs
**pp_smu
)
3270 if (pp_smu
&& *pp_smu
) {
3276 void dcn20_cap_soc_clocks(
3277 struct _vcs_dpi_soc_bounding_box_st
*bb
,
3278 struct pp_smu_nv_clock_table max_clocks
)
3282 // First pass - cap all clocks higher than the reported max
3283 for (i
= 0; i
< bb
->num_states
; i
++) {
3284 if ((bb
->clock_limits
[i
].dcfclk_mhz
> (max_clocks
.dcfClockInKhz
/ 1000))
3285 && max_clocks
.dcfClockInKhz
!= 0)
3286 bb
->clock_limits
[i
].dcfclk_mhz
= (max_clocks
.dcfClockInKhz
/ 1000);
3288 if ((bb
->clock_limits
[i
].dram_speed_mts
> (max_clocks
.uClockInKhz
/ 1000) * 16)
3289 && max_clocks
.uClockInKhz
!= 0)
3290 bb
->clock_limits
[i
].dram_speed_mts
= (max_clocks
.uClockInKhz
/ 1000) * 16;
3292 if ((bb
->clock_limits
[i
].fabricclk_mhz
> (max_clocks
.fabricClockInKhz
/ 1000))
3293 && max_clocks
.fabricClockInKhz
!= 0)
3294 bb
->clock_limits
[i
].fabricclk_mhz
= (max_clocks
.fabricClockInKhz
/ 1000);
3296 if ((bb
->clock_limits
[i
].dispclk_mhz
> (max_clocks
.displayClockInKhz
/ 1000))
3297 && max_clocks
.displayClockInKhz
!= 0)
3298 bb
->clock_limits
[i
].dispclk_mhz
= (max_clocks
.displayClockInKhz
/ 1000);
3300 if ((bb
->clock_limits
[i
].dppclk_mhz
> (max_clocks
.dppClockInKhz
/ 1000))
3301 && max_clocks
.dppClockInKhz
!= 0)
3302 bb
->clock_limits
[i
].dppclk_mhz
= (max_clocks
.dppClockInKhz
/ 1000);
3304 if ((bb
->clock_limits
[i
].phyclk_mhz
> (max_clocks
.phyClockInKhz
/ 1000))
3305 && max_clocks
.phyClockInKhz
!= 0)
3306 bb
->clock_limits
[i
].phyclk_mhz
= (max_clocks
.phyClockInKhz
/ 1000);
3308 if ((bb
->clock_limits
[i
].socclk_mhz
> (max_clocks
.socClockInKhz
/ 1000))
3309 && max_clocks
.socClockInKhz
!= 0)
3310 bb
->clock_limits
[i
].socclk_mhz
= (max_clocks
.socClockInKhz
/ 1000);
3312 if ((bb
->clock_limits
[i
].dscclk_mhz
> (max_clocks
.dscClockInKhz
/ 1000))
3313 && max_clocks
.dscClockInKhz
!= 0)
3314 bb
->clock_limits
[i
].dscclk_mhz
= (max_clocks
.dscClockInKhz
/ 1000);
3317 // Second pass - remove all duplicate clock states
3318 for (i
= bb
->num_states
- 1; i
> 1; i
--) {
3319 bool duplicate
= true;
3321 if (bb
->clock_limits
[i
-1].dcfclk_mhz
!= bb
->clock_limits
[i
].dcfclk_mhz
)
3323 if (bb
->clock_limits
[i
-1].dispclk_mhz
!= bb
->clock_limits
[i
].dispclk_mhz
)
3325 if (bb
->clock_limits
[i
-1].dppclk_mhz
!= bb
->clock_limits
[i
].dppclk_mhz
)
3327 if (bb
->clock_limits
[i
-1].dram_speed_mts
!= bb
->clock_limits
[i
].dram_speed_mts
)
3329 if (bb
->clock_limits
[i
-1].dscclk_mhz
!= bb
->clock_limits
[i
].dscclk_mhz
)
3331 if (bb
->clock_limits
[i
-1].fabricclk_mhz
!= bb
->clock_limits
[i
].fabricclk_mhz
)
3333 if (bb
->clock_limits
[i
-1].phyclk_mhz
!= bb
->clock_limits
[i
].phyclk_mhz
)
3335 if (bb
->clock_limits
[i
-1].socclk_mhz
!= bb
->clock_limits
[i
].socclk_mhz
)
/*
 * Rebuild the bounding-box clock states from the UCLK DPM states reported
 * by the SMU, clamped to the maximum sustainable clocks.
 *
 * One voltage state is derived per reported UCLK state; display-side
 * clocks are pinned at their maximums since DCN2.0 bandwidth validation
 * varies only the memory/fabric side.  No-op when num_states == 0.
 */
void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
		struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states,
		unsigned int num_states)
{
	struct _vcs_dpi_voltage_scaling_st calculated_states[DC__VOLTAGE_STATES];
	int i;
	int num_calculated_states = 0;
	int min_dcfclk = 0;

	if (num_states == 0)
		return;

	memset(calculated_states, 0, sizeof(calculated_states));

	/* Debug override takes precedence over the per-ASIC DCF floor. */
	if (dc->bb_overrides.min_dcfclk_mhz > 0)
		min_dcfclk = dc->bb_overrides.min_dcfclk_mhz;
	else {
		if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev))
			min_dcfclk = 310;
		else
			// Accounting for SOC/DCF relationship, we can go as high as
			// 506Mhz in Vmin.
			min_dcfclk = 506;
	}

	for (i = 0; i < num_states; i++) {
		int min_fclk_required_by_uclk;
		calculated_states[i].state = i;
		/* uclk_states[] is in kHz; DRAM speed is 16x the UCLK in MT/s. */
		calculated_states[i].dram_speed_mts = uclk_states[i] * 16 / 1000;

		// FCLK:UCLK ratio is 1.08
		min_fclk_required_by_uclk = mul_u64_u32_shr(BIT_ULL(32) * 1080 / 1000000, uclk_states[i], 32);

		calculated_states[i].fabricclk_mhz = (min_fclk_required_by_uclk < min_dcfclk) ?
				min_dcfclk : min_fclk_required_by_uclk;

		/* SOC and DCF clocks track fabric, capped at their own maximums. */
		calculated_states[i].socclk_mhz = (calculated_states[i].fabricclk_mhz > max_clocks->socClockInKhz / 1000) ?
				max_clocks->socClockInKhz / 1000 : calculated_states[i].fabricclk_mhz;

		calculated_states[i].dcfclk_mhz = (calculated_states[i].fabricclk_mhz > max_clocks->dcfClockInKhz / 1000) ?
				max_clocks->dcfClockInKhz / 1000 : calculated_states[i].fabricclk_mhz;

		/* Display-side clocks are not DPM'd here: pin them at max. */
		calculated_states[i].dispclk_mhz = max_clocks->displayClockInKhz / 1000;
		calculated_states[i].dppclk_mhz = max_clocks->displayClockInKhz / 1000;
		calculated_states[i].dscclk_mhz = max_clocks->displayClockInKhz / (1000 * 3);

		calculated_states[i].phyclk_mhz = max_clocks->phyClockInKhz / 1000;

		num_calculated_states++;
	}

	/* Force the top state to the true maximum SOC/fabric/DCF clocks. */
	calculated_states[num_calculated_states - 1].socclk_mhz = max_clocks->socClockInKhz / 1000;
	calculated_states[num_calculated_states - 1].fabricclk_mhz = max_clocks->socClockInKhz / 1000;
	calculated_states[num_calculated_states - 1].dcfclk_mhz = max_clocks->dcfClockInKhz / 1000;

	memcpy(bb->clock_limits, calculated_states, sizeof(bb->clock_limits));
	bb->num_states = num_calculated_states;

	// Duplicate the last state, DML always an extra state identical to max state to work
	memcpy(&bb->clock_limits[num_calculated_states], &bb->clock_limits[num_calculated_states - 1], sizeof(struct _vcs_dpi_voltage_scaling_st));
	bb->clock_limits[num_calculated_states].state = bb->num_states;
}
/*
 * Apply debug latency overrides (in ns, from dc->bb_overrides) to the
 * bounding box (which stores microseconds).  Each override is applied
 * only when it is non-zero and actually differs from the current value.
 */
void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
{
	if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns
			&& dc->bb_overrides.sr_exit_time_ns) {
		bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
	}

	if ((int)(bb->sr_enter_plus_exit_time_us * 1000)
			!= dc->bb_overrides.sr_enter_plus_exit_time_ns
			&& dc->bb_overrides.sr_enter_plus_exit_time_ns) {
		bb->sr_enter_plus_exit_time_us =
				dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
	}

	if ((int)(bb->urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
			&& dc->bb_overrides.urgent_latency_ns) {
		bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
	}

	if ((int)(bb->dram_clock_change_latency_us * 1000)
			!= dc->bb_overrides.dram_clock_change_latency_ns
			&& dc->bb_overrides.dram_clock_change_latency_ns) {
		bb->dram_clock_change_latency_us =
				dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
	}
}
3432 static struct _vcs_dpi_soc_bounding_box_st
*get_asic_rev_soc_bb(
3433 uint32_t hw_internal_rev
)
3435 if (ASICREV_IS_NAVI14_M(hw_internal_rev
))
3436 return &dcn2_0_nv14_soc
;
3438 if (ASICREV_IS_NAVI12_P(hw_internal_rev
))
3439 return &dcn2_0_nv12_soc
;
3444 static struct _vcs_dpi_ip_params_st
*get_asic_rev_ip_params(
3445 uint32_t hw_internal_rev
)
3448 if (ASICREV_IS_NAVI14_M(hw_internal_rev
))
3449 return &dcn2_0_nv14_ip
;
/* All DCN2.0 ASIC revisions currently share the same DML project version. */
static enum dml_project get_dml_project_version(uint32_t hw_internal_rev)
{
	return DML_PROJECT_NAVI10v2;
}
/* Convert a little-endian 16.16 fixed-point value (as stored in the
 * gpu_info SOC bounding box) to a host-order double. */
#define fixed16_to_double(x) (((double) x) / ((double) (1 << 16)))
#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
/*
 * Initialize the SOC bounding box for the detected ASIC revision.
 *
 * For NV12 the static table is overwritten field-by-field from the
 * firmware-provided gpu_info bounding box (fixed16/le32 encoded).  If a
 * pp_smu interface is available, the box is then refined with the SMU's
 * UCLK DPM states and maximum sustainable clocks.  Finally the IP params
 * are synced to the pool and debug overrides are applied.
 *
 * Returns false only when NV12 is detected without a firmware bounding box.
 */
static bool init_soc_bounding_box(struct dc *dc,
				  struct dcn20_resource_pool *pool)
{
	const struct gpu_info_soc_bounding_box_v1_0 *bb = dc->soc_bounding_box;
	struct _vcs_dpi_soc_bounding_box_st *loaded_bb =
			get_asic_rev_soc_bb(dc->ctx->asic_id.hw_internal_rev);
	struct _vcs_dpi_ip_params_st *loaded_ip =
			get_asic_rev_ip_params(dc->ctx->asic_id.hw_internal_rev);

	DC_LOGGER_INIT(dc->ctx->logger);

	/* TODO: upstream NV12 bounding box when its launched */
	if (!bb && ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) {
		DC_LOG_ERROR("%s: not valid soc bounding box/n", __func__);
		return false;
	}

	if (bb && ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) {
		int i;

		/* Latency/percentage fields are 16.16 fixed point; raw byte
		 * counts and cycle counts are plain le32. */
		dcn2_0_nv12_soc.sr_exit_time_us =
				fixed16_to_double_to_cpu(bb->sr_exit_time_us);
		dcn2_0_nv12_soc.sr_enter_plus_exit_time_us =
				fixed16_to_double_to_cpu(bb->sr_enter_plus_exit_time_us);
		dcn2_0_nv12_soc.urgent_latency_us =
				fixed16_to_double_to_cpu(bb->urgent_latency_us);
		dcn2_0_nv12_soc.urgent_latency_pixel_data_only_us =
				fixed16_to_double_to_cpu(bb->urgent_latency_pixel_data_only_us);
		dcn2_0_nv12_soc.urgent_latency_pixel_mixed_with_vm_data_us =
				fixed16_to_double_to_cpu(bb->urgent_latency_pixel_mixed_with_vm_data_us);
		dcn2_0_nv12_soc.urgent_latency_vm_data_only_us =
				fixed16_to_double_to_cpu(bb->urgent_latency_vm_data_only_us);
		dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_pixel_only_bytes =
				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_only_bytes);
		dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes =
				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_and_vm_bytes);
		dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_vm_only_bytes =
				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_vm_only_bytes);
		dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only =
				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_only);
		dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm =
				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm);
		dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_vm_only =
				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_vm_only);
		dcn2_0_nv12_soc.max_avg_sdp_bw_use_normal_percent =
				fixed16_to_double_to_cpu(bb->max_avg_sdp_bw_use_normal_percent);
		dcn2_0_nv12_soc.max_avg_dram_bw_use_normal_percent =
				fixed16_to_double_to_cpu(bb->max_avg_dram_bw_use_normal_percent);
		dcn2_0_nv12_soc.writeback_latency_us =
				fixed16_to_double_to_cpu(bb->writeback_latency_us);
		dcn2_0_nv12_soc.ideal_dram_bw_after_urgent_percent =
				fixed16_to_double_to_cpu(bb->ideal_dram_bw_after_urgent_percent);
		dcn2_0_nv12_soc.max_request_size_bytes =
				le32_to_cpu(bb->max_request_size_bytes);
		dcn2_0_nv12_soc.dram_channel_width_bytes =
				le32_to_cpu(bb->dram_channel_width_bytes);
		dcn2_0_nv12_soc.fabric_datapath_to_dcn_data_return_bytes =
				le32_to_cpu(bb->fabric_datapath_to_dcn_data_return_bytes);
		dcn2_0_nv12_soc.dcn_downspread_percent =
				fixed16_to_double_to_cpu(bb->dcn_downspread_percent);
		dcn2_0_nv12_soc.downspread_percent =
				fixed16_to_double_to_cpu(bb->downspread_percent);
		dcn2_0_nv12_soc.dram_page_open_time_ns =
				fixed16_to_double_to_cpu(bb->dram_page_open_time_ns);
		dcn2_0_nv12_soc.dram_rw_turnaround_time_ns =
				fixed16_to_double_to_cpu(bb->dram_rw_turnaround_time_ns);
		dcn2_0_nv12_soc.dram_return_buffer_per_channel_bytes =
				le32_to_cpu(bb->dram_return_buffer_per_channel_bytes);
		dcn2_0_nv12_soc.round_trip_ping_latency_dcfclk_cycles =
				le32_to_cpu(bb->round_trip_ping_latency_dcfclk_cycles);
		dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_bytes =
				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_bytes);
		dcn2_0_nv12_soc.channel_interleave_bytes =
				le32_to_cpu(bb->channel_interleave_bytes);
		dcn2_0_nv12_soc.num_banks =
				le32_to_cpu(bb->num_banks);
		dcn2_0_nv12_soc.num_chans =
				le32_to_cpu(bb->num_chans);
		dcn2_0_nv12_soc.vmm_page_size_bytes =
				le32_to_cpu(bb->vmm_page_size_bytes);
		dcn2_0_nv12_soc.dram_clock_change_latency_us =
				fixed16_to_double_to_cpu(bb->dram_clock_change_latency_us);
		// HACK!! Lower uclock latency switch time so we don't switch
		dcn2_0_nv12_soc.dram_clock_change_latency_us = 10;
		dcn2_0_nv12_soc.writeback_dram_clock_change_latency_us =
				fixed16_to_double_to_cpu(bb->writeback_dram_clock_change_latency_us);
		dcn2_0_nv12_soc.return_bus_width_bytes =
				le32_to_cpu(bb->return_bus_width_bytes);
		dcn2_0_nv12_soc.dispclk_dppclk_vco_speed_mhz =
				le32_to_cpu(bb->dispclk_dppclk_vco_speed_mhz);
		dcn2_0_nv12_soc.xfc_bus_transport_time_us =
				le32_to_cpu(bb->xfc_bus_transport_time_us);
		dcn2_0_nv12_soc.xfc_xbuf_latency_tolerance_us =
				le32_to_cpu(bb->xfc_xbuf_latency_tolerance_us);
		dcn2_0_nv12_soc.use_urgent_burst_bw =
				le32_to_cpu(bb->use_urgent_burst_bw);
		dcn2_0_nv12_soc.num_states =
				le32_to_cpu(bb->num_states);

		/* Per-voltage-state clock limits from firmware. */
		for (i = 0; i < dcn2_0_nv12_soc.num_states; i++) {
			dcn2_0_nv12_soc.clock_limits[i].state =
					le32_to_cpu(bb->clock_limits[i].state);
			dcn2_0_nv12_soc.clock_limits[i].dcfclk_mhz =
					fixed16_to_double_to_cpu(bb->clock_limits[i].dcfclk_mhz);
			dcn2_0_nv12_soc.clock_limits[i].fabricclk_mhz =
					fixed16_to_double_to_cpu(bb->clock_limits[i].fabricclk_mhz);
			dcn2_0_nv12_soc.clock_limits[i].dispclk_mhz =
					fixed16_to_double_to_cpu(bb->clock_limits[i].dispclk_mhz);
			dcn2_0_nv12_soc.clock_limits[i].dppclk_mhz =
					fixed16_to_double_to_cpu(bb->clock_limits[i].dppclk_mhz);
			dcn2_0_nv12_soc.clock_limits[i].phyclk_mhz =
					fixed16_to_double_to_cpu(bb->clock_limits[i].phyclk_mhz);
			dcn2_0_nv12_soc.clock_limits[i].socclk_mhz =
					fixed16_to_double_to_cpu(bb->clock_limits[i].socclk_mhz);
			dcn2_0_nv12_soc.clock_limits[i].dscclk_mhz =
					fixed16_to_double_to_cpu(bb->clock_limits[i].dscclk_mhz);
			dcn2_0_nv12_soc.clock_limits[i].dram_speed_mts =
					fixed16_to_double_to_cpu(bb->clock_limits[i].dram_speed_mts);
		}
	}

	/* Refine the loaded box with live SMU data when available. */
	if (pool->base.pp_smu) {
		struct pp_smu_nv_clock_table max_clocks = {0};
		unsigned int uclk_states[8] = {0};
		unsigned int num_states = 0;
		enum pp_smu_status status;
		bool clock_limits_available = false;
		bool uclk_states_available = false;

		if (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states) {
			status = (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states)
				(&pool->base.pp_smu->nv_funcs.pp_smu, uclk_states, &num_states);

			uclk_states_available = (status == PP_SMU_RESULT_OK);
		}

		if (pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks) {
			status = (*pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks)
					(&pool->base.pp_smu->nv_funcs.pp_smu, &max_clocks);
			/* SMU cannot set DCF clock to anything equal to or higher than SOC clock.
			 * Set max DCF clock to be the same as the SOC clock minus 1000Mhz(?)
			 */
			if (max_clocks.dcfClockInKhz >= max_clocks.socClockInKhz)
				max_clocks.dcfClockInKhz = max_clocks.socClockInKhz - 1000;
			clock_limits_available = (status == PP_SMU_RESULT_OK);
		}

		/* Prefer full DPM-state rebuild; fall back to clamping only. */
		if (clock_limits_available && uclk_states_available && num_states)
			dcn20_update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states);
		else if (clock_limits_available)
			dcn20_cap_soc_clocks(loaded_bb, max_clocks);
	}

	loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
	loaded_ip->max_num_dpp = pool->base.pipe_count;
	dcn20_patch_bounding_box(dc, loaded_bb);

	return true;
}
/*
 * Build the complete DCN2.0 resource pool: capability hardcoding, clock
 * sources, DCCG/DMCU/ABM, SOC bounding box + DML, IRQ service, the full
 * per-pipe HW block set (hubp/ipp/dpp/opp/tg), MPC, hubbub, DSC, writeback
 * and OEM I2C.  Any failure jumps to create_fail, which destructs whatever
 * was built so far.  Returns true on success.
 */
static bool dcn20_resource_construct(
	uint8_t num_virtual_links,
	struct dc *dc,
	struct dcn20_resource_pool *pool)
{
	int i;
	struct dc_context *ctx = dc->ctx;
	struct irq_service_init_data init_data;
	struct ddc_service_init_data ddc_init_data;
	struct _vcs_dpi_soc_bounding_box_st *loaded_bb =
			get_asic_rev_soc_bb(ctx->asic_id.hw_internal_rev);
	struct _vcs_dpi_ip_params_st *loaded_ip =
			get_asic_rev_ip_params(ctx->asic_id.hw_internal_rev);
	enum dml_project dml_project_version =
			get_dml_project_version(ctx->asic_id.hw_internal_rev);

	ctx->dc_bios->regs = &bios_regs;
	pool->base.funcs = &dcn20_res_pool_funcs;

	/* NV14 exposes one fewer pipe than the other DCN2.0 parts. */
	if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) {
		pool->base.res_cap = &res_cap_nv14;
		pool->base.pipe_count = 5;
		pool->base.mpcc_count = 5;
	} else {
		pool->base.res_cap = &res_cap_nv10;
		pool->base.pipe_count = 6;
		pool->base.mpcc_count = 6;
	}
	/*************************************************
	 *  Resource + asic cap hardcoding               *
	 *************************************************/
	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;

	dc->caps.max_downscale_ratio = 200;
	dc->caps.i2c_speed_in_khz = 100;
	dc->caps.max_cursor_size = 256;
	dc->caps.dmdata_alloc_size = 2048;

	dc->caps.max_slave_planes = 1;
	dc->caps.post_blend_color_processing = true;
	dc->caps.force_dp_tps4_for_cp2520 = true;
	dc->caps.hw_3d_lut = true;
	dc->caps.extended_aux_timeout_support = true;

	/* Diagnostics environments use reduced defaults; FPGA also trims pipes. */
	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) {
		dc->debug = debug_defaults_drv;
	} else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
		pool->base.pipe_count = 4;
		pool->base.mpcc_count = pool->base.pipe_count;
		dc->debug = debug_defaults_diags;
	} else {
		dc->debug = debug_defaults_diags;
	}
	//dcn2.0x
	dc->work_arounds.dedcn20_305_wa = true;

	// Init the vm_helper
	if (dc->vm_helper)
		vm_helper_init(dc->vm_helper, 16);

	/*************************************************
	 *  Create resources                             *
	 *************************************************/

	pool->base.clock_sources[DCN20_CLK_SRC_PLL0] =
			dcn20_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL0,
				&clk_src_regs[0], false);
	pool->base.clock_sources[DCN20_CLK_SRC_PLL1] =
			dcn20_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL1,
				&clk_src_regs[1], false);
	pool->base.clock_sources[DCN20_CLK_SRC_PLL2] =
			dcn20_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL2,
				&clk_src_regs[2], false);
	pool->base.clock_sources[DCN20_CLK_SRC_PLL3] =
			dcn20_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL3,
				&clk_src_regs[3], false);
	pool->base.clock_sources[DCN20_CLK_SRC_PLL4] =
			dcn20_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL4,
				&clk_src_regs[4], false);
	pool->base.clock_sources[DCN20_CLK_SRC_PLL5] =
			dcn20_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL5,
				&clk_src_regs[5], false);
	pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL;
	/* todo: not reuse phy_pll registers */
	pool->base.dp_clock_source =
			dcn20_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_ID_DP_DTO,
				&clk_src_regs[0], true);

	for (i = 0; i < pool->base.clk_src_count; i++) {
		if (pool->base.clock_sources[i] == NULL) {
			dm_error("DC: failed to create clock sources!\n");
			BREAK_TO_DEBUGGER();
			goto create_fail;
		}
	}

	pool->base.dccg = dccg2_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
	if (pool->base.dccg == NULL) {
		dm_error("DC: failed to create dccg!\n");
		BREAK_TO_DEBUGGER();
		goto create_fail;
	}

	pool->base.dmcu = dcn20_dmcu_create(ctx,
			&dmcu_regs,
			&dmcu_shift,
			&dmcu_mask);
	if (pool->base.dmcu == NULL) {
		dm_error("DC: failed to create dmcu!\n");
		BREAK_TO_DEBUGGER();
		goto create_fail;
	}

	pool->base.abm = dce_abm_create(ctx,
			&abm_regs,
			&abm_shift,
			&abm_mask);
	if (pool->base.abm == NULL) {
		dm_error("DC: failed to create abm!\n");
		BREAK_TO_DEBUGGER();
		goto create_fail;
	}

	pool->base.pp_smu = dcn20_pp_smu_create(ctx);

	/* Bounding box must be ready before DML is initialized from it. */
	if (!init_soc_bounding_box(dc, pool)) {
		dm_error("DC: failed to initialize soc bounding box!\n");
		BREAK_TO_DEBUGGER();
		goto create_fail;
	}

	dml_init_instance(&dc->dml, loaded_bb, loaded_ip, dml_project_version);

	/* Build the watermark range sets handed to PPLib/SMU: one reader set
	 * per (up to 4) bounding-box state, plus a single writer set. */
	if (!dc->debug.disable_pplib_wm_range) {
		struct pp_smu_wm_range_sets ranges = {0};
		int i = 0;

		ranges.num_reader_wm_sets = 0;

		if (loaded_bb->num_states == 1) {
			ranges.reader_wm_sets[0].wm_inst = i;
			ranges.reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
			ranges.reader_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
			ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
			ranges.reader_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;

			ranges.num_reader_wm_sets = 1;
		} else if (loaded_bb->num_states > 1) {
			for (i = 0; i < 4 && i < loaded_bb->num_states; i++) {
				ranges.reader_wm_sets[i].wm_inst = i;
				ranges.reader_wm_sets[i].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
				ranges.reader_wm_sets[i].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
				/* Fill-clock band derived from DRAM speed (MT/s / 16 = UCLK MHz). */
				ranges.reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? (loaded_bb->clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0;
				ranges.reader_wm_sets[i].max_fill_clk_mhz = loaded_bb->clock_limits[i].dram_speed_mts / 16;

				ranges.num_reader_wm_sets = i + 1;
			}

			/* Unconstrain the band edges so the whole clock range is covered. */
			ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
			ranges.reader_wm_sets[ranges.num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
		}

		ranges.num_writer_wm_sets = 1;

		ranges.writer_wm_sets[0].wm_inst = 0;
		ranges.writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
		ranges.writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
		ranges.writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
		ranges.writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;

		/* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
		if (pool->base.pp_smu->nv_funcs.set_wm_ranges)
			pool->base.pp_smu->nv_funcs.set_wm_ranges(&pool->base.pp_smu->nv_funcs.pp_smu, &ranges);
	}

	init_data.ctx = dc->ctx;
	pool->base.irqs = dal_irq_service_dcn20_create(&init_data);
	if (!pool->base.irqs)
		goto create_fail;

	/* mem input -> ipp -> dpp -> opp -> TG */
	for (i = 0; i < pool->base.pipe_count; i++) {
		pool->base.hubps[i] = dcn20_hubp_create(ctx, i);
		if (pool->base.hubps[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create memory input!\n");
			goto create_fail;
		}

		pool->base.ipps[i] = dcn20_ipp_create(ctx, i);
		if (pool->base.ipps[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create input pixel processor!\n");
			goto create_fail;
		}

		pool->base.dpps[i] = dcn20_dpp_create(ctx, i);
		if (pool->base.dpps[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create dpps!\n");
			goto create_fail;
		}
	}
	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
		pool->base.engines[i] = dcn20_aux_engine_create(ctx, i);
		if (pool->base.engines[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC:failed to create aux engine!!\n");
			goto create_fail;
		}
		pool->base.hw_i2cs[i] = dcn20_i2c_hw_create(ctx, i);
		if (pool->base.hw_i2cs[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC:failed to create hw i2c!!\n");
			goto create_fail;
		}
		pool->base.sw_i2cs[i] = NULL;
	}

	for (i = 0; i < pool->base.res_cap->num_opp; i++) {
		pool->base.opps[i] = dcn20_opp_create(ctx, i);
		if (pool->base.opps[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create output pixel processor!\n");
			goto create_fail;
		}
	}

	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
		pool->base.timing_generators[i] = dcn20_timing_generator_create(
				ctx, i);
		if (pool->base.timing_generators[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create tg!\n");
			goto create_fail;
		}
	}

	pool->base.timing_generator_count = i;

	pool->base.mpc = dcn20_mpc_create(ctx);
	if (pool->base.mpc == NULL) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create mpc!\n");
		goto create_fail;
	}

	pool->base.hubbub = dcn20_hubbub_create(ctx);
	if (pool->base.hubbub == NULL) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create hubbub!\n");
		goto create_fail;
	}

	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
		pool->base.dscs[i] = dcn20_dsc_create(ctx, i);
		if (pool->base.dscs[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create display stream compressor %d!\n", i);
			goto create_fail;
		}
	}

	if (!dcn20_dwbc_create(ctx, &pool->base)) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create dwbc!\n");
		goto create_fail;
	}
	if (!dcn20_mmhubbub_create(ctx, &pool->base)) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create mcif_wb!\n");
		goto create_fail;
	}

	if (!resource_construct(num_virtual_links, dc, &pool->base,
			(!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
			&res_create_funcs : &res_create_maximus_funcs)))
			goto create_fail;

	dcn20_hw_sequencer_construct(dc);

	// IF NV12, set PG function pointer to NULL. It's not that
	// PG isn't supported for NV12, it's that we don't want to
	// program the registers because that will cause more power
	// to be consumed. We could have created dcn20_init_hw to get
	// the same effect by checking ASIC rev, but there was a
	// request at some point to not check ASIC rev on hw sequencer.
	if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev))
		dc->hwseq->funcs.enable_power_gating_plane = NULL;

	dc->caps.max_planes =  pool->base.pipe_count;

	for (i = 0; i < dc->caps.max_planes; ++i)
		dc->caps.planes[i] = plane_cap;

	dc->cap_funcs = cap_funcs;

	/* Hook up the OEM I2C device if the VBIOS advertises one. */
	if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
		ddc_init_data.ctx = dc->ctx;
		ddc_init_data.link = NULL;
		ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
		ddc_init_data.id.enum_id = 0;
		ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
		pool->base.oem_device = dal_ddc_service_create(&ddc_init_data);
	} else {
		pool->base.oem_device = NULL;
	}

	return true;

create_fail:

	dcn20_resource_destruct(pool);

	return false;
}
3957 struct resource_pool
*dcn20_create_resource_pool(
3958 const struct dc_init_data
*init_data
,
3961 struct dcn20_resource_pool
*pool
=
3962 kzalloc(sizeof(struct dcn20_resource_pool
), GFP_KERNEL
);
3967 if (dcn20_resource_construct(init_data
->num_virtual_links
, dc
, pool
))
3970 BREAK_TO_DEBUGGER();