/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */
#include <linux/slab.h>
#include <linux/mm.h>

#include "dm_services.h"
#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "resource.h"
#include "clk_mgr.h"
#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"

#include "link_hwss.h"
#include "link_encoder.h"

#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "mem_input.h"
#include "hubp.h"

#include "dc_link_dp.h"
#include "dc_dmub_srv.h"

#include "dsc.h"
#include "vm_helper.h"
#include "dce/dce_i2c.h"

#define CTX \
	dc->ctx

#define DC_LOGGER \
	dc->ctx->logger
static const char DC_BUILD_ID[] = "production-build";
/**
 * DOC: Overview
 *
 * DC is the OS-agnostic component of the amdgpu DC driver.
 *
 * DC maintains and validates a set of structs representing the state of the
 * driver and writes that state to AMD hardware.
 *
 * Main DC HW structs:
 *
 * struct dc - The central struct. One per driver. Created on driver load,
 * destroyed on driver unload.
 *
 * struct dc_context - One per driver.
 * Used as a backpointer by most other structs in dc.
 *
 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
 * plugpoints). Created on driver load, destroyed on driver unload.
 *
 * struct dc_sink - One per display. Created on boot or hotplug.
 * Destroyed on shutdown or hotunplug. A dc_link can have a local sink
 * (the display directly attached). It may also have one or more remote
 * sinks (in the Multi-Stream Transport case).
 *
 * struct resource_pool - One per driver. Represents the hw blocks not in the
 * main pipeline. Not directly accessible by dm.
 *
 * Main dc state structs:
 *
 * These structs can be created and destroyed as needed. There is a full set of
 * these structs in dc->current_state representing the currently programmed state.
 *
 * struct dc_state - The global DC state to track global state information,
 * such as bandwidth values.
 *
 * struct dc_stream_state - Represents the hw configuration for the pipeline from
 * a framebuffer to a display. Maps one-to-one with dc_sink.
 *
 * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
 * and may have more in the Multi-Plane Overlay case.
 *
 * struct resource_context - Represents the programmable state of everything in
 * the resource_pool. Not directly accessible by dm.
 *
 * struct pipe_ctx - A member of struct resource_context. Represents the
 * internal hardware pipeline components. Each dc_plane_state has either
 * one or two (in the pipe-split case).
 */
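/*
 * Editor's illustrative sketch (not driver code; error handling elided):
 * the structs above are typically driven by the DM layer in this order,
 * using only functions defined later in this file.
 *
 *	struct dc *dc = dc_create(&init_data);		// one per driver
 *	dc_hardware_init(dc);				// program initial hw state
 *
 *	struct dc_state *context = dc_create_state(dc);
 *	// ...populate context with dc_stream_state / dc_plane_state...
 *	dc_commit_state(dc, context);			// becomes dc->current_state
 *	dc_release_state(context);
 *
 *	dc_destroy(&dc);				// tears down links and state
 */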
/*******************************************************************************
 * Private functions
 ******************************************************************************/

static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
	if (new > *original)
		*original = new;
}
static void destroy_links(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			link_destroy(&dc->links[i]);
	}
}
static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = link_create(&link_init_params);

		if (link) {
			bool should_destroy_link = false;

			if (link->connector_signal == SIGNAL_TYPE_EDP) {
				if (dc->config.edp_not_connected)
					should_destroy_link = true;
				else if (dc->debug.remove_disconnect_edp) {
					enum dc_connection_type type;

					dc_link_detect_sink(link, &type);
					if (type == dc_connection_none)
						should_destroy_link = true;
				}
			}

			if (dc->config.force_enum_edp || !should_destroy_link) {
				dc->links[dc->link_count] = link;
				link->dc = dc;
				++dc->link_count;
			} else {
				link_destroy(&link);
			}
		}
	}

	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL)
			goto failed_alloc;

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	return true;

failed_alloc:
	return false;
}
static struct dc_perf_trace *dc_perf_trace_create(void)
{
	return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
}

static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
	kfree(*perf_trace);
	*perf_trace = NULL;
}
/*****************************************************************************
 * Function: dc_stream_adjust_vmin_vmax
 *
 * @brief
 *	Looks up the pipe context of dc_stream_state and updates the
 *	vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
 *	Rate, which is a power-saving feature that targets reducing panel
 *	refresh rate while the screen is static
 *
 * @param [in] dc: dc reference
 * @param [in] stream: Initial dc stream state
 * @param [in] adjust: Updated parameters for vertical_total_min and
 * vertical_total_max
 *****************************************************************************/
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_crtc_timing_adjust *adjust)
{
	int i = 0;
	bool ret = false;

	stream->adjust = *adjust;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			dc->hwss.set_drr(&pipe,
					1,
					adjust->v_total_min,
					adjust->v_total_max,
					adjust->v_total_mid,
					adjust->v_total_mid_frame_num);

			ret = true;
		}
	}
	return ret;
}
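/*
 * Editor's illustrative sketch (values are placeholders): a DM-side caller
 * enables DRR by widening the allowed vertical-total range on a live stream.
 *
 *	struct dc_crtc_timing_adjust adjust = stream->adjust;
 *
 *	adjust.v_total_min = 0x0860;	// fastest allowed refresh
 *	adjust.v_total_max = 0x0C00;	// slowest allowed refresh
 *	dc_stream_adjust_vmin_vmax(dc, stream, &adjust);
 */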
bool dc_stream_get_crtc_position(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		unsigned int *v_pos, unsigned int *nom_v_pos)
{
	/* TODO: Support multiple streams */
	const struct dc_stream_state *stream = streams[0];
	int i = 0;
	bool ret = false;
	struct crtc_position position;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.get_position(&pipe, 1, &position);

			*v_pos = position.vertical_count;
			*nom_v_pos = position.nominal_vcount;
			ret = true;
		}
	}
	return ret;
}
/**
 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 * @dc: DC object.
 * @stream: The stream to configure CRC on.
 * @enable: Enable CRC if true, disable otherwise.
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *              once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the CRC.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
			     bool enable, bool continuous)
{
	int i;
	struct pipe_ctx *pipe;
	struct crc_params param;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	/* Always capture the full frame */
	param.windowa_x_start = 0;
	param.windowa_y_start = 0;
	param.windowa_x_end = pipe->stream->timing.h_addressable;
	param.windowa_y_end = pipe->stream->timing.v_addressable;
	param.windowb_x_start = 0;
	param.windowb_y_start = 0;
	param.windowb_x_end = pipe->stream->timing.h_addressable;
	param.windowb_y_end = pipe->stream->timing.v_addressable;

	/* Default to the union of both windows */
	param.selection = UNION_WINDOW_A_B;
	param.continuous_mode = continuous;
	param.enable = enable;

	tg = pipe->stream_res.tg;

	/* Only call if supported */
	if (tg->funcs->configure_crc)
		return tg->funcs->configure_crc(tg, &param);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}
/**
 * dc_stream_get_crc() - Get CRC values for the given stream.
 * @dc: DC object.
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr, g_y, b_cb: CRC values for the three channels are stored here.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 * Return false if stream is not found, or if CRCs are not enabled.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
		       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	int i;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	tg = pipe->stream_res.tg;

	if (tg->funcs->get_crc)
		return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}
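/*
 * Editor's illustrative sketch: CRC capture is a two-step API. Configure
 * first, then poll per frame; dc_stream_get_crc() fails until capture has
 * been enabled. consume_crc() is a hypothetical caller-side function.
 *
 *	uint32_t r_cr, g_y, b_cb;
 *
 *	if (dc_stream_configure_crc(dc, stream, true, true))	// continuous mode
 *		while (dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
 *			consume_crc(r_cr, g_y, b_cb);
 */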
void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
		enum dc_dynamic_expansion option)
{
	/* OPP FMT dyn expansion updates*/
	int i = 0;
	struct pipe_ctx *pipe_ctx;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {
			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
			pipe_ctx->stream_res.opp->dyn_expansion = option;
			pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
					pipe_ctx->stream_res.opp,
					COLOR_SPACE_YCBCR601,
					stream->timing.display_color_depth,
					stream->signal);
		}
	}
}
void dc_stream_set_dither_option(struct dc_stream_state *stream,
		enum dc_dither_option option)
{
	struct bit_depth_reduction_params params;
	struct dc_link *link = stream->link;
	struct pipe_ctx *pipes = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
				stream) {
			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	if (!pipes)
		return;
	if (option > DITHER_OPTION_MAX)
		return;

	stream->dither_option = option;

	memset(&params, 0, sizeof(params));
	resource_build_bit_depth_reduction_params(stream, &params);
	stream->bit_depth_params = params;

	if (pipes->plane_res.xfm &&
	    pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
		pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
			pipes->plane_res.xfm,
			pipes->plane_res.scl_data.lb_params.depth,
			&stream->bit_depth_params);
	}

	pipes->stream_res.opp->funcs->
		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}
bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
	int i = 0;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_gamut_remap(pipes);
			ret = true;
		}
	}

	return ret;
}
bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
	int i = 0;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {
			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_output_csc(dc,
					pipes,
					stream->output_color_space,
					stream->csc_color_matrix.matrix,
					pipes->stream_res.opp->inst);
			ret = true;
		}
	}

	return ret;
}
void dc_stream_set_static_screen_params(struct dc *dc,
		struct dc_stream_state **streams,
		int num_streams,
		const struct dc_static_screen_params *params)
{
	int i = 0;
	int j = 0;
	struct pipe_ctx *pipes_affected[MAX_PIPES];
	int num_pipes_affected = 0;

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream = streams[i];

		for (j = 0; j < MAX_PIPES; j++) {
			if (dc->current_state->res_ctx.pipe_ctx[j].stream
					== stream) {
				pipes_affected[num_pipes_affected++] =
						&dc->current_state->res_ctx.pipe_ctx[j];
			}
		}
	}

	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
}
static void dc_destruct(struct dc *dc)
{
	if (dc->current_state) {
		dc_release_state(dc->current_state);
		dc->current_state = NULL;
	}

	destroy_links(dc);

	if (dc->clk_mgr) {
		dc_destroy_clk_mgr(dc->clk_mgr);
		dc->clk_mgr = NULL;
	}

	dc_destroy_resource_pool(dc);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	dc_perf_trace_destroy(&dc->ctx->perf_trace);

	kfree(dc->ctx);
	dc->ctx = NULL;

	kfree(dc->bw_vbios);
	dc->bw_vbios = NULL;

	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

#ifdef CONFIG_DRM_AMD_DC_DCN
	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;

#endif
	kfree(dc->vm_helper);
	dc->vm_helper = NULL;
}
static bool dc_construct_ctx(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	enum dce_version dc_version = DCE_VERSION_UNKNOWN;

	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	if (!dc_ctx)
		return false;

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;
	dc_ctx->dc_sink_id_count = 0;
	dc_ctx->dc_stream_id_count = 0;
	dc_ctx->dce_environment = init_params->dce_environment;

	/* Create logger */

	dc_version = resource_parse_asic_id(init_params->asic_id);
	dc_ctx->dce_version = dc_version;

	dc_ctx->perf_trace = dc_perf_trace_create();
	if (!dc_ctx->perf_trace) {
		ASSERT_CRITICAL(false);
		kfree(dc_ctx);
		return false;
	}

	dc->ctx = dc_ctx;

	return true;
}
static bool dc_construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;
#endif

	dc->config = init_params->flags;

	// Allocate memory for the vm_helper
	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
	if (!dc->vm_helper) {
		dm_error("%s: failed to create dc->vm_helper\n", __func__);
		goto fail;
	}

	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;
	dc->soc_bounding_box = init_params->soc_bounding_box;
#endif

	if (!dc_construct_ctx(dc, init_params)) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx = dc->ctx;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_ctx->dce_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		dc_ctx->created_bios = true;
	}

	dc->vendor_signature = init_params->vendor_signature;

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_ctx->dce_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
	if (!dc->res_pool)
		goto fail;

	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
	if (!dc->clk_mgr)
		goto fail;

	if (dc->res_pool->funcs->update_bw_bounding_box)
		dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);

	/* Creation of current_state must occur after dc->dml
	 * is initialized in dc_create_resource_pool because
	 * on creation it copies the contents of dc->dml
	 */

	dc->current_state = dc_create_state(dc);

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	dc_resource_state_construct(dc, dc->current_state);

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	return true;

fail:
	return false;
}
static bool disable_all_writeback_pipes_for_stream(
		const struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_state *context)
{
	int i;

	for (i = 0; i < stream->num_wb_info; i++)
		stream->writeback_info[i].wb_enabled = false;

	return true;
}
void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream, bool lock)
{
	int i = 0;

	/* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
	if (dc->hwss.interdependent_update_lock)
		dc->hwss.interdependent_update_lock(dc, context, lock);
	else {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

			// Copied conditions that were previously in dce110_apply_ctx_for_surface
			if (stream == pipe_ctx->stream) {
				if (!pipe_ctx->top_pipe &&
					(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
					dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
			}
		}
	}
}
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_create_state(dc);
	struct dc_state *current_ctx;

	if (dangling_context == NULL)
		return;

	dc_resource_state_copy_construct(dc->current_state, dangling_context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;

		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (should_disable && old_stream) {
			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);

			if (dc->hwss.apply_ctx_for_surface) {
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
				dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
			if (dc->hwss.program_front_end_for_ctx) {
				dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
				dc->hwss.program_front_end_for_ctx(dc, dangling_context);
				dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
		}
	}

	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_release_state(current_ctx);
}
static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
	int i;
	int count = 0;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->plane_state)
			continue;

		while (count < 100000) {
			/* Must set to false to start with, due to OR in update function */
			pipe->plane_state->status.is_flip_pending = false;
			dc->hwss.update_pending_status(pipe);
			if (!pipe->plane_state->status.is_flip_pending)
				break;

			udelay(1);
			count++;
		}
		ASSERT(!pipe->plane_state->status.is_flip_pending);
	}
}
/*******************************************************************************
 * Public functions
 ******************************************************************************/
struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (NULL == dc)
		goto alloc_fail;

	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
		if (false == dc_construct_ctx(dc, init_params)) {
			dc_destruct(dc);
			goto construct_fail;
		}
	} else {
		if (false == dc_construct(dc, init_params)) {
			dc_destruct(dc);
			goto construct_fail;
		}

		full_pipe_count = dc->res_pool->pipe_count;
		if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
			full_pipe_count--;
		dc->caps.max_streams = min(
				full_pipe_count,
				dc->res_pool->stream_enc_count);

		dc->optimize_seamless_boot_streams = 0;
		dc->caps.max_links = dc->link_count;
		dc->caps.max_audios = dc->res_pool->audio_count;
		dc->caps.linear_pitch_alignment = 64;

		dc->caps.max_dp_protocol_version = DP_VERSION_1_4;

		if (dc->res_pool->dmcu != NULL)
			dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
	}

	/* Populate versioning information */
	dc->versions.dc_ver = DC_VER;

	dc->build_id = DC_BUILD_ID;

	DC_LOG_DC("Display Core initialized\n");

	return dc;

construct_fail:
	kfree(dc);

alloc_fail:
	return NULL;
}
void dc_hardware_init(struct dc *dc)
{
	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
		dc->hwss.init_hw(dc);
}
void dc_init_callbacks(struct dc *dc,
		const struct dc_callback_init *init_params)
{
#ifdef CONFIG_DRM_AMD_DC_HDCP
	dc->ctx->cp_psp = init_params->cp_psp;
#endif
}

void dc_deinit_callbacks(struct dc *dc)
{
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
#endif
}

void dc_destroy(struct dc **dc)
{
	dc_destruct(*dc);
	kfree(*dc);
	*dc = NULL;
}
static void enable_timing_multisync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i = 0, multisync_count = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream ||
				!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
			continue;
		if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
			continue;
		multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
		multisync_count++;
	}

	if (multisync_count > 0) {
		dc->hwss.enable_per_frame_crtc_position_reset(
			dc, multisync_count, multisync_pipes);
	}
}
static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j, k;
	int group_index = 0;
	int num_group = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set, search rest of the tg's for ones with
		 * same timing, add all tgs with same timing to the group
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;

			if (resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first pipe with plane as master */
		for (j = 0; j < group_size; j++) {
			if (pipe_set[j]->plane_state) {
				if (j == 0)
					break;

				swap(pipe_set[0], pipe_set[j]);
				break;
			}
		}

		for (k = 0; k < group_size; k++) {
			struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);

			status->timing_sync_info.group_id = num_group;
			status->timing_sync_info.group_size = group_size;
			if (k == 0)
				status->timing_sync_info.master = true;
			else
				status->timing_sync_info.master = false;
		}

		/* remove any other pipes with plane as they have already been synced */
		for (j = j + 1; j < group_size; j++) {
			if (pipe_set[j]->plane_state) {
				group_size--;
				pipe_set[j] = pipe_set[group_size];
				j--;
			}
		}

		if (group_size > 1) {
			dc->hwss.enable_timing_synchronization(
				dc, group_index, group_size, pipe_set);
			group_index++;
		}
		num_group++;
	}
}
static bool context_changed(
		struct dc *dc,
		struct dc_state *context)
{
	uint8_t i;

	if (context->stream_count != dc->current_state->stream_count)
		return true;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i] != context->streams[i])
			return true;
	}

	return false;
}
bool dc_validate_seamless_boot_timing(const struct dc *dc,
				const struct dc_sink *sink,
				struct dc_crtc_timing *crtc_timing)
{
	struct timing_generator *tg;
	struct stream_encoder *se = NULL;

	struct dc_crtc_timing hw_crtc_timing = {0};

	struct dc_link *link = sink->link;
	unsigned int i, enc_inst, tg_inst = 0;

	// Seamless port only support single DP and EDP so far
	if (sink->sink_signal != SIGNAL_TYPE_DISPLAY_PORT &&
		sink->sink_signal != SIGNAL_TYPE_EDP)
		return false;

	/* Check for enabled DIG to identify enabled display */
	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
		return false;

	enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);

	if (enc_inst == ENGINE_ID_UNKNOWN)
		return false;

	for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
		if (dc->res_pool->stream_enc[i]->id == enc_inst) {

			se = dc->res_pool->stream_enc[i];

			tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
				dc->res_pool->stream_enc[i]);
			break;
		}
	}

	// tg_inst not found
	if (i == dc->res_pool->stream_enc_count)
		return false;

	if (tg_inst >= dc->res_pool->timing_generator_count)
		return false;

	tg = dc->res_pool->timing_generators[tg_inst];

	if (!tg->funcs->get_hw_timing)
		return false;

	if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
		return false;

	if (crtc_timing->h_total != hw_crtc_timing.h_total)
		return false;

	if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
		return false;

	if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
		return false;

	if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
		return false;

	if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
		return false;

	if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
		return false;

	if (crtc_timing->v_total != hw_crtc_timing.v_total)
		return false;

	if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
		return false;

	if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
		return false;

	if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
		return false;

	if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
		return false;

	if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
		return false;

	if (dc_is_dp_signal(link->connector_signal)) {
		unsigned int pix_clk_100hz;

		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
			dc->res_pool->dp_clock_source,
			tg_inst, &pix_clk_100hz);

		if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
			return false;

		if (!se->funcs->dp_get_pixel_format)
			return false;

		if (!se->funcs->dp_get_pixel_format(
			se,
			&hw_crtc_timing.pixel_encoding,
			&hw_crtc_timing.display_color_depth))
			return false;

		if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
			return false;

		if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
			return false;
	}

	return true;
}
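/*
 * Editor's illustrative sketch: at boot the DM layer can compare the mode it
 * is about to set against what the VBIOS/GOP left programmed, and request the
 * seamless-boot optimization only when the hardware timing already matches.
 *
 *	if (dc_validate_seamless_boot_timing(dc, sink, &stream->timing))
 *		stream->apply_seamless_boot_optimization = true;
 */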
bool dc_enable_stereo(
	struct dc *dc,
	struct dc_state *context,
	struct dc_stream_state *streams[],
	uint8_t stream_count)
{
	bool ret = true;
	int i, j;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		if (context != NULL)
			pipe = &context->res_ctx.pipe_ctx[i];
		else
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		for (j = 0; pipe && j < stream_count; j++) {
			if (streams[j] && streams[j] == pipe->stream &&
				dc->hwss.setup_stereo)
				dc->hwss.setup_stereo(pipe, dc);
		}
	}

	return ret;
}
/*
 * Applies given context to HW and copy it into current context.
 * It's up to the user to release the src context afterwards.
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};

	disable_dangling_plane(dc, context);

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] = context->streams[i];

	if (!dcb->funcs->is_accelerated_mode(dcb))
		dc->hwss.enable_accelerated_mode(dc, context);

	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization)
			dc->optimize_seamless_boot_streams++;
	}

	if (dc->optimize_seamless_boot_streams == 0)
		dc->hwss.prepare_bandwidth(dc, context);

	/* re-program planes for existing stream, in case we need to
	 * free up plane resource for later use
	 */
	if (dc->hwss.apply_ctx_for_surface) {
		for (i = 0; i < context->stream_count; i++) {
			if (context->streams[i]->mode_changed)
				continue;
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context); /* use new pipe config in new context */
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}
	}

	/* Program hardware */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	if (result != DC_OK)
		return result;

	if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
		enable_timing_multisync(dc, context);
		program_timing_sync(dc, context);
	}

	/* Program all planes within new context*/
	if (dc->hwss.program_front_end_for_ctx) {
		dc->hwss.interdependent_update_lock(dc, context, true);
		dc->hwss.program_front_end_for_ctx(dc, context);
		dc->hwss.interdependent_update_lock(dc, context, false);
		dc->hwss.post_unlock_program_front_end(dc, context);
	}
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_link *link = context->streams[i]->link;

		if (!context->streams[i]->mode_changed)
			continue;

		if (dc->hwss.apply_ctx_for_surface) {
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
					dc, context->streams[i],
					context->stream_status[i].plane_count,
					context);
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}

		/*
		 * enable stereo
		 * TODO rework dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0; pipe && l < context->stream_count; l++) {
				if (context->streams[l] &&
					context->streams[l] == pipe->stream &&
					dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_100hz / 10);
	}

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	if (dc->optimize_seamless_boot_streams == 0) {
		/* Must wait for no flips to be pending before doing optimize bw */
		wait_for_no_pipes_pending(dc, context);
		/* pplib is notified if disp_num changed */
		dc->hwss.optimize_bandwidth(dc, context);
	}

	for (i = 0; i < context->stream_count; i++)
		context->streams[i]->mode_changed = false;

	dc_release_state(dc->current_state);

	dc->current_state = context;

	dc_retain_state(dc->current_state);

	return result;
}
bool dc_commit_state(struct dc *dc, struct dc_state *context)
{
	enum dc_status result = DC_ERROR_UNEXPECTED;
	int i;

	if (false == context_changed(dc, context))
		return DC_OK;

	DC_LOG_DC("%s: %d streams\n",
				__func__, context->stream_count);

	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		dc_stream_log(dc, stream);
	}

	result = dc_commit_state_no_check(dc, context);

	return (result == DC_OK);
}
static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->plane_state)
			continue;

		/* Must set to false to start with, due to OR in update function */
		pipe->plane_state->status.is_flip_pending = false;
		dc->hwss.update_pending_status(pipe);
		if (pipe->plane_state->status.is_flip_pending)
			return true;
	}
	return false;
}
bool dc_post_update_surfaces_to_stream(struct dc *dc)
{
	int i;
	struct dc_state *context = dc->current_state;

	if ((!dc->optimized_required) || dc->optimize_seamless_boot_streams > 0)
		return true;

	post_surface_trace(dc);

	if (is_flip_pending_in_pipes(dc, context))
		return true;

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].stream == NULL ||
		    context->res_ctx.pipe_ctx[i].plane_state == NULL) {
			context->res_ctx.pipe_ctx[i].pipe_idx = i;
			dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
		}

	dc->hwss.optimize_bandwidth(dc, context);

	dc->optimized_required = false;
	dc->wm_optimized_required = false;

	return true;
}
struct dc_state *dc_create_state(struct dc *dc)
{
	struct dc_state *context = kvzalloc(sizeof(struct dc_state),
					    GFP_KERNEL);

	if (!context)
		return NULL;
	/* Each context must have their own instance of VBA and in order to
	 * initialize and obtain IP and SOC the base DML instance from DC is
	 * initially copied into every context
	 */
#ifdef CONFIG_DRM_AMD_DC_DCN
	memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
#endif

	kref_init(&context->refcount);

	return context;
}
struct dc_state *dc_copy_state(struct dc_state *src_ctx)
{
	int i, j;
	struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);

	if (!new_ctx)
		return NULL;
	memcpy(new_ctx, src_ctx, sizeof(struct dc_state));

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];

		if (cur_pipe->top_pipe)
			cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];

		if (cur_pipe->bottom_pipe)
			cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];

		if (cur_pipe->prev_odm_pipe)
			cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];

		if (cur_pipe->next_odm_pipe)
			cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
	}

	for (i = 0; i < new_ctx->stream_count; i++) {
		dc_stream_retain(new_ctx->streams[i]);
		for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
			dc_plane_state_retain(
				new_ctx->stream_status[i].plane_states[j]);
	}

	kref_init(&new_ctx->refcount);

	return new_ctx;
}
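/*
 * Editor's illustrative sketch: dc_state is reference counted. Creation and
 * copy both return a state holding one reference; retain/release pair around
 * any additional holder, and the final release destructs the state.
 *
 *	struct dc_state *context = dc_create_state(dc);	// refcount == 1
 *	dc_retain_state(context);			// refcount == 2
 *	dc_release_state(context);			// refcount == 1
 *	dc_release_state(context);			// destructed and freed
 */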
void dc_retain_state(struct dc_state *context)
{
	kref_get(&context->refcount);
}

static void dc_state_free(struct kref *kref)
{
	struct dc_state *context = container_of(kref, struct dc_state, refcount);

	dc_resource_state_destruct(context);
	kvfree(context);
}

void dc_release_state(struct dc_state *context)
{
	kref_put(&context->refcount, dc_state_free);
}
bool dc_set_generic_gpio_for_stereo(bool enable,
		struct gpio_service *gpio_service)
{
	enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
	struct gpio_pin_info pin_info;
	struct gpio *generic;
	struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
			   GFP_KERNEL);

	if (!config)
		return false;
	pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);

	if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
		kfree(config);
		return false;
	} else {
		generic = dal_gpio_service_create_generic_mux(
			gpio_service,
			pin_info.offset,
			pin_info.mask);
	}

	if (!generic) {
		kfree(config);
		return false;
	}

	gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);

	config->enable_output_from_mux = enable;
	config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;

	if (gpio_result == GPIO_RESULT_OK)
		gpio_result = dal_mux_setup_config(generic, config);

	if (gpio_result == GPIO_RESULT_OK) {
		dal_gpio_close(generic);
		dal_gpio_destroy_generic_mux(&generic);
		kfree(config);
		return true;
	} else {
		dal_gpio_close(generic);
		dal_gpio_destroy_generic_mux(&generic);
		kfree(config);
		return false;
	}
}
static bool is_surface_in_context(
		const struct dc_state *context,
		const struct dc_plane_state *plane_state)
{
	int j;

	for (j = 0; j < MAX_PIPES; j++) {
		const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (plane_state == pipe_ctx->plane_state) {
			return true;
		}
	}

	return false;
}
static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;

	if (!u->plane_info)
		return UPDATE_TYPE_FAST;

	if (u->plane_info->color_space != u->surface->color_space) {
		update_flags->bits.color_space_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
		update_flags->bits.horizontal_mirror_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->rotation != u->surface->rotation) {
		update_flags->bits.rotation_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->format != u->surface->format) {
		update_flags->bits.pixel_format_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->stereo_format != u->surface->stereo_format) {
		update_flags->bits.stereo_format_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
		update_flags->bits.per_pixel_alpha_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
		update_flags->bits.global_alpha_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->dcc.enable != u->surface->dcc.enable
			|| u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks
			|| u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
		update_flags->bits.dcc_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (resource_pixel_format_to_bpp(u->plane_info->format) !=
			resource_pixel_format_to_bpp(u->surface->format)) {
		/* different bytes per element will require full bandwidth
		 * and DML calculation
		 */
		update_flags->bits.bpp_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
			|| u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
		update_flags->bits.plane_size_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
			sizeof(union dc_tiling_info)) != 0) {
		update_flags->bits.swizzle_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);

		/* todo: below are HW dependent, we should add a hook to
		 * DCE/N resource and validated there.
		 */
		if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
			/* swizzled mode requires RQ to be setup properly,
			 * thus need to run DML to calculate RQ settings
			 */
			update_flags->bits.bandwidth_change = 1;
			elevate_update_type(&update_type, UPDATE_TYPE_FULL);
		}
	}

	/* This should be UPDATE_TYPE_FAST if nothing has changed. */
	return update_type;
}
static enum surface_update_type get_scaling_info_update_type(
		const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (!u->scaling_info)
		return UPDATE_TYPE_FAST;

	if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
			|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
			|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height
			|| u->scaling_info->scaling_quality.integer_scaling !=
				u->surface->scaling_quality.integer_scaling
			) {
		update_flags->bits.scaling_change = 1;

		if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
				&& (u->scaling_info->dst_rect.width < u->surface->src_rect.width
					|| u->scaling_info->dst_rect.height < u->surface->src_rect.height))
			/* Making dst rect smaller requires a bandwidth change */
			update_flags->bits.bandwidth_change = 1;
	}

	if (u->scaling_info->src_rect.width != u->surface->src_rect.width
		|| u->scaling_info->src_rect.height != u->surface->src_rect.height) {

		update_flags->bits.scaling_change = 1;
		if (u->scaling_info->src_rect.width > u->surface->src_rect.width
				|| u->scaling_info->src_rect.height > u->surface->src_rect.height)
			/* Making src rect bigger requires a bandwidth change */
			update_flags->bits.clock_change = 1;
	}

	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
			|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
			|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
			|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
		update_flags->bits.position_change = 1;

	if (update_flags->bits.clock_change
			|| update_flags->bits.bandwidth_change
			|| update_flags->bits.scaling_change)
		return UPDATE_TYPE_FULL;

	if (update_flags->bits.position_change)
		return UPDATE_TYPE_MED;

	return UPDATE_TYPE_FAST;
}
static enum surface_update_type det_surface_update(const struct dc *dc,
		const struct dc_surface_update *u)
{
	const struct dc_state *context = dc->current_state;
	enum surface_update_type type;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (u->flip_addr)
		update_flags->bits.addr_update = 1;

	if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
		update_flags->raw = 0xFFFFFFFF;
		return UPDATE_TYPE_FULL;
	}

	update_flags->raw = 0; // Reset all flags

	type = get_plane_info_update_type(u);
	elevate_update_type(&overall_type, type);

	type = get_scaling_info_update_type(u);
	elevate_update_type(&overall_type, type);

	if (u->flip_addr)
		update_flags->bits.addr_update = 1;

	if (u->in_transfer_func)
		update_flags->bits.in_transfer_func_change = 1;

	if (u->input_csc_color_matrix)
		update_flags->bits.input_csc_change = 1;

	if (u->coeff_reduction_factor)
		update_flags->bits.coeff_reduction_change = 1;

	if (u->gamut_remap_matrix)
		update_flags->bits.gamut_remap_change = 1;

	if (u->gamma) {
		enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;

		if (u->plane_info)
			format = u->plane_info->format;
		else if (u->surface)
			format = u->surface->format;

		if (dce_use_lut(format))
			update_flags->bits.gamma_change = 1;
	}

	if (u->hdr_mult.value)
		if (u->hdr_mult.value != u->surface->hdr_mult.value) {
			update_flags->bits.hdr_mult = 1;
			elevate_update_type(&overall_type, UPDATE_TYPE_MED);
		}

	if (update_flags->bits.in_transfer_func_change) {
		type = UPDATE_TYPE_MED;
		elevate_update_type(&overall_type, type);
	}

	if (update_flags->bits.input_csc_change
			|| update_flags->bits.coeff_reduction_change
			|| update_flags->bits.gamma_change
			|| update_flags->bits.gamut_remap_change) {
		type = UPDATE_TYPE_FULL;
		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}
static enum surface_update_type check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;

	if (stream_status == NULL || stream_status->plane_count != surface_count)
		overall_type = UPDATE_TYPE_FULL;

	/* some stream updates require passive update */
	if (stream_update) {
		union stream_update_flags *su_flags = &stream_update->stream->update_flags;

		if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
			(stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
			stream_update->integer_scaling_update)
			su_flags->bits.scaling = 1;

		if (stream_update->out_transfer_func)
			su_flags->bits.out_tf = 1;

		if (stream_update->abm_level)
			su_flags->bits.abm_level = 1;

		if (stream_update->dpms_off)
			su_flags->bits.dpms_off = 1;

		if (stream_update->gamut_remap)
			su_flags->bits.gamut_remap = 1;

		if (stream_update->wb_update)
			su_flags->bits.wb_update = 1;

		if (stream_update->dsc_config)
			su_flags->bits.dsc_changed = 1;

		if (su_flags->raw != 0)
			overall_type = UPDATE_TYPE_FULL;

		if (stream_update->output_csc_transform || stream_update->output_color_space)
			su_flags->bits.out_csc = 1;
	}

	for (i = 0 ; i < surface_count; i++) {
		enum surface_update_type type =
				det_surface_update(dc, &updates[i]);

		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}
/**
 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
 *
 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
 */
enum surface_update_type dc_check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type type;

	if (stream_update)
		stream_update->stream->update_flags.raw = 0;
	for (i = 0; i < surface_count; i++)
		updates[i].surface->update_flags.raw = 0;

	type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
	if (type == UPDATE_TYPE_FULL) {
		if (stream_update) {
			uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;

			stream_update->stream->update_flags.raw = 0xFFFFFFFF;
			stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
		}
		for (i = 0; i < surface_count; i++)
			updates[i].surface->update_flags.raw = 0xFFFFFFFF;
	}

	if (type == UPDATE_TYPE_FAST) {
		// If there's an available clock comparator, we use that.
		if (dc->clk_mgr->funcs->are_clock_states_equal) {
			if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
				dc->optimized_required = true;
		// Else we fallback to mem compare.
		} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
			dc->optimized_required = true;
		}

		dc->optimized_required |= dc->wm_optimized_required;
	}

	return type;
}
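/*
 * Editor's illustrative sketch: a DM caller classifies a surface/stream
 * update before deciding how much reprogramming (and revalidation) to do.
 *
 *	enum surface_update_type type =
 *		dc_check_update_surfaces_for_stream(dc, updates, surface_count,
 *						    stream_update, stream_status);
 *	if (type == UPDATE_TYPE_FULL)
 *		; // full validation and bandwidth reprogramming needed
 */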
static struct dc_stream_status *stream_get_status(
	struct dc_state *ctx,
	struct dc_stream_state *stream)
{
	uint8_t i;

	for (i = 0; i < ctx->stream_count; i++) {
		if (stream == ctx->streams[i]) {
			return &ctx->stream_status[i];
		}
	}

	return NULL;
}

static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
static void copy_surface_update_to_plane(
		struct dc_plane_state *surface,
		struct dc_surface_update *srf_update)
{
	if (srf_update->flip_addr) {
		surface->address = srf_update->flip_addr->address;
		surface->flip_immediate =
			srf_update->flip_addr->flip_immediate;
		surface->time.time_elapsed_in_us[surface->time.index] =
			srf_update->flip_addr->flip_timestamp_in_us -
				surface->time.prev_update_time_in_us;
		surface->time.prev_update_time_in_us =
			srf_update->flip_addr->flip_timestamp_in_us;
		surface->time.index++;
		if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
			surface->time.index = 0;

		surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
	}

	if (srf_update->scaling_info) {
		surface->scaling_quality =
				srf_update->scaling_info->scaling_quality;
		surface->dst_rect =
				srf_update->scaling_info->dst_rect;
		surface->src_rect =
				srf_update->scaling_info->src_rect;
		surface->clip_rect =
				srf_update->scaling_info->clip_rect;
	}

	if (srf_update->plane_info) {
		surface->color_space =
				srf_update->plane_info->color_space;
		surface->format =
				srf_update->plane_info->format;
		surface->plane_size =
				srf_update->plane_info->plane_size;
		surface->rotation =
				srf_update->plane_info->rotation;
		surface->horizontal_mirror =
				srf_update->plane_info->horizontal_mirror;
		surface->stereo_format =
				srf_update->plane_info->stereo_format;
		surface->tiling_info =
				srf_update->plane_info->tiling_info;
		surface->visible =
				srf_update->plane_info->visible;
		surface->per_pixel_alpha =
				srf_update->plane_info->per_pixel_alpha;
		surface->global_alpha =
				srf_update->plane_info->global_alpha;
		surface->global_alpha_value =
				srf_update->plane_info->global_alpha_value;
		surface->dcc =
				srf_update->plane_info->dcc;
		surface->layer_index =
				srf_update->plane_info->layer_index;
	}

	if (srf_update->gamma &&
			(surface->gamma_correction !=
					srf_update->gamma)) {
		memcpy(&surface->gamma_correction->entries,
			&srf_update->gamma->entries,
			sizeof(struct dc_gamma_entries));
		surface->gamma_correction->is_identity =
			srf_update->gamma->is_identity;
		surface->gamma_correction->num_entries =
			srf_update->gamma->num_entries;
		surface->gamma_correction->type =
			srf_update->gamma->type;
	}

	if (srf_update->in_transfer_func &&
			(surface->in_transfer_func !=
				srf_update->in_transfer_func)) {
		surface->in_transfer_func->sdr_ref_white_level =
			srf_update->in_transfer_func->sdr_ref_white_level;
		surface->in_transfer_func->tf =
			srf_update->in_transfer_func->tf;
		surface->in_transfer_func->type =
			srf_update->in_transfer_func->type;
		memcpy(&surface->in_transfer_func->tf_pts,
			&srf_update->in_transfer_func->tf_pts,
			sizeof(struct dc_transfer_func_distributed_points));
	}

	if (srf_update->func_shaper &&
			(surface->in_shaper_func !=
			srf_update->func_shaper))
		memcpy(surface->in_shaper_func, srf_update->func_shaper,
			sizeof(*surface->in_shaper_func));

	if (srf_update->lut3d_func &&
			(surface->lut3d_func !=
			srf_update->lut3d_func))
		memcpy(surface->lut3d_func, srf_update->lut3d_func,
			sizeof(*surface->lut3d_func));

	if (srf_update->hdr_mult.value)
		surface->hdr_mult =
				srf_update->hdr_mult;

	if (srf_update->blend_tf &&
			(surface->blend_tf !=
			srf_update->blend_tf))
		memcpy(surface->blend_tf, srf_update->blend_tf,
			sizeof(*surface->blend_tf));

	if (srf_update->input_csc_color_matrix)
		surface->input_csc_color_matrix =
			*srf_update->input_csc_color_matrix;

	if (srf_update->coeff_reduction_factor)
		surface->coeff_reduction_factor =
			*srf_update->coeff_reduction_factor;

	if (srf_update->gamut_remap_matrix)
		surface->gamut_remap_matrix =
			*srf_update->gamut_remap_matrix;
}
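/*
 * Editor's illustrative sketch (field path assumed for a linear surface;
 * fb_gpu_addr is hypothetical): a page flip arrives as a dc_surface_update
 * carrying only flip_addr, which the copy above folds into the plane state.
 *
 *	struct dc_flip_addrs flip_addr = { 0 };
 *	struct dc_surface_update update = { 0 };
 *
 *	flip_addr.address.grph.addr.quad_part = fb_gpu_addr;
 *	flip_addr.flip_immediate = false;
 *	update.surface = plane_state;
 *	update.flip_addr = &flip_addr;
 */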
static void copy_stream_update_to_stream(struct dc *dc,
					 struct dc_state *context,
					 struct dc_stream_state *stream,
					 struct dc_stream_update *update)
{
	struct dc_context *dc_ctx = dc->ctx;

	if (update == NULL || stream == NULL)
		return;

	if (update->src.height && update->src.width)
		stream->src = update->src;

	if (update->dst.height && update->dst.width)
		stream->dst = update->dst;

	if (update->out_transfer_func &&
			stream->out_transfer_func != update->out_transfer_func) {
		stream->out_transfer_func->sdr_ref_white_level =
			update->out_transfer_func->sdr_ref_white_level;
		stream->out_transfer_func->tf = update->out_transfer_func->tf;
		stream->out_transfer_func->type =
			update->out_transfer_func->type;
		memcpy(&stream->out_transfer_func->tf_pts,
			&update->out_transfer_func->tf_pts,
			sizeof(struct dc_transfer_func_distributed_points));
	}

	if (update->hdr_static_metadata)
		stream->hdr_static_metadata = *update->hdr_static_metadata;

	if (update->abm_level)
		stream->abm_level = *update->abm_level;

	if (update->periodic_interrupt0)
		stream->periodic_interrupt0 = *update->periodic_interrupt0;

	if (update->periodic_interrupt1)
		stream->periodic_interrupt1 = *update->periodic_interrupt1;

	if (update->gamut_remap)
		stream->gamut_remap_matrix = *update->gamut_remap;

	/* Note: this being updated after mode set is currently not a use case
	 * however if it arises OCSC would need to be reprogrammed at the
	 * minimum
	 */
	if (update->output_color_space)
		stream->output_color_space = *update->output_color_space;

	if (update->output_csc_transform)
		stream->csc_color_matrix = *update->output_csc_transform;

	if (update->vrr_infopacket)
		stream->vrr_infopacket = *update->vrr_infopacket;

	if (update->dpms_off)
		stream->dpms_off = *update->dpms_off;

	if (update->vsc_infopacket)
		stream->vsc_infopacket = *update->vsc_infopacket;

	if (update->vsp_infopacket)
		stream->vsp_infopacket = *update->vsp_infopacket;

	if (update->dither_option)
		stream->dither_option = *update->dither_option;

	/* update current stream with writeback info */
	if (update->wb_update) {
		int i;

		stream->num_wb_info = update->wb_update->num_wb_info;
		ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
		for (i = 0; i < stream->num_wb_info; i++)
			stream->writeback_info[i] =
				update->wb_update->writeback_info[i];
	}

	if (update->dsc_config) {
		struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
		uint32_t old_dsc_enabled = stream->timing.flags.DSC;
		uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
				update->dsc_config->num_slices_v != 0);
		/* Use temporary context for validating new DSC config */
		struct dc_state *dsc_validate_context = dc_create_state(dc);

		if (dsc_validate_context) {
			dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);

			stream->timing.dsc_cfg = *update->dsc_config;
			stream->timing.flags.DSC = enable_dsc;
			if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
				stream->timing.dsc_cfg = old_dsc_cfg;
				stream->timing.flags.DSC = old_dsc_enabled;
				update->dsc_config = NULL;
			}

			dc_release_state(dsc_validate_context);
		} else {
			DC_ERROR("Failed to allocate new validate context for DSC change\n");
			update->dsc_config = NULL;
		}
	}
}
static void commit_planes_do_stream_update(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int j;
	bool should_program_abm;

	// Stream updates
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {

			if (stream_update->periodic_interrupt0 &&
					dc->hwss.setup_periodic_interrupt)
				dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0);

			if (stream_update->periodic_interrupt1 &&
					dc->hwss.setup_periodic_interrupt)
				dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1);

			if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
					stream_update->vrr_infopacket ||
					stream_update->vsc_infopacket ||
					stream_update->vsp_infopacket) {
				resource_build_info_frame(pipe_ctx);
				dc->hwss.update_info_frame(pipe_ctx);
			}

			if (stream_update->hdr_static_metadata &&
					stream->use_dynamic_meta &&
					dc->hwss.set_dmdata_attributes &&
					pipe_ctx->stream->dmdata_address.quad_part != 0)
				dc->hwss.set_dmdata_attributes(pipe_ctx);

			if (stream_update->gamut_remap)
				dc_stream_set_gamut_remap(dc, stream);

			if (stream_update->output_csc_transform)
				dc_stream_program_csc_matrix(dc, stream);

			if (stream_update->dither_option) {
				struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;

				resource_build_bit_depth_reduction_params(pipe_ctx->stream,
						&pipe_ctx->stream->bit_depth_params);
				pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
						&stream->bit_depth_params,
						&stream->clamping);
				while (odm_pipe) {
					odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
							&stream->bit_depth_params,
							&stream->clamping);
					odm_pipe = odm_pipe->next_odm_pipe;
				}
			}

			/* Full front-end update only beyond this point */
			if (update_type == UPDATE_TYPE_FAST)
				continue;

			if (stream_update->dsc_config)
				dp_update_dsc_config(pipe_ctx);

			if (stream_update->dpms_off) {
				if (*stream_update->dpms_off) {
					core_link_disable_stream(pipe_ctx);
					/* for dpms, keep acquired resources*/
					if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
						pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

					dc->hwss.optimize_bandwidth(dc, dc->current_state);
				} else {
					if (dc->optimize_seamless_boot_streams == 0)
						dc->hwss.prepare_bandwidth(dc, dc->current_state);

					core_link_enable_stream(dc->current_state, pipe_ctx);
				}
			}

			if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
				should_program_abm = true;

				// if otg funcs defined check if blanked before programming
				if (pipe_ctx->stream_res.tg->funcs->is_blanked)
					if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
						should_program_abm = false;

				if (should_program_abm) {
					if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
						pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
					} else {
						pipe_ctx->stream_res.abm->funcs->set_abm_level(
							pipe_ctx->stream_res.abm, stream->abm_level);
					}
				}
			}
		}
	}
}
static void commit_planes_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int i, j;
	struct pipe_ctx *top_pipe_to_program = NULL;

	if (dc->optimize_seamless_boot_streams > 0 && surface_count > 0) {
		/* Optimize seamless boot flag keeps clocks and watermarks high until
		 * first flip. After first flip, optimization is required to lower
		 * bandwidth. Important to note that it is expected UEFI will
		 * only light up a single display on POST, therefore we only expect
		 * one stream with seamless boot flag set.
		 */
		if (stream->apply_seamless_boot_optimization) {
			stream->apply_seamless_boot_optimization = false;
			dc->optimize_seamless_boot_streams--;

			if (dc->optimize_seamless_boot_streams == 0)
				dc->optimized_required = true;
		}
	}

	if (update_type == UPDATE_TYPE_FULL && dc->optimize_seamless_boot_streams == 0) {
		dc->hwss.prepare_bandwidth(dc, context);
		context_clock_trace(dc, context);
	}

	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->top_pipe &&
			!pipe_ctx->prev_odm_pipe &&
			pipe_ctx->stream &&
			pipe_ctx->stream == stream) {
			top_pipe_to_program = pipe_ctx;
		}
	}

	if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
		if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable)
			top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
					top_pipe_to_program->stream_res.tg);

	if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
		dc->hwss.interdependent_update_lock(dc, context, true);
	else
		/* Lock the top pipe while updating plane addrs, since freesync requires
		 * plane addr update event triggers to be synchronized.
		 * top_pipe_to_program is expected to never be NULL
		 */
		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);

	// Stream updates
	if (stream_update)
		commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);

	if (surface_count == 0) {
		/*
		 * In case of turning off screen, no need to program front end a second time.
		 * just return after program blank.
		 */
		if (dc->hwss.apply_ctx_for_surface)
			dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
		if (dc->hwss.program_front_end_for_ctx)
			dc->hwss.program_front_end_for_ctx(dc, context);

		if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
			dc->hwss.interdependent_update_lock(dc, context, false);
		else
			dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);

		dc->hwss.post_unlock_program_front_end(dc, context);
		return;
	}

	if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
		for (i = 0; i < surface_count; i++) {
			struct dc_plane_state *plane_state = srf_updates[i].surface;

			/*set logical flag for lock/unlock use*/
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (!pipe_ctx->plane_state)
					continue;
				if (pipe_ctx->plane_state != plane_state)
					continue;
				plane_state->triplebuffer_flips = false;
				if (update_type == UPDATE_TYPE_FAST &&
					dc->hwss.program_triplebuffer != NULL &&
					!plane_state->flip_immediate &&
					!dc->debug.disable_tri_buf) {
					/*triple buffer for VUpdate only*/
					plane_state->triplebuffer_flips = true;
				}
			}
		}
	}

	// Update Type FULL, Surface updates
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->top_pipe &&
			!pipe_ctx->prev_odm_pipe &&
			pipe_ctx->stream &&
			pipe_ctx->stream == stream) {
			struct dc_stream_status *stream_status = NULL;

			if (!pipe_ctx->plane_state)
				continue;

			if (update_type == UPDATE_TYPE_FAST)
				continue;

			ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);

			if (dc->hwss.program_triplebuffer != NULL &&
					!dc->debug.disable_tri_buf) {
				/*turn off triple buffer for full update*/
				dc->hwss.program_triplebuffer(
					dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
			}
			stream_status =
				stream_get_status(context, pipe_ctx->stream);

			if (dc->hwss.apply_ctx_for_surface)
				dc->hwss.apply_ctx_for_surface(
					dc, pipe_ctx->stream, stream_status->plane_count, context);
		}
	}

	if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
		dc->hwss.program_front_end_for_ctx(dc, context);
#ifdef CONFIG_DRM_AMD_DC_DCN
		if (dc->debug.validate_dml_output) {
			for (i = 0; i < dc->res_pool->pipe_count; i++) {
				struct pipe_ctx cur_pipe = context->res_ctx.pipe_ctx[i];

				if (cur_pipe.stream == NULL)
					continue;

				cur_pipe.plane_res.hubp->funcs->validate_dml_output(
						cur_pipe.plane_res.hubp, dc->ctx,
						&context->res_ctx.pipe_ctx[i].rq_regs,
						&context->res_ctx.pipe_ctx[i].dlg_regs,
						&context->res_ctx.pipe_ctx[i].ttu_regs);
			}
		}
#endif
	}

	// Update Type FAST, Surface updates
	if (update_type == UPDATE_TYPE_FAST) {
		if (dc->hwss.set_flip_control_gsl)
			for (i = 0; i < surface_count; i++) {
				struct dc_plane_state *plane_state = srf_updates[i].surface;

				for (j = 0; j < dc->res_pool->pipe_count; j++) {
					struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

					if (pipe_ctx->stream != stream)
						continue;

					if (pipe_ctx->plane_state != plane_state)
						continue;

					// GSL has to be used for flip immediate
					dc->hwss.set_flip_control_gsl(pipe_ctx,
							plane_state->flip_immediate);
				}
			}

		/* Perform requested Updates */
		for (i = 0; i < surface_count; i++) {
			struct dc_plane_state *plane_state = srf_updates[i].surface;

			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->stream != stream)
					continue;

				if (pipe_ctx->plane_state != plane_state)
					continue;

				/*program triple buffer after lock based on flip type*/
				if (dc->hwss.program_triplebuffer != NULL &&
						!dc->debug.disable_tri_buf) {
					/*only enable triplebuffer for fast_update*/
					dc->hwss.program_triplebuffer(
						dc, pipe_ctx, plane_state->triplebuffer_flips);
				}
				if (srf_updates[i].flip_addr)
					dc->hwss.update_plane_addr(dc, pipe_ctx);
			}
		}
	}

	if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
		dc->hwss.interdependent_update_lock(dc, context, false);
	else
		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);

	if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
		if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
					top_pipe_to_program->stream_res.tg,
					CRTC_STATE_VACTIVE);
			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
					top_pipe_to_program->stream_res.tg,
					CRTC_STATE_VBLANK);
			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
					top_pipe_to_program->stream_res.tg,
					CRTC_STATE_VACTIVE);
			top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
					top_pipe_to_program->stream_res.tg);
		}

	if (update_type != UPDATE_TYPE_FAST)
		dc->hwss.post_unlock_program_front_end(dc, context);

	// Fire manual trigger only when bottom plane is flipped
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (pipe_ctx->bottom_pipe ||
				!pipe_ctx->stream ||
				pipe_ctx->stream != stream ||
				!pipe_ctx->plane_state->update_flags.bits.addr_update)
			continue;

		if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
			pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
	}
}
void dc_commit_updates_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		struct dc_state *state)
{
	const struct dc_stream_status *stream_status;
	enum surface_update_type update_type;
	struct dc_state *context;
	struct dc_context *dc_ctx = dc->ctx;
	int i, j;

	stream_status = dc_stream_get_status(stream);
	context = dc->current_state;

	update_type = dc_check_update_surfaces_for_stream(
			dc, srf_updates, surface_count, stream_update, stream_status);

	if (update_type >= update_surface_trace_level)
		update_surface_trace(dc, srf_updates, surface_count);

	if (update_type >= UPDATE_TYPE_FULL) {

		/* initialize scratch memory for building context */
		context = dc_create_state(dc);
		if (context == NULL) {
			DC_ERROR("Failed to allocate new validate context!\n");
			return;
		}

		dc_resource_state_copy_construct(state, context);

		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

			if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
				new_pipe->plane_state->force_full_update = true;
		}
	}

	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *surface = srf_updates[i].surface;

		copy_surface_update_to_plane(surface, &srf_updates[i]);

		if (update_type >= UPDATE_TYPE_MED) {
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx =
					&context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->plane_state != surface)
					continue;

				resource_build_scaling_params(pipe_ctx);
			}
		}
	}

	copy_stream_update_to_stream(dc, context, stream, stream_update);

	commit_planes_for_stream(
			dc,
			srf_updates,
			surface_count,
			stream,
			stream_update,
			update_type,
			context);
	/*update current_State*/
	if (dc->current_state != context) {

		struct dc_state *old = dc->current_state;

		dc->current_state = context;
		dc_release_state(old);

		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
				pipe_ctx->plane_state->force_full_update = false;
		}
	}
	/*let's use current_state to update watermark etc*/
	if (update_type >= UPDATE_TYPE_FULL)
		dc_post_update_surfaces_to_stream(dc);
}
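
/*
 * Illustrative sketch, not part of the original file: a minimal page flip
 * driven through dc_commit_updates_for_stream(). The DM normally owns the
 * plane and stream objects; the helper name is hypothetical.
 */
static inline void example_commit_flip(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_plane_state *plane,
		const struct dc_plane_address *new_address)
{
	struct dc_surface_update srf_update = { 0 };
	struct dc_flip_addrs flip_addr = { 0 };

	flip_addr.address = *new_address;	/* new scanout address */
	srf_update.surface = plane;
	srf_update.flip_addr = &flip_addr;	/* classifies as a FAST update */

	dc_commit_updates_for_stream(dc, &srf_update, 1, stream,
			NULL, dc->current_state);
}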
uint8_t dc_get_current_stream_count(struct dc *dc)
{
	return dc->current_state->stream_count;
}

struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
{
	if (i < dc->current_state->stream_count)
		return dc->current_state->streams[i];
	return NULL;
}
enum dc_irq_source dc_interrupt_to_irq_source(
		struct dc *dc,
		uint32_t src_id,
		uint32_t ext_id)
{
	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}

/*
 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
 */
bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
{
	if (dc == NULL)
		return false;

	return dal_irq_service_set(dc->res_pool->irqs, src, enable);
}

void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
{
	dal_irq_service_ack(dc->res_pool->irqs, src);
}
void dc_set_power_state(
	struct dc *dc,
	enum dc_acpi_cm_power_state power_state)
{
	struct kref refcount;
	struct display_mode_lib *dml;

	switch (power_state) {
	case DC_ACPI_CM_POWER_STATE_D0:
		dc_resource_state_construct(dc, dc->current_state);

		if (dc->ctx->dmub_srv)
			dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);

		dc->hwss.init_hw(dc);

		if (dc->hwss.init_sys_ctx != NULL &&
			dc->vm_pa_config.valid) {
			dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
		}

		break;
	default:
		ASSERT(dc->current_state->stream_count == 0);
		/* Zero out the current context so that on resume we start with
		 * clean state, and dc hw programming optimizations will not
		 * cause any trouble.
		 */
		dml = kzalloc(sizeof(struct display_mode_lib),
				GFP_KERNEL);

		ASSERT(dml);
		if (!dml)
			return;

		/* Preserve refcount */
		refcount = dc->current_state->refcount;
		/* Preserve display mode lib */
		memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));

		dc_resource_state_destruct(dc->current_state);
		memset(dc->current_state, 0,
				sizeof(*dc->current_state));

		dc->current_state->refcount = refcount;
		dc->current_state->bw_ctx.dml = *dml;

		kfree(dml);

		break;
	}
}
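
/*
 * Suspend/resume ordering as typically driven by the DM (illustrative, not
 * mandated by this file): dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D3)
 * on suspend; dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D0) followed by
 * dc_resume() on resume, so links come back up on re-initialized hardware.
 */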
void dc_resume(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++)
		core_link_resume(dc->links[i]);
}

unsigned int dc_get_current_backlight_pwm(struct dc *dc)
{
	struct abm *abm = dc->res_pool->abm;

	ASSERT(abm);
	return abm->funcs->get_current_backlight(abm);
}

unsigned int dc_get_target_backlight_pwm(struct dc *dc)
{
	struct abm *abm = dc->res_pool->abm;

	ASSERT(abm);
	return abm->funcs->get_target_backlight(abm);
}

bool dc_is_dmcu_initialized(struct dc *dc)
{
	struct dmcu *dmcu = dc->res_pool->dmcu;

	if (dmcu)
		return dmcu->funcs->is_dmcu_initialized(dmcu);
	return false;
}
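
/*
 * Illustrative sketch, not part of the original file: a single-payload I2C
 * write over a link's DDC line via dc_submit_i2c(). The i2c_command field
 * usage reflects this writer's reading of dc_types.h; treat it as a sketch.
 */
static inline bool example_ddc_write_byte(struct dc *dc, uint32_t link_index,
		uint8_t address, uint8_t *value)
{
	struct i2c_payload payload = {
		.write = true,		/* write transaction */
		.address = address,	/* 7-bit slave address */
		.length = 1,
		.data = value,
	};
	struct i2c_command cmd = {
		.payloads = &payload,
		.number_of_payloads = 1,
		.engine = I2C_COMMAND_ENGINE_DEFAULT,
		.speed = 100,		/* KHz */
	};

	return dc_submit_i2c(dc, link_index, &cmd);
}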
bool dc_submit_i2c(
		struct dc *dc,
		uint32_t link_index,
		struct i2c_command *cmd)
{
	struct dc_link *link = dc->links[link_index];
	struct ddc_service *ddc = link->ddc;

	return dce_i2c_submit_command(
		dc->res_pool,
		ddc->ddc_pin,
		cmd);
}

bool dc_submit_i2c_oem(
		struct dc *dc,
		struct i2c_command *cmd)
{
	struct ddc_service *ddc = dc->res_pool->oem_device;

	return dce_i2c_submit_command(
		dc->res_pool,
		ddc->ddc_pin,
		cmd);
}

static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
{
	if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	dc_sink_retain(sink);

	dc_link->remote_sinks[dc_link->sink_count] = sink;
	dc_link->sink_count++;

	return true;
}

/*
 * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
 *
 * EDID length is in bytes
 */
struct dc_sink *dc_link_add_remote_sink(
		struct dc_link *link,
		const uint8_t *edid,
		int len,
		struct dc_sink_init_data *init_data)
{
	struct dc_sink *dc_sink;
	enum dc_edid_status edid_status;

	if (len > DC_MAX_EDID_BUFFER_SIZE) {
		dm_error("Max EDID buffer size breached!\n");
		return NULL;
	}

	if (!init_data) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	if (!init_data->link) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dc_sink = dc_sink_create(init_data);

	if (!dc_sink)
		return NULL;

	memmove(dc_sink->dc_edid.raw_edid, edid, len);
	dc_sink->dc_edid.length = len;

	if (!link_add_remote_sink_helper(
			link,
			dc_sink))
		goto fail_add_sink;

	edid_status = dm_helpers_parse_edid_caps(
			link->ctx,
			&dc_sink->dc_edid,
			&dc_sink->edid_caps);

	/*
	 * Treat device as no EDID device if EDID
	 * parsing fails
	 */
	if (edid_status != EDID_OK) {
		dc_sink->dc_edid.length = 0;
		dm_error("Bad EDID, status %d!\n", edid_status);
	}

	return dc_sink;

fail_add_sink:
	dc_sink_release(dc_sink);
	return NULL;
}

/*
 * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
 *
 * Note that this just removes the struct dc_sink - it doesn't
 * program hardware or alter other members of dc_link
 */
void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
{
	int i;

	if (!link->sink_count) {
		BREAK_TO_DEBUGGER();
		return;
	}

	for (i = 0; i < link->sink_count; i++) {
		if (link->remote_sinks[i] == sink) {
			dc_sink_release(sink);
			link->remote_sinks[i] = NULL;

			/* shrink array to remove empty place */
			while (i < link->sink_count - 1) {
				link->remote_sinks[i] = link->remote_sinks[i+1];
				i++;
			}
			link->remote_sinks[i] = NULL;
			link->sink_count--;
			return;
		}
	}
}
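
/*
 * Illustrative sketch, not part of the original file: an MST-aware DM
 * typically creates one remote sink per downstream display it discovers and
 * removes it again on unplug. The helper name is hypothetical.
 */
static inline void example_remote_sink_lifetime(struct dc_link *link,
		const uint8_t *edid, int len)
{
	struct dc_sink_init_data init_data = { .link = link };
	struct dc_sink *sink;

	sink = dc_link_add_remote_sink(link, edid, len, &init_data);
	if (sink)
		dc_link_remove_remote_sink(link, sink);	/* drops the refcount */
}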
void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
{
	info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
	info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
	info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
	info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
	info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
	info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
	info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
	info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
	info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
}
enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
{
	if (dc->hwss.set_clock)
		return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
	return DC_ERROR_UNEXPECTED;
}

void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
{
	if (dc->hwss.get_clock)
		dc->hwss.get_clock(dc, clock_type, clock_cfg);
}
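
/*
 * Illustrative sketch, not part of the original file: query a clock's bounds
 * before requesting a new value. The dc_clock_config field names reflect
 * this writer's reading of dc.h; treat them as assumptions.
 */
static inline void example_override_dispclk(struct dc *dc, uint32_t khz)
{
	struct dc_clock_config cfg = { 0 };

	dc_get_clock(dc, DC_CLOCK_TYPE_DISPCLK, &cfg);
	if (khz >= cfg.min_clock_khz && khz <= cfg.max_clock_khz)
		dc_set_clock(dc, DC_CLOCK_TYPE_DISPCLK, khz, 0);
}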