/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include <linux/slab.h>
#include <linux/mm.h>

#include "dm_services.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "resource.h"

#include "clk_mgr.h"
#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"

#include "link_hwss.h"
#include "link_encoder.h"

#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "mem_input.h"
#include "hubp.h"

#include "dc_link_dp.h"

#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#include "dsc.h"
#endif

#ifdef CONFIG_DRM_AMD_DC_DCN2_0
#include "vm_helper.h"
#endif

#include "dce/dce_i2c.h"

#define DC_LOGGER \
	dc->ctx->logger

static const char DC_BUILD_ID[] = "production-build";

/**
 * DOC: Overview
 *
 * DC is the OS-agnostic component of the amdgpu DC driver.
 *
 * DC maintains and validates a set of structs representing the state of the
 * driver and writes that state to AMD hardware.
 *
 * Main DC HW structs:
 *
 * struct dc - The central struct. One per driver. Created on driver load,
 * destroyed on driver unload.
 *
 * struct dc_context - One per driver.
 * Used as a backpointer by most other structs in dc.
 *
 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
 * plugpoints). Created on driver load, destroyed on driver unload.
 *
 * struct dc_sink - One per display. Created on boot or hotplug.
 * Destroyed on shutdown or hotunplug. A dc_link can have a local sink
 * (the display directly attached). It may also have one or more remote
 * sinks (in the Multi-Stream Transport case).
 *
 * struct resource_pool - One per driver. Represents the hw blocks not in the
 * main pipeline. Not directly accessible by dm.
 *
 * Main dc state structs:
 *
 * These structs can be created and destroyed as needed. There is a full set of
 * these structs in dc->current_state representing the currently programmed state.
 *
 * struct dc_state - The global DC state to track global state information,
 * such as bandwidth values.
 *
 * struct dc_stream_state - Represents the hw configuration for the pipeline from
 * a framebuffer to a display. Maps one-to-one with dc_sink.
 *
 * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
 * and may have more in the Multi-Plane Overlay case.
 *
 * struct resource_context - Represents the programmable state of everything in
 * the resource_pool. Not directly accessible by dm.
 *
 * struct pipe_ctx - A member of struct resource_context. Represents the
 * internal hardware pipeline components. Each dc_plane_state has either
 * one or two (in the pipe-split case).
 */
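
/*
 * Illustrative sketch only, not driver code: given the hierarchy above, a
 * DM-layer caller holding a struct dc pointer could walk the committed
 * state roughly as follows. Field names match the structs documented
 * above; locking and error handling are elided.
 *
 *	int i;
 *
 *	for (i = 0; i < dc->current_state->stream_count; i++) {
 *		struct dc_stream_state *stream = dc->current_state->streams[i];
 *
 *		DC_LOG_DC("stream %d: %d plane(s)\n", i,
 *			  dc->current_state->stream_status[i].plane_count);
 *	}
 */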

/*******************************************************************************
 * Private functions
 ******************************************************************************/

static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
	if (new > *original)
		*original = new;
}

static void destroy_links(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (dc->links[i])
			link_destroy(&dc->links[i]);
	}
}

static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = link_create(&link_init_params);

		if (link) {
			if (dc->config.edp_not_connected &&
					link->connector_signal == SIGNAL_TYPE_EDP) {
				link_destroy(&link);
			} else {
				dc->links[dc->link_count] = link;
				link->dc = dc;
				++dc->link_count;
			}
		}
	}

	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	return true;

failed_alloc:
	return false;
}

static struct dc_perf_trace *dc_perf_trace_create(void)
{
	return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
}

static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
	kfree(*perf_trace);
	*perf_trace = NULL;
}

/**
 * dc_stream_adjust_vmin_vmax() - Adjust DRR timing bounds for a stream.
 * @dc: dc reference
 * @stream: Initial dc stream state
 * @adjust: Updated parameters for vertical_total_min and vertical_total_max
 *
 * Looks up the pipe context of dc_stream_state and updates the
 * vertical_total_min and vertical_total_max of DRR (Dynamic Refresh Rate),
 * a power-saving feature that targets reducing the panel refresh rate
 * while the screen is static.
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_crtc_timing_adjust *adjust)
{
	int i = 0;
	bool ret = false;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			pipe->stream->adjust = *adjust;
			dc->hwss.set_drr(&pipe,
					1,
					adjust->v_total_min,
					adjust->v_total_max);

			ret = true;
		}
	}
	return ret;
}
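
/*
 * Illustrative usage sketch, not part of the driver: a DM-side caller with a
 * committed stream might widen the DRR window like this. The +360 line
 * margin is an arbitrary example value, not a recommendation.
 *
 *	struct dc_crtc_timing_adjust adjust = {
 *		.v_total_min = stream->timing.v_total,
 *		.v_total_max = stream->timing.v_total + 360,
 *	};
 *
 *	if (!dc_stream_adjust_vmin_vmax(dc, stream, &adjust))
 *		DC_LOG_WARNING("stream has no pipe; DRR not programmed\n");
 */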

bool dc_stream_get_crtc_position(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		unsigned int *v_pos, unsigned int *nom_v_pos)
{
	/* TODO: Support multiple streams */
	const struct dc_stream_state *stream = streams[0];
	int i = 0;
	bool ret = false;
	struct crtc_position position;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.get_position(&pipe, 1, &position);

			*v_pos = position.vertical_count;
			*nom_v_pos = position.nominal_vcount;
			ret = true;
		}
	}
	return ret;
}

/**
 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 * @dc: DC Object
 * @stream: The stream to configure CRC on.
 * @enable: Enable CRC if true, disable otherwise.
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *              once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the CRC.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
			     bool enable, bool continuous)
{
	int i;
	struct pipe_ctx *pipe;
	struct crc_params param;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	/* Always capture the full frame */
	param.windowa_x_start = 0;
	param.windowa_y_start = 0;
	param.windowa_x_end = pipe->stream->timing.h_addressable;
	param.windowa_y_end = pipe->stream->timing.v_addressable;
	param.windowb_x_start = 0;
	param.windowb_y_start = 0;
	param.windowb_x_end = pipe->stream->timing.h_addressable;
	param.windowb_y_end = pipe->stream->timing.v_addressable;

	/* Default to the union of both windows */
	param.selection = UNION_WINDOW_A_B;
	param.continuous_mode = continuous;
	param.enable = enable;

	tg = pipe->stream_res.tg;

	/* Only call if supported */
	if (tg->funcs->configure_crc)
		return tg->funcs->configure_crc(tg, &param);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}

/**
 * dc_stream_get_crc() - Get CRC values for the given stream.
 * @dc: DC object
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr: CRC value for the R/Cr channel is stored here.
 * @g_y: CRC value for the G/Y channel is stored here.
 * @b_cb: CRC value for the B/Cb channel is stored here.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 * Return false if stream is not found, or if CRCs are not enabled.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
		       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	int i;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	tg = pipe->stream_res.tg;

	if (tg->funcs->get_crc)
		return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}
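
/*
 * Illustrative usage sketch, not part of the driver: one-shot CRC capture
 * with the two helpers above. A real caller must wait for the configured
 * frame to complete before reading; that delay is elided here.
 *
 *	uint32_t r_cr, g_y, b_cb;
 *
 *	if (dc_stream_configure_crc(dc, stream, true, false) &&
 *	    dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
 *		DC_LOG_DC("CRC: %08x %08x %08x\n", r_cr, g_y, b_cb);
 */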

void dc_stream_set_dither_option(struct dc_stream_state *stream,
		enum dc_dither_option option)
{
	struct bit_depth_reduction_params params;
	struct dc_link *link = stream->link;
	struct pipe_ctx *pipes = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
				stream) {
			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	if (!pipes)
		return;
	if (option > DITHER_OPTION_MAX)
		return;

	stream->dither_option = option;

	memset(&params, 0, sizeof(params));
	resource_build_bit_depth_reduction_params(stream, &params);
	stream->bit_depth_params = params;

	if (pipes->plane_res.xfm &&
	    pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
		pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
			pipes->plane_res.xfm,
			pipes->plane_res.scl_data.lb_params.depth,
			&stream->bit_depth_params);
	}

	pipes->stream_res.opp->funcs->
		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}

bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
	int i = 0;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_gamut_remap(pipes);
			ret = true;
		}
	}

	return ret;
}

bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
	int i = 0;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {

			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_output_csc(dc,
						pipes,
						stream->output_color_space,
						stream->csc_color_matrix.matrix,
						pipes->stream_res.opp->inst);
			ret = true;
		}
	}

	return ret;
}

void dc_stream_set_static_screen_events(struct dc *dc,
		struct dc_stream_state **streams,
		int num_streams,
		const struct dc_static_screen_events *events)
{
	int i = 0;
	int j = 0;
	struct pipe_ctx *pipes_affected[MAX_PIPES];
	int num_pipes_affected = 0;

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream = streams[i];

		for (j = 0; j < MAX_PIPES; j++) {
			if (dc->current_state->res_ctx.pipe_ctx[j].stream
					== stream) {
				pipes_affected[num_pipes_affected++] =
						&dc->current_state->res_ctx.pipe_ctx[j];
			}
		}
	}

	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
}

static void destruct(struct dc *dc)
{
	if (dc->current_state) {
		dc_release_state(dc->current_state);
		dc->current_state = NULL;
	}

	destroy_links(dc);

	if (dc->clk_mgr) {
		dc_destroy_clk_mgr(dc->clk_mgr);
		dc->clk_mgr = NULL;
	}

	dc_destroy_resource_pool(dc);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	dc_perf_trace_destroy(&dc->ctx->perf_trace);

	kfree(dc->ctx);
	dc->ctx = NULL;

	kfree(dc->bw_vbios);
	dc->bw_vbios = NULL;

	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;

#endif
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
	kfree(dc->vm_helper);
	dc->vm_helper = NULL;

#endif
}

static bool construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;
#endif

	enum dce_version dc_version = DCE_VERSION_UNKNOWN;
	dc->config = init_params->flags;

#ifdef CONFIG_DRM_AMD_DC_DCN2_0
	// Allocate memory for the vm_helper
	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);

#endif
	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
	dc->soc_bounding_box = init_params->soc_bounding_box;
#endif
#endif

	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	if (!dc_ctx) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;
	dc_ctx->dc_sink_id_count = 0;
	dc_ctx->dc_stream_id_count = 0;
	dc->ctx = dc_ctx;

	/* Create logger */

	dc_ctx->dce_environment = init_params->dce_environment;

	dc_version = resource_parse_asic_id(init_params->asic_id);
	dc_ctx->dce_version = dc_version;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		dc_ctx->created_bios = true;
	}

	dc_ctx->perf_trace = dc_perf_trace_create();
	if (!dc_ctx->perf_trace) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_version);
	if (!dc->res_pool)
		goto fail;

	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
	if (!dc->clk_mgr)
		goto fail;

	/* Creation of current_state must occur after dc->dml
	 * is initialized in dc_create_resource_pool because
	 * on creation it copies the contents of dc->dml
	 */

	dc->current_state = dc_create_state(dc);

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	dc_resource_state_construct(dc, dc->current_state);

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	return true;

fail:

	destruct(dc);
	return false;
}

#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
static bool disable_all_writeback_pipes_for_stream(
		const struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_state *context)
{
	int i;

	for (i = 0; i < stream->num_wb_info; i++)
		stream->writeback_info[i].wb_enabled = false;

	return true;
}
#endif

static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_create_state(dc);
	struct dc_state *current_ctx;

	if (dangling_context == NULL)
		return;

	dc_resource_state_copy_construct(dc->current_state, dangling_context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;

		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (should_disable && old_stream) {
			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
#endif
			dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
		}
	}

	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_release_state(current_ctx);
}

/*******************************************************************************
 * Public functions
 ******************************************************************************/

struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (!dc)
		goto alloc_fail;

	if (!construct(dc, init_params))
		goto construct_fail;

	/* TODO: separate HW and SW initialization */
	dc->hwss.init_hw(dc);

	full_pipe_count = dc->res_pool->pipe_count;
	if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
		full_pipe_count--;
	dc->caps.max_streams = min(
			full_pipe_count,
			dc->res_pool->stream_enc_count);

	dc->caps.max_links = dc->link_count;
	dc->caps.max_audios = dc->res_pool->audio_count;
	dc->caps.linear_pitch_alignment = 64;

	/* Populate versioning information */
	dc->versions.dc_ver = DC_VER;

	if (dc->res_pool->dmcu != NULL)
		dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;

	dc->build_id = DC_BUILD_ID;

	DC_LOG_DC("Display Core initialized\n");

	return dc;

construct_fail:
	kfree(dc);

alloc_fail:
	return NULL;
}

void dc_init_callbacks(struct dc *dc,
		const struct dc_callback_init *init_params)
{
}

void dc_destroy(struct dc **dc)
{
	destruct(*dc);
	kfree(*dc);
	*dc = NULL;
}

static void enable_timing_multisync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i = 0, multisync_count = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream ||
				!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
			continue;
		if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
			continue;
		multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
		multisync_count++;
	}

	if (multisync_count > 0) {
		dc->hwss.enable_per_frame_crtc_position_reset(
			dc, multisync_count, multisync_pipes);
	}
}

static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j, k;
	int group_index = 0;
	int num_group = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set, search rest of the tg's for ones with
		 * same timing, add all tgs with same timing to the group
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;

			if (resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first pipe with plane as master */
		for (j = 0; j < group_size; j++) {
			struct pipe_ctx *temp;

			if (pipe_set[j]->plane_state) {
				if (j == 0)
					break;

				temp = pipe_set[0];
				pipe_set[0] = pipe_set[j];
				pipe_set[j] = temp;
				break;
			}
		}

		for (k = 0; k < group_size; k++) {
			struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);

			status->timing_sync_info.group_id = num_group;
			status->timing_sync_info.group_size = group_size;
			if (k == 0)
				status->timing_sync_info.master = true;
			else
				status->timing_sync_info.master = false;
		}
		/* remove any other pipes with plane as they have already been synced */
		for (j = j + 1; j < group_size; j++) {
			if (pipe_set[j]->plane_state) {
				group_size--;
				pipe_set[j] = pipe_set[group_size];
				j--;
			}
		}

		if (group_size > 1) {
			dc->hwss.enable_timing_synchronization(
				dc, group_index, group_size, pipe_set);
			group_index++;
		}
		num_group++;
	}
}

static bool context_changed(
		struct dc *dc,
		struct dc_state *context)
{
	uint8_t i;

	if (context->stream_count != dc->current_state->stream_count)
		return true;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i] != context->streams[i])
			return true;
	}

	return false;
}

bool dc_validate_seamless_boot_timing(const struct dc *dc,
				const struct dc_sink *sink,
				struct dc_crtc_timing *crtc_timing)
{
	struct timing_generator *tg;
	struct dc_link *link = sink->link;
	unsigned int inst;

	/* Check for enabled DIG to identify enabled display */
	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
		return false;

	/* Check which front end is used by this encoder.
	 * Note the inst is 1 indexed, where 0 is undefined.
	 * Note that DIG_FE can source from different OTG but our
	 * current implementation always maps 1-to-1, so this code makes
	 * the same assumption and doesn't check OTG source.
	 */
	inst = link->link_enc->funcs->get_dig_frontend(link->link_enc) - 1;

	/* Instance should be within the range of the pool */
	if (inst >= dc->res_pool->pipe_count)
		return false;

	tg = dc->res_pool->timing_generators[inst];

	if (!tg->funcs->is_matching_timing)
		return false;

	if (!tg->funcs->is_matching_timing(tg, crtc_timing))
		return false;

	if (dc_is_dp_signal(link->connector_signal)) {
		unsigned int pix_clk_100hz;

		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
			dc->res_pool->dp_clock_source,
			inst, &pix_clk_100hz);

		if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
			return false;
	}

	return true;
}

bool dc_enable_stereo(
	struct dc *dc,
	struct dc_state *context,
	struct dc_stream_state *streams[],
	uint8_t stream_count)
{
	bool ret = true;
	int i, j;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		if (context != NULL)
			pipe = &context->res_ctx.pipe_ctx[i];
		else
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		for (j = 0; pipe && j < stream_count; j++) {
			if (streams[j] && streams[j] == pipe->stream &&
				dc->hwss.setup_stereo)
				dc->hwss.setup_stereo(pipe, dc);
		}
	}

	return ret;
}

/*
 * Applies the given context to HW and copies it into the current context.
 * It's up to the user to release the src context afterwards.
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};

	disable_dangling_plane(dc, context);

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] = context->streams[i];

	if (!dcb->funcs->is_accelerated_mode(dcb))
		dc->hwss.enable_accelerated_mode(dc, context);

	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization)
			dc->optimize_seamless_boot = true;
	}

	if (!dc->optimize_seamless_boot)
		dc->hwss.prepare_bandwidth(dc, context);

	/* re-program planes for existing stream, in case we need to
	 * free up plane resource for later use
	 */
	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->mode_changed)
			continue;

		dc->hwss.apply_ctx_for_surface(
			dc, context->streams[i],
			context->stream_status[i].plane_count,
			context); /* use new pipe config in new context */
	}

	/* Program hardware */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	if (result != DC_OK)
		return result;

	if (context->stream_count > 1) {
		enable_timing_multisync(dc, context);
		program_timing_sync(dc, context);
	}

	/* Program all planes within new context */
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_link *link = context->streams[i]->link;

		if (!context->streams[i]->mode_changed)
			continue;

		dc->hwss.apply_ctx_for_surface(
			dc, context->streams[i],
			context->stream_status[i].plane_count,
			context);

		/*
		 * enable stereo
		 * TODO rework dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0; pipe && l < context->stream_count; l++) {
				if (context->streams[l] &&
					context->streams[l] == pipe->stream &&
					dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_100hz / 10);
	}

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	if (!dc->optimize_seamless_boot)
		/* pplib is notified if disp_num changed */
		dc->hwss.optimize_bandwidth(dc, context);

	for (i = 0; i < context->stream_count; i++)
		context->streams[i]->mode_changed = false;

	memset(&context->commit_hints, 0, sizeof(context->commit_hints));

	dc_release_state(dc->current_state);

	dc->current_state = context;

	dc_retain_state(dc->current_state);

	return result;
}

bool dc_commit_state(struct dc *dc, struct dc_state *context)
{
	enum dc_status result = DC_ERROR_UNEXPECTED;
	int i;

	if (!context_changed(dc, context))
		return true;

	DC_LOG_DC("%s: %d streams\n",
			__func__, context->stream_count);

	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		dc_stream_log(dc, stream);
	}

	result = dc_commit_state_no_check(dc, context);

	return (result == DC_OK);
}
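
/*
 * Illustrative commit-flow sketch, not part of the driver: the typical
 * sequence a DM layer follows around dc_commit_state. The helpers named
 * here (dc_resource_state_copy_construct_current, dc_add_stream_to_ctx,
 * dc_validate_global_state) live in other core files; error handling is
 * elided and exact signatures should be checked against the headers.
 *
 *	struct dc_state *context = dc_create_state(dc);
 *
 *	dc_resource_state_copy_construct_current(dc, context);
 *	dc_add_stream_to_ctx(dc, context, new_stream);
 *	if (dc_validate_global_state(dc, context, false) == DC_OK)
 *		dc_commit_state(dc, context);
 *	dc_release_state(context);
 */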

bool dc_post_update_surfaces_to_stream(struct dc *dc)
{
	int i;
	struct dc_state *context = dc->current_state;

	if (!dc->optimized_required || dc->optimize_seamless_boot)
		return true;

	post_surface_trace(dc);

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].stream == NULL ||
		    context->res_ctx.pipe_ctx[i].plane_state == NULL) {
			context->res_ctx.pipe_ctx[i].pipe_idx = i;
			dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
		}

	dc->optimized_required = false;

	dc->hwss.optimize_bandwidth(dc, context);
	return true;
}

struct dc_state *dc_create_state(struct dc *dc)
{
	struct dc_state *context = kvzalloc(sizeof(struct dc_state),
			GFP_KERNEL);

	if (!context)
		return NULL;
	/* Each context must have its own instance of VBA, and in order to
	 * initialize and obtain IP and SOC, the base DML instance from DC is
	 * initially copied into every context.
	 */
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
#endif

	kref_init(&context->refcount);

	return context;
}

struct dc_state *dc_copy_state(struct dc_state *src_ctx)
{
	int i, j;
	struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);

	if (!new_ctx)
		return NULL;
	memcpy(new_ctx, src_ctx, sizeof(struct dc_state));

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];

		if (cur_pipe->top_pipe)
			cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];

		if (cur_pipe->bottom_pipe)
			cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
	}

	for (i = 0; i < new_ctx->stream_count; i++) {
		dc_stream_retain(new_ctx->streams[i]);
		for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
			dc_plane_state_retain(
				new_ctx->stream_status[i].plane_states[j]);
	}

	kref_init(&new_ctx->refcount);

	return new_ctx;
}

void dc_retain_state(struct dc_state *context)
{
	kref_get(&context->refcount);
}

static void dc_state_free(struct kref *kref)
{
	struct dc_state *context = container_of(kref, struct dc_state, refcount);

	dc_resource_state_destruct(context);
	kvfree(context);
}

void dc_release_state(struct dc_state *context)
{
	kref_put(&context->refcount, dc_state_free);
}
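
/*
 * Illustrative refcount sketch, not part of the driver: dc_state objects are
 * kref-managed by the three helpers above. A caller that hands a state to
 * another consumer takes its own reference first:
 *
 *	struct dc_state *context = dc_create_state(dc);	// refcount == 1
 *
 *	dc_retain_state(context);	// refcount == 2, e.g. before sharing
 *	...
 *	dc_release_state(context);	// drops the shared reference
 *	dc_release_state(context);	// refcount hits 0, dc_state_free() runs
 */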

static bool is_surface_in_context(
		const struct dc_state *context,
		const struct dc_plane_state *plane_state)
{
	int j;

	for (j = 0; j < MAX_PIPES; j++) {
		const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (plane_state == pipe_ctx->plane_state) {
			return true;
		}
	}

	return false;
}

static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;

	if (!u->plane_info)
		return UPDATE_TYPE_FAST;

	if (u->plane_info->color_space != u->surface->color_space) {
		update_flags->bits.color_space_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
		update_flags->bits.horizontal_mirror_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->rotation != u->surface->rotation) {
		update_flags->bits.rotation_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->format != u->surface->format) {
		update_flags->bits.pixel_format_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->stereo_format != u->surface->stereo_format) {
		update_flags->bits.stereo_format_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
		update_flags->bits.per_pixel_alpha_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
		update_flags->bits.global_alpha_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->sdr_white_level != u->surface->sdr_white_level) {
		update_flags->bits.sdr_white_level = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->dcc.enable != u->surface->dcc.enable
			|| u->plane_info->dcc.grph.independent_64b_blks != u->surface->dcc.grph.independent_64b_blks
			|| u->plane_info->dcc.grph.meta_pitch != u->surface->dcc.grph.meta_pitch) {
		update_flags->bits.dcc_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (resource_pixel_format_to_bpp(u->plane_info->format) !=
			resource_pixel_format_to_bpp(u->surface->format)) {
		/* different bytes per element will require full bandwidth
		 * and DML calculation
		 */
		update_flags->bits.bpp_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->plane_size.grph.surface_pitch != u->surface->plane_size.grph.surface_pitch
			|| u->plane_info->plane_size.video.luma_pitch != u->surface->plane_size.video.luma_pitch
			|| u->plane_info->plane_size.video.chroma_pitch != u->surface->plane_size.video.chroma_pitch) {
		update_flags->bits.plane_size_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
			sizeof(union dc_tiling_info)) != 0) {
		update_flags->bits.swizzle_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);

		/* todo: below is HW dependent, we should add a hook to
		 * DCE/DCN resource and validate it there.
		 */
		if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
			/* swizzled mode requires RQ to be set up properly,
			 * thus need to run DML to calculate RQ settings
			 */
			update_flags->bits.bandwidth_change = 1;
			elevate_update_type(&update_type, UPDATE_TYPE_FULL);
		}
	}

	/* This should be UPDATE_TYPE_FAST if nothing has changed. */
	return update_type;
}

static enum surface_update_type get_scaling_info_update_type(
		const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (!u->scaling_info)
		return UPDATE_TYPE_FAST;

	if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
			|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
			|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height) {
		update_flags->bits.scaling_change = 1;

		if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
				&& (u->scaling_info->dst_rect.width < u->surface->src_rect.width
					|| u->scaling_info->dst_rect.height < u->surface->src_rect.height))
			/* Making dst rect smaller requires a bandwidth change */
			update_flags->bits.bandwidth_change = 1;
	}

	if (u->scaling_info->src_rect.width != u->surface->src_rect.width
		|| u->scaling_info->src_rect.height != u->surface->src_rect.height) {

		update_flags->bits.scaling_change = 1;
		if (u->scaling_info->src_rect.width > u->surface->src_rect.width
				&& u->scaling_info->src_rect.height > u->surface->src_rect.height)
			/* Making src rect bigger requires a bandwidth change */
			update_flags->bits.clock_change = 1;
	}

	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
			|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
			|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
			|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
		update_flags->bits.position_change = 1;

	if (update_flags->bits.clock_change
			|| update_flags->bits.bandwidth_change)
		return UPDATE_TYPE_FULL;

	if (update_flags->bits.scaling_change
			|| update_flags->bits.position_change)
		return UPDATE_TYPE_MED;

	return UPDATE_TYPE_FAST;
}

static enum surface_update_type det_surface_update(const struct dc *dc,
		const struct dc_surface_update *u)
{
	const struct dc_state *context = dc->current_state;
	enum surface_update_type type;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;
	union surface_update_flags *update_flags = &u->surface->update_flags;

	update_flags->raw = 0; // Reset all flags

	if (u->flip_addr)
		update_flags->bits.addr_update = 1;

	if (!is_surface_in_context(context, u->surface)) {
		update_flags->bits.new_plane = 1;
		return UPDATE_TYPE_FULL;
	}

	if (u->surface->force_full_update) {
		update_flags->bits.full_update = 1;
		return UPDATE_TYPE_FULL;
	}

	type = get_plane_info_update_type(u);
	elevate_update_type(&overall_type, type);

	type = get_scaling_info_update_type(u);
	elevate_update_type(&overall_type, type);

	if (u->flip_addr)
		update_flags->bits.addr_update = 1;

	if (u->in_transfer_func)
		update_flags->bits.in_transfer_func_change = 1;

	if (u->input_csc_color_matrix)
		update_flags->bits.input_csc_change = 1;

	if (u->coeff_reduction_factor)
		update_flags->bits.coeff_reduction_change = 1;

	if (u->gamma) {
		enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;

		if (u->plane_info)
			format = u->plane_info->format;
		else if (u->surface)
			format = u->surface->format;

		if (dce_use_lut(format))
			update_flags->bits.gamma_change = 1;
	}

	if (update_flags->bits.in_transfer_func_change) {
		type = UPDATE_TYPE_MED;
		elevate_update_type(&overall_type, type);
	}

	if (update_flags->bits.input_csc_change
			|| update_flags->bits.coeff_reduction_change
			|| update_flags->bits.gamma_change) {
		type = UPDATE_TYPE_FULL;
		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}

static enum surface_update_type check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;

	if (stream_status == NULL || stream_status->plane_count != surface_count)
		return UPDATE_TYPE_FULL;

	/* some stream updates require passive update */
	if (stream_update) {
		if ((stream_update->src.height != 0) &&
				(stream_update->src.width != 0))
			return UPDATE_TYPE_FULL;

		if ((stream_update->dst.height != 0) &&
				(stream_update->dst.width != 0))
			return UPDATE_TYPE_FULL;

		if (stream_update->out_transfer_func)
			return UPDATE_TYPE_FULL;

		if (stream_update->abm_level)
			return UPDATE_TYPE_FULL;

		if (stream_update->dpms_off)
			return UPDATE_TYPE_FULL;

#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
		if (stream_update->wb_update)
			return UPDATE_TYPE_FULL;
#endif
	}

	for (i = 0; i < surface_count; i++) {
		enum surface_update_type type =
				det_surface_update(dc, &updates[i]);

		if (type == UPDATE_TYPE_FULL)
			return type;

		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}

/**
 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
 *
 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
 */
enum surface_update_type dc_check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type type;

	for (i = 0; i < surface_count; i++)
		updates[i].surface->update_flags.raw = 0;

	type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
	if (type == UPDATE_TYPE_FULL)
		for (i = 0; i < surface_count; i++)
			updates[i].surface->update_flags.raw = 0xFFFFFFFF;

	return type;
}
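
/*
 * Illustrative usage sketch, not part of the driver: classifying a single
 * flip before applying it. The plane, stream_status, and new_address values
 * are assumed to come from the caller's committed state.
 *
 *	struct dc_surface_update srf_update = { .surface = plane };
 *	struct dc_flip_addrs flip = { .address = new_address };
 *	enum surface_update_type type;
 *
 *	srf_update.flip_addr = &flip;
 *	type = dc_check_update_surfaces_for_stream(dc, &srf_update, 1,
 *						   NULL, stream_status);
 *	if (type == UPDATE_TYPE_FULL)
 *		DC_LOG_DC("flip requires full update\n");
 */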
1547
1548 static struct dc_stream_status *stream_get_status(
1549 struct dc_state *ctx,
1550 struct dc_stream_state *stream)
1551 {
1552 uint8_t i;
1553
1554 for (i = 0; i < ctx->stream_count; i++) {
1555 if (stream == ctx->streams[i]) {
1556 return &ctx->stream_status[i];
1557 }
1558 }
1559
1560 return NULL;
1561 }
1562
1563 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
1564
1565 static void copy_surface_update_to_plane(
1566 struct dc_plane_state *surface,
1567 struct dc_surface_update *srf_update)
1568 {
1569 if (srf_update->flip_addr) {
1570 surface->address = srf_update->flip_addr->address;
1571 surface->flip_immediate =
1572 srf_update->flip_addr->flip_immediate;
1573 surface->time.time_elapsed_in_us[surface->time.index] =
1574 srf_update->flip_addr->flip_timestamp_in_us -
1575 surface->time.prev_update_time_in_us;
1576 surface->time.prev_update_time_in_us =
1577 srf_update->flip_addr->flip_timestamp_in_us;
1578 surface->time.index++;
1579 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
1580 surface->time.index = 0;
1581 }
1582
1583 if (srf_update->scaling_info) {
1584 surface->scaling_quality =
1585 srf_update->scaling_info->scaling_quality;
1586 surface->dst_rect =
1587 srf_update->scaling_info->dst_rect;
1588 surface->src_rect =
1589 srf_update->scaling_info->src_rect;
1590 surface->clip_rect =
1591 srf_update->scaling_info->clip_rect;
1592 }
1593
1594 if (srf_update->plane_info) {
1595 surface->color_space =
1596 srf_update->plane_info->color_space;
1597 surface->format =
1598 srf_update->plane_info->format;
1599 surface->plane_size =
1600 srf_update->plane_info->plane_size;
1601 surface->rotation =
1602 srf_update->plane_info->rotation;
1603 surface->horizontal_mirror =
1604 srf_update->plane_info->horizontal_mirror;
1605 surface->stereo_format =
1606 srf_update->plane_info->stereo_format;
1607 surface->tiling_info =
1608 srf_update->plane_info->tiling_info;
1609 surface->visible =
1610 srf_update->plane_info->visible;
1611 surface->per_pixel_alpha =
1612 srf_update->plane_info->per_pixel_alpha;
1613 surface->global_alpha =
1614 srf_update->plane_info->global_alpha;
1615 surface->global_alpha_value =
1616 srf_update->plane_info->global_alpha_value;
1617 surface->dcc =
1618 srf_update->plane_info->dcc;
1619 surface->sdr_white_level =
1620 srf_update->plane_info->sdr_white_level;
1621 }
1622
1623 if (srf_update->gamma &&
1624 (surface->gamma_correction !=
1625 srf_update->gamma)) {
1626 memcpy(&surface->gamma_correction->entries,
1627 &srf_update->gamma->entries,
1628 sizeof(struct dc_gamma_entries));
1629 surface->gamma_correction->is_identity =
1630 srf_update->gamma->is_identity;
1631 surface->gamma_correction->num_entries =
1632 srf_update->gamma->num_entries;
1633 surface->gamma_correction->type =
1634 srf_update->gamma->type;
1635 }
1636
1637 if (srf_update->in_transfer_func &&
1638 (surface->in_transfer_func !=
1639 srf_update->in_transfer_func)) {
1640 surface->in_transfer_func->sdr_ref_white_level =
1641 srf_update->in_transfer_func->sdr_ref_white_level;
1642 surface->in_transfer_func->tf =
1643 srf_update->in_transfer_func->tf;
1644 surface->in_transfer_func->type =
1645 srf_update->in_transfer_func->type;
1646 memcpy(&surface->in_transfer_func->tf_pts,
1647 &srf_update->in_transfer_func->tf_pts,
1648 sizeof(struct dc_transfer_func_distributed_points));
1649 }
1650
1651 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
1652 if (srf_update->func_shaper &&
1653 (surface->in_shaper_func !=
1654 srf_update->func_shaper))
1655 memcpy(surface->in_shaper_func, srf_update->func_shaper,
1656 sizeof(*surface->in_shaper_func));
1657
1658 if (srf_update->lut3d_func &&
1659 (surface->lut3d_func !=
1660 srf_update->lut3d_func))
1661 memcpy(surface->lut3d_func, srf_update->lut3d_func,
1662 sizeof(*surface->lut3d_func));
1663
1664 if (srf_update->blend_tf &&
1665 (surface->blend_tf !=
1666 srf_update->blend_tf))
1667 memcpy(surface->blend_tf, srf_update->blend_tf,
1668 sizeof(*surface->blend_tf));
1669
1670 #endif
1671 if (srf_update->input_csc_color_matrix)
1672 surface->input_csc_color_matrix =
1673 *srf_update->input_csc_color_matrix;
1674
1675 if (srf_update->coeff_reduction_factor)
1676 surface->coeff_reduction_factor =
1677 *srf_update->coeff_reduction_factor;
1678 }
1679
1680 static void copy_stream_update_to_stream(struct dc *dc,
1681 struct dc_state *context,
1682 struct dc_stream_state *stream,
1683 const struct dc_stream_update *update)
1684 {
1685 if (update == NULL || stream == NULL)
1686 return;
1687
1688 if (update->src.height && update->src.width)
1689 stream->src = update->src;
1690
1691 if (update->dst.height && update->dst.width)
1692 stream->dst = update->dst;
1693
1694 if (update->out_transfer_func &&
1695 stream->out_transfer_func != update->out_transfer_func) {
1696 stream->out_transfer_func->sdr_ref_white_level =
1697 update->out_transfer_func->sdr_ref_white_level;
1698 stream->out_transfer_func->tf = update->out_transfer_func->tf;
1699 stream->out_transfer_func->type =
1700 update->out_transfer_func->type;
1701 memcpy(&stream->out_transfer_func->tf_pts,
1702 &update->out_transfer_func->tf_pts,
1703 sizeof(struct dc_transfer_func_distributed_points));
1704 }
1705
1706 if (update->hdr_static_metadata)
1707 stream->hdr_static_metadata = *update->hdr_static_metadata;
1708
1709 if (update->abm_level)
1710 stream->abm_level = *update->abm_level;
1711
1712 if (update->periodic_interrupt0)
1713 stream->periodic_interrupt0 = *update->periodic_interrupt0;
1714
1715 if (update->periodic_interrupt1)
1716 stream->periodic_interrupt1 = *update->periodic_interrupt1;
1717
1718 if (update->gamut_remap)
1719 stream->gamut_remap_matrix = *update->gamut_remap;
1720
1721 /* Note: this being updated after mode set is currently not a use case
1722 * however if it arises OCSC would need to be reprogrammed at the
1723 * minimum
1724 */
1725 if (update->output_color_space)
1726 stream->output_color_space = *update->output_color_space;
1727
1728 if (update->output_csc_transform)
1729 stream->csc_color_matrix = *update->output_csc_transform;
1730
1731 if (update->vrr_infopacket)
1732 stream->vrr_infopacket = *update->vrr_infopacket;
1733
1734 if (update->dpms_off)
1735 stream->dpms_off = *update->dpms_off;
1736
1737 if (update->vsc_infopacket)
1738 stream->vsc_infopacket = *update->vsc_infopacket;
1739
1740 if (update->vsp_infopacket)
1741 stream->vsp_infopacket = *update->vsp_infopacket;
1742
1743 if (update->dither_option)
1744 stream->dither_option = *update->dither_option;
1745 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
1746 /* update current stream with writeback info */
1747 if (update->wb_update) {
1748 int i;
1749
1750 stream->num_wb_info = update->wb_update->num_wb_info;
1751 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
1752 for (i = 0; i < stream->num_wb_info; i++)
1753 stream->writeback_info[i] =
1754 update->wb_update->writeback_info[i];
1755 }
1756 #endif
1757 #if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
1758 if (update->dsc_config) {
1759 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
1760 uint32_t old_dsc_enabled = stream->timing.flags.DSC;
1761 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
1762 update->dsc_config->num_slices_v != 0);
1763
1764 stream->timing.dsc_cfg = *update->dsc_config;
1765 stream->timing.flags.DSC = enable_dsc;
1766 if (!dc->res_pool->funcs->validate_bandwidth(dc, context,
1767 true)) {
1768 stream->timing.dsc_cfg = old_dsc_cfg;
1769 stream->timing.flags.DSC = old_dsc_enabled;
1770 }
1771 }
1772 #endif
1773 }
1774
1775 static void commit_planes_do_stream_update(struct dc *dc,
1776 struct dc_stream_state *stream,
1777 struct dc_stream_update *stream_update,
1778 enum surface_update_type update_type,
1779 struct dc_state *context)
1780 {
1781 int j;
1782
1783 // Stream updates
1784 for (j = 0; j < dc->res_pool->pipe_count; j++) {
1785 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1786
1787 if (!pipe_ctx->top_pipe &&
1788 pipe_ctx->stream &&
1789 pipe_ctx->stream == stream) {
1790
1791 if (stream_update->periodic_interrupt0 &&
1792 dc->hwss.setup_periodic_interrupt)
1793 dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE0);
1794
1795 if (stream_update->periodic_interrupt1 &&
1796 dc->hwss.setup_periodic_interrupt)
1797 dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE1);
1798
1799 if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
1800 stream_update->vrr_infopacket ||
1801 stream_update->vsc_infopacket ||
1802 stream_update->vsp_infopacket) {
1803 resource_build_info_frame(pipe_ctx);
1804 dc->hwss.update_info_frame(pipe_ctx);
1805 }
1806
1807 if (stream_update->gamut_remap)
1808 dc_stream_set_gamut_remap(dc, stream);
1809
1810 if (stream_update->output_csc_transform)
1811 dc_stream_program_csc_matrix(dc, stream);
1812
1813 if (stream_update->dither_option) {
1814 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
1815 struct pipe_ctx *odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
1816 #endif
1817 resource_build_bit_depth_reduction_params(pipe_ctx->stream,
1818 &pipe_ctx->stream->bit_depth_params);
1819 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
1820 &stream->bit_depth_params,
1821 &stream->clamping);
1822 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
1823 if (odm_pipe)
1824 odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
1825 &stream->bit_depth_params,
1826 &stream->clamping);
1827 #endif
1828 }
1829
1830 #if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
1831 if (stream_update->dsc_config && dc->hwss.pipe_control_lock_global) {
1832 dc->hwss.pipe_control_lock_global(dc, pipe_ctx, true);
1833 dp_update_dsc_config(pipe_ctx);
1834 dc->hwss.pipe_control_lock_global(dc, pipe_ctx, false);
1835 }
1836 #endif
1837 /* Full fe update*/
1838 if (update_type == UPDATE_TYPE_FAST)
1839 continue;
1840
1841 if (stream_update->dpms_off) {
1842 dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
1843 if (*stream_update->dpms_off) {
1844 core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE);
1845 dc->hwss.optimize_bandwidth(dc, dc->current_state);
1846 } else {
1847 dc->hwss.prepare_bandwidth(dc, dc->current_state);
1848 core_link_enable_stream(dc->current_state, pipe_ctx);
1849 }
1850 dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
1851 }
1852
1853 if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
1854 if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
1855 // if otg funcs defined check if blanked before programming
1856 if (!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
1857 pipe_ctx->stream_res.abm->funcs->set_abm_level(
1858 pipe_ctx->stream_res.abm, stream->abm_level);
1859 } else
1860 pipe_ctx->stream_res.abm->funcs->set_abm_level(
1861 pipe_ctx->stream_res.abm, stream->abm_level);
1862 }
1863 }
1864 }
1865 }
1866
static void commit_planes_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int i, j;
	struct pipe_ctx *top_pipe_to_program = NULL;

	if (dc->optimize_seamless_boot && surface_count > 0) {
		/* The seamless boot flag keeps clocks and watermarks high until the
		 * first flip; after that, optimization is required to lower
		 * bandwidth. Note that UEFI is expected to light up only a single
		 * display on POST, so we expect only one stream with the seamless
		 * boot flag set.
		 */
		if (stream->apply_seamless_boot_optimization) {
			stream->apply_seamless_boot_optimization = false;
			dc->optimize_seamless_boot = false;
			dc->optimized_required = true;
		}
	}

	if (update_type == UPDATE_TYPE_FULL && !dc->optimize_seamless_boot) {
		dc->hwss.prepare_bandwidth(dc, context);
		context_clock_trace(dc, context);
	}

	// Stream updates
	if (stream_update)
		commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);

	if (surface_count == 0) {
		/*
		 * When turning off the screen there is no need to program the front
		 * end a second time; just return after programming blank.
		 */
		dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
		return;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
	if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
		for (i = 0; i < surface_count; i++) {
			struct dc_plane_state *plane_state = srf_updates[i].surface;

			/* set logical flag for lock/unlock use */
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (!pipe_ctx->plane_state)
					continue;
				if (pipe_ctx->plane_state != plane_state)
					continue;
				plane_state->triplebuffer_flips = false;
				if (update_type == UPDATE_TYPE_FAST &&
						dc->hwss.program_triplebuffer != NULL &&
						!plane_state->flip_immediate &&
						!dc->debug.disable_tri_buf) {
					/* triple buffer for VUpdate only */
					plane_state->triplebuffer_flips = true;
				}
			}
		}
	}
#endif

	// Update Type FULL, Surface updates
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->top_pipe &&
			pipe_ctx->stream &&
			pipe_ctx->stream == stream) {
			struct dc_stream_status *stream_status = NULL;

			top_pipe_to_program = pipe_ctx;

			if (!pipe_ctx->plane_state)
				continue;

			/* Everything below is a full front end update; skip it for fast updates */
			if (update_type == UPDATE_TYPE_FAST)
				continue;

#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
			ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);

			if (dc->hwss.program_triplebuffer != NULL &&
					!dc->debug.disable_tri_buf) {
				/* turn off triple buffering for full updates */
				dc->hwss.program_triplebuffer(
					dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
			}
#endif
			stream_status =
				stream_get_status(context, pipe_ctx->stream);

			dc->hwss.apply_ctx_for_surface(
				dc, pipe_ctx->stream, stream_status->plane_count, context);
		}
	}

	// Update Type FAST, Surface updates
	if (update_type == UPDATE_TYPE_FAST) {
		/* Lock the top pipe while updating plane addresses, since freesync
		 * requires plane address update event triggers to be synchronized.
		 * top_pipe_to_program is expected to never be NULL.
		 */
		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);

#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
		if (dc->hwss.set_flip_control_gsl)
			for (i = 0; i < surface_count; i++) {
				struct dc_plane_state *plane_state = srf_updates[i].surface;

				for (j = 0; j < dc->res_pool->pipe_count; j++) {
					struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

					if (pipe_ctx->stream != stream)
						continue;

					if (pipe_ctx->plane_state != plane_state)
						continue;

					// GSL has to be used for flip immediate
					dc->hwss.set_flip_control_gsl(pipe_ctx,
							plane_state->flip_immediate);
				}
			}
#endif
		/* Perform the requested updates */
		for (i = 0; i < surface_count; i++) {
			struct dc_plane_state *plane_state = srf_updates[i].surface;

			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->stream != stream)
					continue;

				if (pipe_ctx->plane_state != plane_state)
					continue;
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
				/* program triple buffering after lock, based on flip type */
				if (dc->hwss.program_triplebuffer != NULL &&
						!dc->debug.disable_tri_buf) {
					/* only enable triple buffering for fast updates */
					dc->hwss.program_triplebuffer(
						dc, pipe_ctx, plane_state->triplebuffer_flips);
				}
#endif
				if (srf_updates[i].flip_addr)
					dc->hwss.update_plane_addr(dc, pipe_ctx);
			}
		}

		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
	}

	// Fire manual trigger only when bottom plane is flipped
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (pipe_ctx->bottom_pipe ||
				!pipe_ctx->stream ||
				pipe_ctx->stream != stream ||
				!pipe_ctx->plane_state || /* guard against pipes carrying no plane */
				!pipe_ctx->plane_state->update_flags.bits.addr_update)
			continue;

		if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
			pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
	}
}

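/**
 * dc_commit_updates_for_stream() - Check and apply surface/stream updates
 *
 * Determines the update type for the given updates, builds a new validate
 * context when a full update is required, copies the updates into the stream
 * and plane states, and then commits them to hardware.
 */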
void dc_commit_updates_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		struct dc_state *state)
{
	const struct dc_stream_status *stream_status;
	enum surface_update_type update_type;
	struct dc_state *context;
	struct dc_context *dc_ctx = dc->ctx;
	int i, j;

	stream_status = dc_stream_get_status(stream);
	context = dc->current_state;

	update_type = dc_check_update_surfaces_for_stream(
			dc, srf_updates, surface_count, stream_update, stream_status);

	if (update_type >= update_surface_trace_level)
		update_surface_trace(dc, srf_updates, surface_count);

	if (update_type >= UPDATE_TYPE_FULL) {

		/* initialize scratch memory for building context */
		context = dc_create_state(dc);
		if (context == NULL) {
			DC_ERROR("Failed to allocate new validate context!\n");
			return;
		}

		dc_resource_state_copy_construct(state, context);

		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

			if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
				new_pipe->plane_state->force_full_update = true;
		}
	}

	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *surface = srf_updates[i].surface;

		copy_surface_update_to_plane(surface, &srf_updates[i]);

		if (update_type >= UPDATE_TYPE_MED) {
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->plane_state != surface)
					continue;

				resource_build_scaling_params(pipe_ctx);
			}
		}
	}

	copy_stream_update_to_stream(dc, context, stream, stream_update);

	commit_planes_for_stream(
			dc,
			srf_updates,
			surface_count,
			stream,
			stream_update,
			update_type,
			context);

	/* update current_state */
	if (dc->current_state != context) {
		struct dc_state *old = dc->current_state;

		dc->current_state = context;
		dc_release_state(old);

		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
				pipe_ctx->plane_state->force_full_update = false;
		}
	}

	/* use current_state to update watermarks etc. */
	if (update_type >= UPDATE_TYPE_FULL)
		dc_post_update_surfaces_to_stream(dc);
}

uint8_t dc_get_current_stream_count(struct dc *dc)
{
	return dc->current_state->stream_count;
}

struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
{
	if (i < dc->current_state->stream_count)
		return dc->current_state->streams[i];
	return NULL;
}

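/**
 * dc_interrupt_to_irq_source() - Translate a raw src_id/ext_id pair from the
 * interrupt handler into the corresponding dc_irq_source.
 */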
enum dc_irq_source dc_interrupt_to_irq_source(
		struct dc *dc,
		uint32_t src_id,
		uint32_t ext_id)
{
	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}

/**
 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
 */
bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
{
	if (dc == NULL)
		return false;

	return dal_irq_service_set(dc->res_pool->irqs, src, enable);
}

void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
{
	dal_irq_service_ack(dc->res_pool->irqs, src);
}

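/*
 * dc_set_power_state() - Handle ACPI power state transitions.
 *
 * On D0 the hardware and the current resource state are (re)initialized. For
 * all other states the current context is torn down and zeroed so that resume
 * starts from a clean state; only the refcount and the display mode lib are
 * preserved across the memset.
 */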
void dc_set_power_state(
	struct dc *dc,
	enum dc_acpi_cm_power_state power_state)
{
	struct kref refcount;
	struct display_mode_lib *dml = kzalloc(sizeof(struct display_mode_lib),
						GFP_KERNEL);

	ASSERT(dml);
	if (!dml)
		return;

	switch (power_state) {
	case DC_ACPI_CM_POWER_STATE_D0:
		dc_resource_state_construct(dc, dc->current_state);

		dc->hwss.init_hw(dc);
		break;
	default:
		ASSERT(dc->current_state->stream_count == 0);
		/* Zero out the current context so that on resume we start with a
		 * clean state, and dc hw programming optimizations will not
		 * cause any trouble.
		 */

		/* Preserve refcount */
		refcount = dc->current_state->refcount;
		/* Preserve display mode lib */
		memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));

		dc_resource_state_destruct(dc->current_state);
		memset(dc->current_state, 0,
				sizeof(*dc->current_state));

		dc->current_state->refcount = refcount;
		dc->current_state->bw_ctx.dml = *dml;

		break;
	}

	kfree(dml);
}

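/**
 * dc_resume() - Resume every link after the driver comes back from suspend.
 */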
void dc_resume(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++)
		core_link_resume(dc->links[i]);
}

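/*
 * The two helpers below query the ABM (adaptive backlight) block for the
 * current and target backlight PWM values; they return 0 when no ABM block
 * is present in the resource pool.
 */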
unsigned int dc_get_current_backlight_pwm(struct dc *dc)
{
	struct abm *abm = dc->res_pool->abm;

	if (abm)
		return abm->funcs->get_current_backlight(abm);

	return 0;
}

unsigned int dc_get_target_backlight_pwm(struct dc *dc)
{
	struct abm *abm = dc->res_pool->abm;

	if (abm)
		return abm->funcs->get_target_backlight(abm);

	return 0;
}

bool dc_is_dmcu_initialized(struct dc *dc)
{
	struct dmcu *dmcu = dc->res_pool->dmcu;

	if (dmcu)
		return dmcu->funcs->is_dmcu_initialized(dmcu);
	return false;
}

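/**
 * dc_submit_i2c() - Submit an i2c command over the DDC line of the link at
 * link_index.
 */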
bool dc_submit_i2c(
		struct dc *dc,
		uint32_t link_index,
		struct i2c_command *cmd)
{
	struct dc_link *link = dc->links[link_index];
	struct ddc_service *ddc = link->ddc;

	return dce_i2c_submit_command(
		dc->res_pool,
		ddc->ddc_pin,
		cmd);
}

static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
{
	if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	dc_sink_retain(sink);

	dc_link->remote_sinks[dc_link->sink_count] = sink;
	dc_link->sink_count++;

	return true;
}

/**
 * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
 *
 * EDID length is in bytes
 */
struct dc_sink *dc_link_add_remote_sink(
		struct dc_link *link,
		const uint8_t *edid,
		int len,
		struct dc_sink_init_data *init_data)
{
	struct dc_sink *dc_sink;
	enum dc_edid_status edid_status;

	if (len > DC_MAX_EDID_BUFFER_SIZE) {
		dm_error("Max EDID buffer size breached!\n");
		return NULL;
	}

	if (!init_data) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	if (!init_data->link) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dc_sink = dc_sink_create(init_data);

	if (!dc_sink)
		return NULL;

	memmove(dc_sink->dc_edid.raw_edid, edid, len);
	dc_sink->dc_edid.length = len;

	if (!link_add_remote_sink_helper(
			link,
			dc_sink))
		goto fail_add_sink;

	edid_status = dm_helpers_parse_edid_caps(
			link->ctx,
			&dc_sink->dc_edid,
			&dc_sink->edid_caps);

	/*
	 * Treat the device as having no EDID if EDID parsing fails
	 */
	if (edid_status != EDID_OK) {
		dc_sink->dc_edid.length = 0;
		dm_error("Bad EDID, status %d!\n", edid_status);
	}

	return dc_sink;

fail_add_sink:
	dc_sink_release(dc_sink);
	return NULL;
}

/**
 * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
 *
 * Note that this just removes the struct dc_sink - it doesn't
 * program hardware or alter other members of dc_link
 */
void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
{
	int i;

	if (!link->sink_count) {
		BREAK_TO_DEBUGGER();
		return;
	}

	for (i = 0; i < link->sink_count; i++) {
		if (link->remote_sinks[i] == sink) {
			dc_sink_release(sink);
			link->remote_sinks[i] = NULL;

			/* shrink array to remove the empty slot */
			while (i < link->sink_count - 1) {
				link->remote_sinks[i] = link->remote_sinks[i+1];
				i++;
			}
			link->remote_sinks[i] = NULL;
			link->sink_count--;
			return;
		}
	}
}

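/*
 * get_clock_requirements_for_state() - Copy the DCN clock requirements
 * computed for @state into @info; all values are reported in kHz.
 */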
void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
{
	info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
	info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
	info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
	info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
	info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
	info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
	info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
	info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
	info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
}