/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include <linux/slab.h>
#include <linux/mm.h>

#include "dm_services.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "resource.h"

#include "clk_mgr.h"
#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"

#include "link_hwss.h"
#include "link_encoder.h"

#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "mem_input.h"
#include "hubp.h"

#include "dc_link_dp.h"
#include "dc_dmub_srv.h"

#include "dsc.h"

#include "vm_helper.h"

#include "dce/dce_i2c.h"

#define CTX \
	dc->ctx

#define DC_LOGGER \
	dc->ctx->logger

static const char DC_BUILD_ID[] = "production-build";

/**
 * DOC: Overview
 *
 * DC is the OS-agnostic component of the amdgpu DC driver.
 *
 * DC maintains and validates a set of structs representing the state of the
 * driver and writes that state to AMD hardware.
 *
 * Main DC HW structs:
 *
 * struct dc - The central struct. One per driver. Created on driver load,
 * destroyed on driver unload.
 *
 * struct dc_context - One per driver.
 * Used as a backpointer by most other structs in dc.
 *
 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
 * plugpoints). Created on driver load, destroyed on driver unload.
 *
 * struct dc_sink - One per display. Created on boot or hotplug.
 * Destroyed on shutdown or hotunplug. A dc_link can have a local sink
 * (the display directly attached). It may also have one or more remote
 * sinks (in the Multi-Stream Transport case).
 *
 * struct resource_pool - One per driver. Represents the hw blocks not in the
 * main pipeline. Not directly accessible by dm.
 *
 * Main dc state structs:
 *
 * These structs can be created and destroyed as needed. There is a full set of
 * these structs in dc->current_state representing the currently programmed state.
 *
 * struct dc_state - The global DC state to track global state information,
 * such as bandwidth values.
 *
 * struct dc_stream_state - Represents the hw configuration for the pipeline from
 * a framebuffer to a display. Maps one-to-one with dc_sink.
 *
 * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
 * and may have more in the Multi-Plane Overlay case.
 *
 * struct resource_context - Represents the programmable state of everything in
 * the resource_pool. Not directly accessible by dm.
 *
 * struct pipe_ctx - A member of struct resource_context. Represents the
 * internal hardware pipeline components. Each dc_plane_state has either
 * one or two (in the pipe-split case).
 */

/*******************************************************************************
 * Private functions
 ******************************************************************************/

static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
	if (new > *original)
		*original = new;
}

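/* Tear down every link created in create_links(); NULL entries are skipped. */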
static void destroy_links(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			link_destroy(&dc->links[i]);
	}
}

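/*
 * Create one dc_link per physical connector reported by the BIOS, plus the
 * requested number of virtual links. eDP links configured as not connected
 * (or detected as disconnected, when that debug option is set) are destroyed
 * again instead of being added to dc->links.
 */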
static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = link_create(&link_init_params);

		if (link) {
			bool should_destroy_link = false;

			if (link->connector_signal == SIGNAL_TYPE_EDP) {
				if (dc->config.edp_not_connected)
					should_destroy_link = true;
				else if (dc->debug.remove_disconnect_edp) {
					enum dc_connection_type type;

					dc_link_detect_sink(link, &type);
					if (type == dc_connection_none)
						should_destroy_link = true;
				}
			}

			if (dc->config.force_enum_edp || !should_destroy_link) {
				dc->links[dc->link_count] = link;
				link->dc = dc;
				++dc->link_count;
			} else {
				link_destroy(&link);
			}
		}
	}

	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	return true;

failed_alloc:
	return false;
}

static struct dc_perf_trace *dc_perf_trace_create(void)
{
	return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
}

static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
	kfree(*perf_trace);
	*perf_trace = NULL;
}

/**
 * dc_stream_adjust_vmin_vmax() - Adjust the DRR vertical total range.
 * @dc: dc reference
 * @stream: Initial dc stream state
 * @adjust: Updated parameters for vertical_total_min and vertical_total_max
 *
 * Looks up the pipe context of dc_stream_state and updates the
 * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
 * Rate, which is a power-saving feature that targets reducing panel
 * refresh rate while the screen is static.
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_crtc_timing_adjust *adjust)
{
	int i = 0;
	bool ret = false;

	stream->adjust = *adjust;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			dc->hwss.set_drr(&pipe,
					1,
					adjust->v_total_min,
					adjust->v_total_max,
					adjust->v_total_mid,
					adjust->v_total_mid_frame_num);

			ret = true;
		}
	}
	return ret;
}

bool dc_stream_get_crtc_position(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		unsigned int *v_pos, unsigned int *nom_v_pos)
{
	/* TODO: Support multiple streams */
	const struct dc_stream_state *stream = streams[0];
	int i = 0;
	bool ret = false;
	struct crtc_position position;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.get_position(&pipe, 1, &position);

			*v_pos = position.vertical_count;
			*nom_v_pos = position.nominal_vcount;
			ret = true;
		}
	}
	return ret;
}

/**
 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 * @dc: DC Object
 * @stream: The stream to configure CRC on.
 * @enable: Enable CRC if true, disable otherwise.
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *              once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the CRC.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
			     bool enable, bool continuous)
{
	int i;
	struct pipe_ctx *pipe;
	struct crc_params param;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	/* Always capture the full frame */
	param.windowa_x_start = 0;
	param.windowa_y_start = 0;
	param.windowa_x_end = pipe->stream->timing.h_addressable;
	param.windowa_y_end = pipe->stream->timing.v_addressable;
	param.windowb_x_start = 0;
	param.windowb_y_start = 0;
	param.windowb_x_end = pipe->stream->timing.h_addressable;
	param.windowb_y_end = pipe->stream->timing.v_addressable;

	/* Default to the union of both windows */
	param.selection = UNION_WINDOW_A_B;
	param.continuous_mode = continuous;
	param.enable = enable;

	tg = pipe->stream_res.tg;

	/* Only call if supported */
	if (tg->funcs->configure_crc)
		return tg->funcs->configure_crc(tg, &param);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}

/**
 * dc_stream_get_crc() - Get CRC values for the given stream.
 * @dc: DC object
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr: CRC value for the R/Cr channel is stored here.
 * @g_y: CRC value for the G/Y channel is stored here.
 * @b_cb: CRC value for the B/Cb channel is stored here.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 * Return false if stream is not found, or if CRCs are not enabled.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
		       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	int i;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	tg = pipe->stream_res.tg;

	if (tg->funcs->get_crc)
		return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}

void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
		enum dc_dynamic_expansion option)
{
	/* OPP FMT dyn expansion updates */
	int i = 0;
	struct pipe_ctx *pipe_ctx;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {
			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
			pipe_ctx->stream_res.opp->dyn_expansion = option;
			pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
					pipe_ctx->stream_res.opp,
					COLOR_SPACE_YCBCR601,
					stream->timing.display_color_depth,
					stream->signal);
		}
	}
}

void dc_stream_set_dither_option(struct dc_stream_state *stream,
		enum dc_dither_option option)
{
	struct bit_depth_reduction_params params;
	struct dc_link *link = stream->link;
	struct pipe_ctx *pipes = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
				stream) {
			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	if (!pipes)
		return;
	if (option > DITHER_OPTION_MAX)
		return;

	stream->dither_option = option;

	memset(&params, 0, sizeof(params));
	resource_build_bit_depth_reduction_params(stream, &params);
	stream->bit_depth_params = params;

	if (pipes->plane_res.xfm &&
	    pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
		pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
			pipes->plane_res.xfm,
			pipes->plane_res.scl_data.lb_params.depth,
			&stream->bit_depth_params);
	}

	pipes->stream_res.opp->funcs->
		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}

bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
	int i = 0;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_gamut_remap(pipes);
			ret = true;
		}
	}

	return ret;
}

bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
	int i = 0;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {

			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_output_csc(dc,
					pipes,
					stream->output_color_space,
					stream->csc_color_matrix.matrix,
					pipes->stream_res.opp->inst);
			ret = true;
		}
	}

	return ret;
}

void dc_stream_set_static_screen_params(struct dc *dc,
		struct dc_stream_state **streams,
		int num_streams,
		const struct dc_static_screen_params *params)
{
	int i = 0;
	int j = 0;
	struct pipe_ctx *pipes_affected[MAX_PIPES];
	int num_pipes_affected = 0;

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream = streams[i];

		for (j = 0; j < MAX_PIPES; j++) {
			if (dc->current_state->res_ctx.pipe_ctx[j].stream
					== stream) {
				pipes_affected[num_pipes_affected++] =
						&dc->current_state->res_ctx.pipe_ctx[j];
			}
		}
	}

	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
}

static void dc_destruct(struct dc *dc)
{
	if (dc->current_state) {
		dc_release_state(dc->current_state);
		dc->current_state = NULL;
	}

	destroy_links(dc);

	if (dc->clk_mgr) {
		dc_destroy_clk_mgr(dc->clk_mgr);
		dc->clk_mgr = NULL;
	}

	dc_destroy_resource_pool(dc);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	dc_perf_trace_destroy(&dc->ctx->perf_trace);

	kfree(dc->ctx);
	dc->ctx = NULL;

	kfree(dc->bw_vbios);
	dc->bw_vbios = NULL;

	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

#ifdef CONFIG_DRM_AMD_DC_DCN
	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;

#endif
	kfree(dc->vm_helper);
	dc->vm_helper = NULL;
}

static bool dc_construct_ctx(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	enum dce_version dc_version = DCE_VERSION_UNKNOWN;

	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	if (!dc_ctx)
		return false;

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;
	dc_ctx->dc_sink_id_count = 0;
	dc_ctx->dc_stream_id_count = 0;
	dc_ctx->dce_environment = init_params->dce_environment;

	/* Create logger */

	dc_version = resource_parse_asic_id(init_params->asic_id);
	dc_ctx->dce_version = dc_version;

	dc_ctx->perf_trace = dc_perf_trace_create();
	if (!dc_ctx->perf_trace) {
		ASSERT_CRITICAL(false);
		return false;
	}

	dc->ctx = dc_ctx;

	return true;
}

static bool dc_construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;
#endif

	dc->config = init_params->flags;

	// Allocate memory for the vm_helper
	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
	if (!dc->vm_helper) {
		dm_error("%s: failed to create dc->vm_helper\n", __func__);
		goto fail;
	}

	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;
	dc->soc_bounding_box = init_params->soc_bounding_box;
#endif

	if (!dc_construct_ctx(dc, init_params)) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx = dc->ctx;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_ctx->dce_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		dc_ctx->created_bios = true;
	}

	dc->vendor_signature = init_params->vendor_signature;

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_ctx->dce_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
	if (!dc->res_pool)
		goto fail;

	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
	if (!dc->clk_mgr)
		goto fail;

	if (dc->res_pool->funcs->update_bw_bounding_box)
		dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);

	/* Creation of current_state must occur after dc->dml
	 * is initialized in dc_create_resource_pool because
	 * on creation it copies the contents of dc->dml
	 */

	dc->current_state = dc_create_state(dc);

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	dc_resource_state_construct(dc, dc->current_state);

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	return true;

fail:
	return false;
}

static bool disable_all_writeback_pipes_for_stream(
		const struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_state *context)
{
	int i;

	for (i = 0; i < stream->num_wb_info; i++)
		stream->writeback_info[i].wb_enabled = false;

	return true;
}

void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream, bool lock)
{
	int i = 0;

	/* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
	if (dc->hwss.interdependent_update_lock)
		dc->hwss.interdependent_update_lock(dc, context, lock);
	else {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

			// Copied conditions that were previously in dce110_apply_ctx_for_surface
			if (stream == pipe_ctx->stream) {
				if (!pipe_ctx->top_pipe &&
					(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
					dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
			}
		}
	}
}

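/*
 * Disable planes and writeback for any stream that is enabled in the current
 * state but absent from the new context, so its hardware resources are freed
 * before the new context is committed.
 */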
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_create_state(dc);
	struct dc_state *current_ctx;

	if (dangling_context == NULL)
		return;

	dc_resource_state_copy_construct(dc->current_state, dangling_context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;

		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (should_disable && old_stream) {
			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);

			if (dc->hwss.apply_ctx_for_surface) {
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
				dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
			if (dc->hwss.program_front_end_for_ctx) {
				dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
				dc->hwss.program_front_end_for_ctx(dc, dangling_context);
				dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
		}
	}

	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_release_state(current_ctx);
}

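/*
 * Poll every pipe with a plane until no flip is pending, or until the
 * ~100 ms timeout (100000 iterations of udelay(1)) expires.
 */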
static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
	int i;
	int count = 0;
	struct pipe_ctx *pipe;

	PERF_TRACE();
	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->plane_state)
			continue;

		/* Timeout 100 ms */
		while (count < 100000) {
			/* Must set to false to start with, due to OR in update function */
			pipe->plane_state->status.is_flip_pending = false;
			dc->hwss.update_pending_status(pipe);
			if (!pipe->plane_state->status.is_flip_pending)
				break;
			udelay(1);
			count++;
		}
		ASSERT(!pipe->plane_state->status.is_flip_pending);
	}
	PERF_TRACE();
}

/*******************************************************************************
 * Public functions
 ******************************************************************************/

struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (NULL == dc)
		goto alloc_fail;

	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
		if (false == dc_construct_ctx(dc, init_params)) {
			dc_destruct(dc);
			goto construct_fail;
		}
	} else {
		if (false == dc_construct(dc, init_params)) {
			dc_destruct(dc);
			goto construct_fail;
		}

		full_pipe_count = dc->res_pool->pipe_count;
		if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
			full_pipe_count--;
		dc->caps.max_streams = min(
				full_pipe_count,
				dc->res_pool->stream_enc_count);

		dc->optimize_seamless_boot_streams = 0;
		dc->caps.max_links = dc->link_count;
		dc->caps.max_audios = dc->res_pool->audio_count;
		dc->caps.linear_pitch_alignment = 64;

		dc->caps.max_dp_protocol_version = DP_VERSION_1_4;

		if (dc->res_pool->dmcu != NULL)
			dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
	}

	/* Populate versioning information */
	dc->versions.dc_ver = DC_VER;

	dc->build_id = DC_BUILD_ID;

	DC_LOG_DC("Display Core initialized\n");

	return dc;

construct_fail:
	kfree(dc);

alloc_fail:
	return NULL;
}

void dc_hardware_init(struct dc *dc)
{
	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
		dc->hwss.init_hw(dc);
}

void dc_init_callbacks(struct dc *dc,
		const struct dc_callback_init *init_params)
{
#ifdef CONFIG_DRM_AMD_DC_HDCP
	dc->ctx->cp_psp = init_params->cp_psp;
#endif
}

void dc_deinit_callbacks(struct dc *dc)
{
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
#endif
}

void dc_destroy(struct dc **dc)
{
	dc_destruct(*dc);
	kfree(*dc);
	*dc = NULL;
}

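/*
 * Collect all pipes whose stream uses a triggered CRTC reset driven by
 * another stream, and hand them to the hw sequencer for per-frame CRTC
 * position reset.
 */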
static void enable_timing_multisync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i = 0, multisync_count = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream ||
				!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
			continue;
		if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
			continue;
		multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
		multisync_count++;
	}

	if (multisync_count > 0) {
		dc->hwss.enable_per_frame_crtc_position_reset(
			dc, multisync_count, multisync_pipes);
	}
}

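/*
 * Group top-level pipes whose stream timings are synchronizable, pick the
 * first pipe that has a plane as the group master, and enable timing
 * synchronization for every group with more than one member.
 */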
static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j, k;
	int group_index = 0;
	int num_group = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set, search rest of the tgs for ones with
		 * the same timing, add all tgs with the same timing to the group
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;

			if (resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first pipe with plane as master */
		for (j = 0; j < group_size; j++) {
			if (pipe_set[j]->plane_state) {
				if (j == 0)
					break;

				swap(pipe_set[0], pipe_set[j]);
				break;
			}
		}

		for (k = 0; k < group_size; k++) {
			struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);

			status->timing_sync_info.group_id = num_group;
			status->timing_sync_info.group_size = group_size;
			if (k == 0)
				status->timing_sync_info.master = true;
			else
				status->timing_sync_info.master = false;
		}
		/* remove any other pipes with plane as they have already been synced */
		for (j = j + 1; j < group_size; j++) {
			if (pipe_set[j]->plane_state) {
				group_size--;
				pipe_set[j] = pipe_set[group_size];
				j--;
			}
		}

		if (group_size > 1) {
			dc->hwss.enable_timing_synchronization(
				dc, group_index, group_size, pipe_set);
			group_index++;
		}
		num_group++;
	}
}

static bool context_changed(
		struct dc *dc,
		struct dc_state *context)
{
	uint8_t i;

	if (context->stream_count != dc->current_state->stream_count)
		return true;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i] != context->streams[i])
			return true;
	}

	return false;
}

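/*
 * Compare the timing the VBIOS/GOP left programmed in hardware against the
 * requested CRTC timing; return true only when every field matches, so the
 * driver can take over the display without a full mode set (seamless boot).
 */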
bool dc_validate_seamless_boot_timing(const struct dc *dc,
		const struct dc_sink *sink,
		struct dc_crtc_timing *crtc_timing)
{
	struct timing_generator *tg;
	struct stream_encoder *se = NULL;

	struct dc_crtc_timing hw_crtc_timing = {0};

	struct dc_link *link = sink->link;
	unsigned int i, enc_inst, tg_inst = 0;

	// Seamless boot is only supported on single DP and eDP links so far
	if (sink->sink_signal != SIGNAL_TYPE_DISPLAY_PORT &&
		sink->sink_signal != SIGNAL_TYPE_EDP)
		return false;

	/* Check for enabled DIG to identify enabled display */
	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
		return false;

	enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);

	if (enc_inst == ENGINE_ID_UNKNOWN)
		return false;

	for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
		if (dc->res_pool->stream_enc[i]->id == enc_inst) {

			se = dc->res_pool->stream_enc[i];

			tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
				dc->res_pool->stream_enc[i]);
			break;
		}
	}

	// tg_inst not found
	if (i == dc->res_pool->stream_enc_count)
		return false;

	if (tg_inst >= dc->res_pool->timing_generator_count)
		return false;

	tg = dc->res_pool->timing_generators[tg_inst];

	if (!tg->funcs->get_hw_timing)
		return false;

	if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
		return false;

	if (crtc_timing->h_total != hw_crtc_timing.h_total)
		return false;

	if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
		return false;

	if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
		return false;

	if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
		return false;

	if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
		return false;

	if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
		return false;

	if (crtc_timing->v_total != hw_crtc_timing.v_total)
		return false;

	if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
		return false;

	if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
		return false;

	if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
		return false;

	if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
		return false;

	if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
		return false;

	if (dc_is_dp_signal(link->connector_signal)) {
		unsigned int pix_clk_100hz;

		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
			dc->res_pool->dp_clock_source,
			tg_inst, &pix_clk_100hz);

		if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
			return false;

		if (!se->funcs->dp_get_pixel_format)
			return false;

		if (!se->funcs->dp_get_pixel_format(
			se,
			&hw_crtc_timing.pixel_encoding,
			&hw_crtc_timing.display_color_depth))
			return false;

		if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
			return false;

		if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
			return false;
	}

	return true;
}

bool dc_enable_stereo(
	struct dc *dc,
	struct dc_state *context,
	struct dc_stream_state *streams[],
	uint8_t stream_count)
{
	bool ret = true;
	int i, j;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		if (context != NULL)
			pipe = &context->res_ctx.pipe_ctx[i];
		else
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		for (j = 0; pipe && j < stream_count; j++) {
			if (streams[j] && streams[j] == pipe->stream &&
				dc->hwss.setup_stereo)
				dc->hwss.setup_stereo(pipe, dc);
		}
	}

	return ret;
}

/*
 * Applies the given context to HW and copies it into the current context.
 * It's up to the user to release the src context afterwards.
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};

	disable_dangling_plane(dc, context);

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] = context->streams[i];

	if (!dcb->funcs->is_accelerated_mode(dcb))
		dc->hwss.enable_accelerated_mode(dc, context);

	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization)
			dc->optimize_seamless_boot_streams++;
	}

	if (dc->optimize_seamless_boot_streams == 0)
		dc->hwss.prepare_bandwidth(dc, context);

	/* re-program planes for existing stream, in case we need to
	 * free up plane resource for later use
	 */
	if (dc->hwss.apply_ctx_for_surface) {
		for (i = 0; i < context->stream_count; i++) {
			if (context->streams[i]->mode_changed)
				continue;
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context); /* use new pipe config in new context */
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}
	}

	/* Program hardware */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	if (result != DC_OK)
		return result;

	if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
		enable_timing_multisync(dc, context);
		program_timing_sync(dc, context);
	}

	/* Program all planes within new context */
	if (dc->hwss.program_front_end_for_ctx) {
		dc->hwss.interdependent_update_lock(dc, context, true);
		dc->hwss.program_front_end_for_ctx(dc, context);
		dc->hwss.interdependent_update_lock(dc, context, false);
		dc->hwss.post_unlock_program_front_end(dc, context);
	}
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_link *link = context->streams[i]->link;

		if (!context->streams[i]->mode_changed)
			continue;

		if (dc->hwss.apply_ctx_for_surface) {
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context);
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}

		/*
		 * enable stereo
		 * TODO rework dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0; pipe && l < context->stream_count; l++) {
				if (context->streams[l] &&
					context->streams[l] == pipe->stream &&
					dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_100hz / 10);
	}

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	if (dc->optimize_seamless_boot_streams == 0) {
		/* Must wait for no flips to be pending before doing optimize bw */
		wait_for_no_pipes_pending(dc, context);
		/* pplib is notified if disp_num changed */
		dc->hwss.optimize_bandwidth(dc, context);
	}

	for (i = 0; i < context->stream_count; i++)
		context->streams[i]->mode_changed = false;

	dc_release_state(dc->current_state);

	dc->current_state = context;

	dc_retain_state(dc->current_state);

	return result;
}

bool dc_commit_state(struct dc *dc, struct dc_state *context)
{
	enum dc_status result = DC_ERROR_UNEXPECTED;
	int i;

	if (false == context_changed(dc, context))
		return true;

	DC_LOG_DC("%s: %d streams\n",
			__func__, context->stream_count);

	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		dc_stream_log(dc, stream);
	}

	result = dc_commit_state_no_check(dc, context);

	return (result == DC_OK);
}

static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->plane_state)
			continue;

		/* Must set to false to start with, due to OR in update function */
		pipe->plane_state->status.is_flip_pending = false;
		dc->hwss.update_pending_status(pipe);
		if (pipe->plane_state->status.is_flip_pending)
			return true;
	}
	return false;
}

bool dc_post_update_surfaces_to_stream(struct dc *dc)
{
	int i;
	struct dc_state *context = dc->current_state;

	if ((!dc->optimized_required) || dc->optimize_seamless_boot_streams > 0)
		return true;

	post_surface_trace(dc);

	if (is_flip_pending_in_pipes(dc, context))
		return true;

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].stream == NULL ||
		    context->res_ctx.pipe_ctx[i].plane_state == NULL) {
			context->res_ctx.pipe_ctx[i].pipe_idx = i;
			dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
		}

	dc->hwss.optimize_bandwidth(dc, context);

	dc->optimized_required = false;
	dc->wm_optimized_required = false;

	return true;
}

struct dc_state *dc_create_state(struct dc *dc)
{
	struct dc_state *context = kvzalloc(sizeof(struct dc_state),
					    GFP_KERNEL);

	if (!context)
		return NULL;
	/* Each context must have their own instance of VBA and in order to
	 * initialize and obtain IP and SOC the base DML instance from DC is
	 * initially copied into every context
	 */
#ifdef CONFIG_DRM_AMD_DC_DCN
	memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
#endif

	kref_init(&context->refcount);

	return context;
}

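/*
 * Shallow-copy the whole state, then repoint the inter-pipe links
 * (top/bottom and ODM neighbours) into the new copy and take references on
 * every stream and plane so both states can be released independently.
 */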
struct dc_state *dc_copy_state(struct dc_state *src_ctx)
{
	int i, j;
	struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);

	if (!new_ctx)
		return NULL;
	memcpy(new_ctx, src_ctx, sizeof(struct dc_state));

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];

		if (cur_pipe->top_pipe)
			cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];

		if (cur_pipe->bottom_pipe)
			cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];

		if (cur_pipe->prev_odm_pipe)
			cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];

		if (cur_pipe->next_odm_pipe)
			cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
	}

	for (i = 0; i < new_ctx->stream_count; i++) {
		dc_stream_retain(new_ctx->streams[i]);
		for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
			dc_plane_state_retain(
				new_ctx->stream_status[i].plane_states[j]);
	}

	kref_init(&new_ctx->refcount);

	return new_ctx;
}

void dc_retain_state(struct dc_state *context)
{
	kref_get(&context->refcount);
}

static void dc_state_free(struct kref *kref)
{
	struct dc_state *context = container_of(kref, struct dc_state, refcount);

	dc_resource_state_destruct(context);
	kvfree(context);
}

void dc_release_state(struct dc_state *context)
{
	kref_put(&context->refcount, dc_state_free);
}

bool dc_set_generic_gpio_for_stereo(bool enable,
		struct gpio_service *gpio_service)
{
	enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
	struct gpio_pin_info pin_info;
	struct gpio *generic;
	struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
			GFP_KERNEL);

	if (!config)
		return false;
	pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);

	if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
		kfree(config);
		return false;
	} else {
		generic = dal_gpio_service_create_generic_mux(
			gpio_service,
			pin_info.offset,
			pin_info.mask);
	}

	if (!generic) {
		kfree(config);
		return false;
	}

	gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);

	config->enable_output_from_mux = enable;
	config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;

	if (gpio_result == GPIO_RESULT_OK)
		gpio_result = dal_mux_setup_config(generic, config);

	if (gpio_result == GPIO_RESULT_OK) {
		dal_gpio_close(generic);
		dal_gpio_destroy_generic_mux(&generic);
		kfree(config);
		return true;
	} else {
		dal_gpio_close(generic);
		dal_gpio_destroy_generic_mux(&generic);
		kfree(config);
		return false;
	}
}

static bool is_surface_in_context(
		const struct dc_state *context,
		const struct dc_plane_state *plane_state)
{
	int j;

	for (j = 0; j < MAX_PIPES; j++) {
		const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (plane_state == pipe_ctx->plane_state) {
			return true;
		}
	}

	return false;
}

static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;

	if (!u->plane_info)
		return UPDATE_TYPE_FAST;

	if (u->plane_info->color_space != u->surface->color_space) {
		update_flags->bits.color_space_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
		update_flags->bits.horizontal_mirror_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->rotation != u->surface->rotation) {
		update_flags->bits.rotation_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->format != u->surface->format) {
		update_flags->bits.pixel_format_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->stereo_format != u->surface->stereo_format) {
		update_flags->bits.stereo_format_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
		update_flags->bits.per_pixel_alpha_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
		update_flags->bits.global_alpha_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->dcc.enable != u->surface->dcc.enable
			|| u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks
			|| u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
		update_flags->bits.dcc_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (resource_pixel_format_to_bpp(u->plane_info->format) !=
			resource_pixel_format_to_bpp(u->surface->format)) {
		/* different bytes per element will require full bandwidth
		 * and DML calculation
		 */
		update_flags->bits.bpp_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
			|| u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
		update_flags->bits.plane_size_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
			sizeof(union dc_tiling_info)) != 0) {
		update_flags->bits.swizzle_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);

		/* todo: below are HW dependent, we should add a hook to
		 * DCE/N resource and validated there.
		 */
		if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
			/* swizzled mode requires RQ to be setup properly,
			 * thus need to run DML to calculate RQ settings
			 */
			update_flags->bits.bandwidth_change = 1;
			elevate_update_type(&update_type, UPDATE_TYPE_FULL);
		}
	}

	/* This should be UPDATE_TYPE_FAST if nothing has changed. */
	return update_type;
}

static enum surface_update_type get_scaling_info_update_type(
		const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (!u->scaling_info)
		return UPDATE_TYPE_FAST;

	if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
			|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
			|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height
			|| u->scaling_info->scaling_quality.integer_scaling !=
				u->surface->scaling_quality.integer_scaling
			) {
		update_flags->bits.scaling_change = 1;

		if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
				&& (u->scaling_info->dst_rect.width < u->surface->src_rect.width
					|| u->scaling_info->dst_rect.height < u->surface->src_rect.height))
			/* Making dst rect smaller requires a bandwidth change */
			update_flags->bits.bandwidth_change = 1;
	}

	if (u->scaling_info->src_rect.width != u->surface->src_rect.width
		|| u->scaling_info->src_rect.height != u->surface->src_rect.height) {

		update_flags->bits.scaling_change = 1;
		if (u->scaling_info->src_rect.width > u->surface->src_rect.width
				|| u->scaling_info->src_rect.height > u->surface->src_rect.height)
			/* Making src rect bigger requires a bandwidth change */
			update_flags->bits.clock_change = 1;
	}

	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
			|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
			|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
			|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
		update_flags->bits.position_change = 1;

	if (update_flags->bits.clock_change
			|| update_flags->bits.bandwidth_change
			|| update_flags->bits.scaling_change)
		return UPDATE_TYPE_FULL;

	if (update_flags->bits.position_change)
		return UPDATE_TYPE_MED;

	return UPDATE_TYPE_FAST;
}

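/*
 * Determine the update type for a single surface update: a surface that is
 * not in the current context (or has force_full_update set) is always a full
 * update; otherwise the plane-info, scaling and color-management changes are
 * combined into the highest required type.
 */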
static enum surface_update_type det_surface_update(const struct dc *dc,
		const struct dc_surface_update *u)
{
	const struct dc_state *context = dc->current_state;
	enum surface_update_type type;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (u->flip_addr)
		update_flags->bits.addr_update = 1;

	if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
		update_flags->raw = 0xFFFFFFFF;
		return UPDATE_TYPE_FULL;
	}

	update_flags->raw = 0; // Reset all flags

	type = get_plane_info_update_type(u);
	elevate_update_type(&overall_type, type);

	type = get_scaling_info_update_type(u);
	elevate_update_type(&overall_type, type);

	if (u->flip_addr)
		update_flags->bits.addr_update = 1;

	if (u->in_transfer_func)
		update_flags->bits.in_transfer_func_change = 1;

	if (u->input_csc_color_matrix)
		update_flags->bits.input_csc_change = 1;

	if (u->coeff_reduction_factor)
		update_flags->bits.coeff_reduction_change = 1;

	if (u->gamut_remap_matrix)
		update_flags->bits.gamut_remap_change = 1;

	if (u->gamma) {
		enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;

		if (u->plane_info)
			format = u->plane_info->format;
		else if (u->surface)
			format = u->surface->format;

		if (dce_use_lut(format))
			update_flags->bits.gamma_change = 1;
	}

	if (u->hdr_mult.value)
		if (u->hdr_mult.value != u->surface->hdr_mult.value) {
			update_flags->bits.hdr_mult = 1;
			elevate_update_type(&overall_type, UPDATE_TYPE_MED);
		}

	if (update_flags->bits.in_transfer_func_change) {
		type = UPDATE_TYPE_MED;
		elevate_update_type(&overall_type, type);
	}

	if (update_flags->bits.input_csc_change
			|| update_flags->bits.coeff_reduction_change
			|| update_flags->bits.gamma_change
			|| update_flags->bits.gamut_remap_change) {
		type = UPDATE_TYPE_FULL;
		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}

static enum surface_update_type check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;

	if (stream_status == NULL || stream_status->plane_count != surface_count)
		overall_type = UPDATE_TYPE_FULL;

	/* some stream updates require passive update */
	if (stream_update) {
		union stream_update_flags *su_flags = &stream_update->stream->update_flags;

		if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
			(stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
			stream_update->integer_scaling_update)
			su_flags->bits.scaling = 1;

		if (stream_update->out_transfer_func)
			su_flags->bits.out_tf = 1;

		if (stream_update->abm_level)
			su_flags->bits.abm_level = 1;

		if (stream_update->dpms_off)
			su_flags->bits.dpms_off = 1;

		if (stream_update->gamut_remap)
			su_flags->bits.gamut_remap = 1;

		if (stream_update->wb_update)
			su_flags->bits.wb_update = 1;

		if (stream_update->dsc_config)
			su_flags->bits.dsc_changed = 1;

		if (su_flags->raw != 0)
			overall_type = UPDATE_TYPE_FULL;

		if (stream_update->output_csc_transform || stream_update->output_color_space)
			su_flags->bits.out_csc = 1;
	}

	for (i = 0; i < surface_count; i++) {
		enum surface_update_type type =
				det_surface_update(dc, &updates[i]);

		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}

/**
 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
 *
 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
 */
enum surface_update_type dc_check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type type;

	if (stream_update)
		stream_update->stream->update_flags.raw = 0;
	for (i = 0; i < surface_count; i++)
		updates[i].surface->update_flags.raw = 0;

	type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
	if (type == UPDATE_TYPE_FULL) {
		if (stream_update) {
			uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;

			stream_update->stream->update_flags.raw = 0xFFFFFFFF;
			stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
		}
		for (i = 0; i < surface_count; i++)
			updates[i].surface->update_flags.raw = 0xFFFFFFFF;
	}

	if (type == UPDATE_TYPE_FAST) {
		// If there's an available clock comparator, we use that.
		if (dc->clk_mgr->funcs->are_clock_states_equal) {
			if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
				dc->optimized_required = true;
		// Else we fall back to a mem compare.
		} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
			dc->optimized_required = true;
		}

		dc->optimized_required |= dc->wm_optimized_required;
	}

	return type;
}

static struct dc_stream_status *stream_get_status(
		struct dc_state *ctx,
		struct dc_stream_state *stream)
{
	uint8_t i;

	for (i = 0; i < ctx->stream_count; i++) {
		if (stream == ctx->streams[i]) {
			return &ctx->stream_status[i];
		}
	}

	return NULL;
}

static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;

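/*
 * Copy every field present in the surface update into the plane state.
 * Fields the update leaves NULL (or zero, for hdr_mult) keep their current
 * values on the plane.
 */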
static void copy_surface_update_to_plane(
		struct dc_plane_state *surface,
		struct dc_surface_update *srf_update)
{
	if (srf_update->flip_addr) {
		surface->address = srf_update->flip_addr->address;
		surface->flip_immediate =
			srf_update->flip_addr->flip_immediate;
		surface->time.time_elapsed_in_us[surface->time.index] =
			srf_update->flip_addr->flip_timestamp_in_us -
				surface->time.prev_update_time_in_us;
		surface->time.prev_update_time_in_us =
			srf_update->flip_addr->flip_timestamp_in_us;
		surface->time.index++;
		if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
			surface->time.index = 0;

		surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
	}

	if (srf_update->scaling_info) {
		surface->scaling_quality =
				srf_update->scaling_info->scaling_quality;
		surface->dst_rect =
				srf_update->scaling_info->dst_rect;
		surface->src_rect =
				srf_update->scaling_info->src_rect;
		surface->clip_rect =
				srf_update->scaling_info->clip_rect;
	}

	if (srf_update->plane_info) {
		surface->color_space =
				srf_update->plane_info->color_space;
		surface->format =
				srf_update->plane_info->format;
		surface->plane_size =
				srf_update->plane_info->plane_size;
		surface->rotation =
				srf_update->plane_info->rotation;
		surface->horizontal_mirror =
				srf_update->plane_info->horizontal_mirror;
		surface->stereo_format =
				srf_update->plane_info->stereo_format;
		surface->tiling_info =
				srf_update->plane_info->tiling_info;
		surface->visible =
				srf_update->plane_info->visible;
		surface->per_pixel_alpha =
				srf_update->plane_info->per_pixel_alpha;
		surface->global_alpha =
				srf_update->plane_info->global_alpha;
		surface->global_alpha_value =
				srf_update->plane_info->global_alpha_value;
		surface->dcc =
				srf_update->plane_info->dcc;
		surface->layer_index =
				srf_update->plane_info->layer_index;
	}

	if (srf_update->gamma &&
			(surface->gamma_correction !=
					srf_update->gamma)) {
		memcpy(&surface->gamma_correction->entries,
			&srf_update->gamma->entries,
			sizeof(struct dc_gamma_entries));
		surface->gamma_correction->is_identity =
			srf_update->gamma->is_identity;
		surface->gamma_correction->num_entries =
			srf_update->gamma->num_entries;
		surface->gamma_correction->type =
			srf_update->gamma->type;
	}

	if (srf_update->in_transfer_func &&
			(surface->in_transfer_func !=
				srf_update->in_transfer_func)) {
		surface->in_transfer_func->sdr_ref_white_level =
			srf_update->in_transfer_func->sdr_ref_white_level;
		surface->in_transfer_func->tf =
			srf_update->in_transfer_func->tf;
		surface->in_transfer_func->type =
			srf_update->in_transfer_func->type;
		memcpy(&surface->in_transfer_func->tf_pts,
			&srf_update->in_transfer_func->tf_pts,
			sizeof(struct dc_transfer_func_distributed_points));
	}

	if (srf_update->func_shaper &&
			(surface->in_shaper_func !=
				srf_update->func_shaper))
		memcpy(surface->in_shaper_func, srf_update->func_shaper,
			sizeof(*surface->in_shaper_func));

	if (srf_update->lut3d_func &&
			(surface->lut3d_func !=
				srf_update->lut3d_func))
		memcpy(surface->lut3d_func, srf_update->lut3d_func,
			sizeof(*surface->lut3d_func));

	if (srf_update->hdr_mult.value)
		surface->hdr_mult =
				srf_update->hdr_mult;

	if (srf_update->blend_tf &&
			(surface->blend_tf !=
				srf_update->blend_tf))
		memcpy(surface->blend_tf, srf_update->blend_tf,
			sizeof(*surface->blend_tf));

	if (srf_update->input_csc_color_matrix)
		surface->input_csc_color_matrix =
			*srf_update->input_csc_color_matrix;

	if (srf_update->coeff_reduction_factor)
		surface->coeff_reduction_factor =
2005 *srf_update->coeff_reduction_factor;
2006
2007 if (srf_update->gamut_remap_matrix)
2008 surface->gamut_remap_matrix =
2009 *srf_update->gamut_remap_matrix;
2010 }
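/*
 * Usage sketch (illustrative, hypothetical locals): only non-NULL members
 * of dc_surface_update are applied by the copy above, so a flip-only
 * update leaves every other surface property untouched.
 *
 *	struct dc_flip_addrs flip = { 0 };
 *	struct dc_surface_update update = { 0 };
 *
 *	flip.address = new_address;	// hypothetical PLANE_ADDRESS value
 *	flip.flip_immediate = false;
 *	update.surface = plane_state;
 *	update.flip_addr = &flip;	// all other members stay NULL
 */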
2011
2012 static void copy_stream_update_to_stream(struct dc *dc,
2013 struct dc_state *context,
2014 struct dc_stream_state *stream,
2015 struct dc_stream_update *update)
2016 {
2017 struct dc_context *dc_ctx = dc->ctx;
2018
2019 if (update == NULL || stream == NULL)
2020 return;
2021
2022 if (update->src.height && update->src.width)
2023 stream->src = update->src;
2024
2025 if (update->dst.height && update->dst.width)
2026 stream->dst = update->dst;
2027
2028 if (update->out_transfer_func &&
2029 stream->out_transfer_func != update->out_transfer_func) {
2030 stream->out_transfer_func->sdr_ref_white_level =
2031 update->out_transfer_func->sdr_ref_white_level;
2032 stream->out_transfer_func->tf = update->out_transfer_func->tf;
2033 stream->out_transfer_func->type =
2034 update->out_transfer_func->type;
2035 memcpy(&stream->out_transfer_func->tf_pts,
2036 &update->out_transfer_func->tf_pts,
2037 sizeof(struct dc_transfer_func_distributed_points));
2038 }
2039
2040 if (update->hdr_static_metadata)
2041 stream->hdr_static_metadata = *update->hdr_static_metadata;
2042
2043 if (update->abm_level)
2044 stream->abm_level = *update->abm_level;
2045
2046 if (update->periodic_interrupt0)
2047 stream->periodic_interrupt0 = *update->periodic_interrupt0;
2048
2049 if (update->periodic_interrupt1)
2050 stream->periodic_interrupt1 = *update->periodic_interrupt1;
2051
2052 if (update->gamut_remap)
2053 stream->gamut_remap_matrix = *update->gamut_remap;
2054
2055 /* Note: updating this after mode set is currently not a use case;
2056 * however, if it arises, the output CSC (OCSC) would need to be
2057 * reprogrammed at a minimum.
2058 */
2059 if (update->output_color_space)
2060 stream->output_color_space = *update->output_color_space;
2061
2062 if (update->output_csc_transform)
2063 stream->csc_color_matrix = *update->output_csc_transform;
2064
2065 if (update->vrr_infopacket)
2066 stream->vrr_infopacket = *update->vrr_infopacket;
2067
2068 if (update->dpms_off)
2069 stream->dpms_off = *update->dpms_off;
2070
2071 if (update->vsc_infopacket)
2072 stream->vsc_infopacket = *update->vsc_infopacket;
2073
2074 if (update->vsp_infopacket)
2075 stream->vsp_infopacket = *update->vsp_infopacket;
2076
2077 if (update->dither_option)
2078 stream->dither_option = *update->dither_option;
2079 /* update current stream with writeback info */
2080 if (update->wb_update) {
2081 int i;
2082
2083 stream->num_wb_info = update->wb_update->num_wb_info;
2084 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
2085 for (i = 0; i < stream->num_wb_info; i++)
2086 stream->writeback_info[i] =
2087 update->wb_update->writeback_info[i];
2088 }
2089 if (update->dsc_config) {
2090 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
2091 uint32_t old_dsc_enabled = stream->timing.flags.DSC;
2092 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
2093 update->dsc_config->num_slices_v != 0);
2094
2095 /* Use a temporary context for validating the new DSC config */
2096 struct dc_state *dsc_validate_context = dc_create_state(dc);
2097
2098 if (dsc_validate_context) {
2099 dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);
2100
2101 stream->timing.dsc_cfg = *update->dsc_config;
2102 stream->timing.flags.DSC = enable_dsc;
2103 if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
2104 stream->timing.dsc_cfg = old_dsc_cfg;
2105 stream->timing.flags.DSC = old_dsc_enabled;
2106 update->dsc_config = NULL;
2107 }
2108
2109 dc_release_state(dsc_validate_context);
2110 } else {
2111 DC_ERROR("Failed to allocate new validate context for DSC change\n");
2112 update->dsc_config = NULL;
2113 }
2114 }
2115 }
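/*
 * The dsc_config branch above uses a validate-then-commit pattern: the new
 * DSC parameters are applied to the stream, validated against a throwaway
 * copy of the current state, and rolled back (with update->dsc_config
 * cleared so later stages skip DSC programming) if validation fails or the
 * scratch state cannot be allocated.
 */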
2116
2117 static void commit_planes_do_stream_update(struct dc *dc,
2118 struct dc_stream_state *stream,
2119 struct dc_stream_update *stream_update,
2120 enum surface_update_type update_type,
2121 struct dc_state *context)
2122 {
2123 int j;
2124 bool should_program_abm;
2125
2126 // Stream updates
2127 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2128 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2129
2130 if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {
2131
2132 if (stream_update->periodic_interrupt0 &&
2133 dc->hwss.setup_periodic_interrupt)
2134 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0);
2135
2136 if (stream_update->periodic_interrupt1 &&
2137 dc->hwss.setup_periodic_interrupt)
2138 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1);
2139
2140 if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
2141 stream_update->vrr_infopacket ||
2142 stream_update->vsc_infopacket ||
2143 stream_update->vsp_infopacket) {
2144 resource_build_info_frame(pipe_ctx);
2145 dc->hwss.update_info_frame(pipe_ctx);
2146 }
2147
2148 if (stream_update->hdr_static_metadata &&
2149 stream->use_dynamic_meta &&
2150 dc->hwss.set_dmdata_attributes &&
2151 pipe_ctx->stream->dmdata_address.quad_part != 0)
2152 dc->hwss.set_dmdata_attributes(pipe_ctx);
2153
2154 if (stream_update->gamut_remap)
2155 dc_stream_set_gamut_remap(dc, stream);
2156
2157 if (stream_update->output_csc_transform)
2158 dc_stream_program_csc_matrix(dc, stream);
2159
2160 if (stream_update->dither_option) {
2161 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
2162 resource_build_bit_depth_reduction_params(pipe_ctx->stream,
2163 &pipe_ctx->stream->bit_depth_params);
2164 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
2165 &stream->bit_depth_params,
2166 &stream->clamping);
2167 while (odm_pipe) {
2168 odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
2169 &stream->bit_depth_params,
2170 &stream->clamping);
2171 odm_pipe = odm_pipe->next_odm_pipe;
2172 }
2173 }
2174
2175 /* Full front-end update */
2176 if (update_type == UPDATE_TYPE_FAST)
2177 continue;
2178
2179 if (stream_update->dsc_config)
2180 dp_update_dsc_config(pipe_ctx);
2181
2182 if (stream_update->dpms_off) {
2183 if (*stream_update->dpms_off) {
2184 core_link_disable_stream(pipe_ctx);
2185 /* for dpms, keep acquired resources*/
2186 if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
2187 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
2188
2189 dc->hwss.optimize_bandwidth(dc, dc->current_state);
2190 } else {
2191 if (dc->optimize_seamless_boot_streams == 0)
2192 dc->hwss.prepare_bandwidth(dc, dc->current_state);
2193
2194 core_link_enable_stream(dc->current_state, pipe_ctx);
2195 }
2196 }
2197
2198 if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
2199 should_program_abm = true;
2200
2201 // If OTG funcs are defined, check whether blanked before programming
2202 if (pipe_ctx->stream_res.tg->funcs->is_blanked)
2203 if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
2204 should_program_abm = false;
2205
2206 if (should_program_abm) {
2207 if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
2208 pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
2209 } else {
2210 pipe_ctx->stream_res.abm->funcs->set_abm_level(
2211 pipe_ctx->stream_res.abm, stream->abm_level);
2212 }
2213 }
2214 }
2215 }
2216 }
2217 }
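/*
 * commit_planes_for_stream() below applies an already-classified update in
 * a fixed order: raise bandwidth if needed, take the pipe (or
 * interdependent) lock, program stream-level changes, program surfaces
 * (full path) or just flip addresses (fast path), release the lock, and
 * finally fire the manual trigger for flipped bottom pipes.
 */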
2218
2219 static void commit_planes_for_stream(struct dc *dc,
2220 struct dc_surface_update *srf_updates,
2221 int surface_count,
2222 struct dc_stream_state *stream,
2223 struct dc_stream_update *stream_update,
2224 enum surface_update_type update_type,
2225 struct dc_state *context)
2226 {
2227 int i, j;
2228 struct pipe_ctx *top_pipe_to_program = NULL;
2229
2230 if (dc->optimize_seamless_boot_streams > 0 && surface_count > 0) {
2231 /* The seamless boot optimization flag keeps clocks and watermarks high
2232 * until the first flip. After the first flip, optimization is required
2233 * to lower bandwidth. Note that UEFI is expected to light up only a
2234 * single display on POST, so we expect only one stream with the
2235 * seamless boot flag set.
2236 */
2237 if (stream->apply_seamless_boot_optimization) {
2238 stream->apply_seamless_boot_optimization = false;
2239 dc->optimize_seamless_boot_streams--;
2240
2241 if (dc->optimize_seamless_boot_streams == 0)
2242 dc->optimized_required = true;
2243 }
2244 }
2245
2246 if (update_type == UPDATE_TYPE_FULL && dc->optimize_seamless_boot_streams == 0) {
2247 dc->hwss.prepare_bandwidth(dc, context);
2248 context_clock_trace(dc, context);
2249 }
2250
2251 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2252 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2253
2254 if (!pipe_ctx->top_pipe &&
2255 !pipe_ctx->prev_odm_pipe &&
2256 pipe_ctx->stream &&
2257 pipe_ctx->stream == stream) {
2258 top_pipe_to_program = pipe_ctx;
2259 }
2260 }
2261
2262 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
2263 if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable)
2264 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
2265 top_pipe_to_program->stream_res.tg);
2266
2267 if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
2268 dc->hwss.interdependent_update_lock(dc, context, true);
2269 else
2270 /* Lock the top pipe while updating plane addrs, since freesync requires
2271 * plane addr update event triggers to be synchronized.
2272 * top_pipe_to_program is expected never to be NULL.
2273 */
2274 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
2275
2276
2277 // Stream updates
2278 if (stream_update)
2279 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
2280
2281 if (surface_count == 0) {
2282 /*
2283 * When turning off the screen, there is no need to program the front
2284 * end a second time; just return after programming blank.
2285 */
2286 if (dc->hwss.apply_ctx_for_surface)
2287 dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
2288 if (dc->hwss.program_front_end_for_ctx)
2289 dc->hwss.program_front_end_for_ctx(dc, context);
2290
2291 if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
2292 dc->hwss.interdependent_update_lock(dc, context, false);
2293 else
2294 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
2295
2296 dc->hwss.post_unlock_program_front_end(dc, context);
2297 return;
2298 }
2299
2300 if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
2301 for (i = 0; i < surface_count; i++) {
2302 struct dc_plane_state *plane_state = srf_updates[i].surface;
2303 /* Set a logical flag for lock/unlock use */
2304 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2305 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2306 if (!pipe_ctx->plane_state)
2307 continue;
2308 if (pipe_ctx->plane_state != plane_state)
2309 continue;
2310 plane_state->triplebuffer_flips = false;
2311 if (update_type == UPDATE_TYPE_FAST &&
2312 dc->hwss.program_triplebuffer != NULL &&
2313 !plane_state->flip_immediate &&
2314 !dc->debug.disable_tri_buf) {
2315 /* Triple buffer for VUpdate only */
2316 plane_state->triplebuffer_flips = true;
2317 }
2318 }
2319 }
2320 }
2321
2322 // Update Type FULL, Surface updates
2323 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2324 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2325
2326 if (!pipe_ctx->top_pipe &&
2327 !pipe_ctx->prev_odm_pipe &&
2328 pipe_ctx->stream &&
2329 pipe_ctx->stream == stream) {
2330 struct dc_stream_status *stream_status = NULL;
2331
2332 if (!pipe_ctx->plane_state)
2333 continue;
2334
2335 /* Full front-end update */
2336 if (update_type == UPDATE_TYPE_FAST)
2337 continue;
2338
2339 ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
2340
2341 if (dc->hwss.program_triplebuffer != NULL &&
2342 !dc->debug.disable_tri_buf) {
2343 /* Turn off triple buffering for a full update */
2344 dc->hwss.program_triplebuffer(
2345 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
2346 }
2347 stream_status =
2348 stream_get_status(context, pipe_ctx->stream);
2349
2350 if (dc->hwss.apply_ctx_for_surface)
2351 dc->hwss.apply_ctx_for_surface(
2352 dc, pipe_ctx->stream, stream_status->plane_count, context);
2353 }
2354 }
2355 if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
2356 dc->hwss.program_front_end_for_ctx(dc, context);
2357 #ifdef CONFIG_DRM_AMD_DC_DCN
2358 if (dc->debug.validate_dml_output) {
2359 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2360 struct pipe_ctx cur_pipe = context->res_ctx.pipe_ctx[i];
2361 if (cur_pipe.stream == NULL)
2362 continue;
2363
2364 cur_pipe.plane_res.hubp->funcs->validate_dml_output(
2365 cur_pipe.plane_res.hubp, dc->ctx,
2366 &context->res_ctx.pipe_ctx[i].rq_regs,
2367 &context->res_ctx.pipe_ctx[i].dlg_regs,
2368 &context->res_ctx.pipe_ctx[i].ttu_regs);
2369 }
2370 }
2371 #endif
2372 }
2373
2374 // Update Type FAST, Surface updates
2375 if (update_type == UPDATE_TYPE_FAST) {
2376 if (dc->hwss.set_flip_control_gsl)
2377 for (i = 0; i < surface_count; i++) {
2378 struct dc_plane_state *plane_state = srf_updates[i].surface;
2379
2380 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2381 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2382
2383 if (pipe_ctx->stream != stream)
2384 continue;
2385
2386 if (pipe_ctx->plane_state != plane_state)
2387 continue;
2388
2389 // GSL has to be used for flip immediate
2390 dc->hwss.set_flip_control_gsl(pipe_ctx,
2391 plane_state->flip_immediate);
2392 }
2393 }
2394 /* Perform requested Updates */
2395 for (i = 0; i < surface_count; i++) {
2396 struct dc_plane_state *plane_state = srf_updates[i].surface;
2397
2398 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2399 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2400
2401 if (pipe_ctx->stream != stream)
2402 continue;
2403
2404 if (pipe_ctx->plane_state != plane_state)
2405 continue;
2406 /* Program triple buffering after lock, based on flip type */
2407 if (dc->hwss.program_triplebuffer != NULL &&
2408 !dc->debug.disable_tri_buf) {
2409 /* Only enable triple buffering for fast updates */
2410 dc->hwss.program_triplebuffer(
2411 dc, pipe_ctx, plane_state->triplebuffer_flips);
2412 }
2413 if (srf_updates[i].flip_addr)
2414 dc->hwss.update_plane_addr(dc, pipe_ctx);
2415 }
2416 }
2417 }
2418
2419 if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
2420 dc->hwss.interdependent_update_lock(dc, context, false);
2421 else
2422 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
2423
2424 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
2425 if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
2426 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
2427 top_pipe_to_program->stream_res.tg,
2428 CRTC_STATE_VACTIVE);
2429 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
2430 top_pipe_to_program->stream_res.tg,
2431 CRTC_STATE_VBLANK);
2432 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
2433 top_pipe_to_program->stream_res.tg,
2434 CRTC_STATE_VACTIVE);
2435 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
2436 top_pipe_to_program->stream_res.tg);
2437 }
2438
2439 if (update_type != UPDATE_TYPE_FAST)
2440 dc->hwss.post_unlock_program_front_end(dc, context);
2441
2442 // Fire manual trigger only when bottom plane is flipped
2443 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2444 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2445
2446 if (pipe_ctx->bottom_pipe ||
2447 !pipe_ctx->stream ||
2448 pipe_ctx->stream != stream ||
2449 !pipe_ctx->plane_state->update_flags.bits.addr_update)
2450 continue;
2451
2452 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
2453 pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
2454 }
2455 }
2456
2457 void dc_commit_updates_for_stream(struct dc *dc,
2458 struct dc_surface_update *srf_updates,
2459 int surface_count,
2460 struct dc_stream_state *stream,
2461 struct dc_stream_update *stream_update,
2462 struct dc_state *state)
2463 {
2464 const struct dc_stream_status *stream_status;
2465 enum surface_update_type update_type;
2466 struct dc_state *context;
2467 struct dc_context *dc_ctx = dc->ctx;
2468 int i, j;
2469
2470 stream_status = dc_stream_get_status(stream);
2471 context = dc->current_state;
2472
2473 update_type = dc_check_update_surfaces_for_stream(
2474 dc, srf_updates, surface_count, stream_update, stream_status);
2475
2476 if (update_type >= update_surface_trace_level)
2477 update_surface_trace(dc, srf_updates, surface_count);
2478
2479
2480 if (update_type >= UPDATE_TYPE_FULL) {
2481
2482 /* initialize scratch memory for building context */
2483 context = dc_create_state(dc);
2484 if (context == NULL) {
2485 DC_ERROR("Failed to allocate new validate context!\n");
2486 return;
2487 }
2488
2489 dc_resource_state_copy_construct(state, context);
2490
2491 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2492 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
2493 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
2494
2495 if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
2496 new_pipe->plane_state->force_full_update = true;
2497 }
2498 }
2499
2500
2501 for (i = 0; i < surface_count; i++) {
2502 struct dc_plane_state *surface = srf_updates[i].surface;
2503
2504 copy_surface_update_to_plane(surface, &srf_updates[i]);
2505
2506 if (update_type >= UPDATE_TYPE_MED) {
2507 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2508 struct pipe_ctx *pipe_ctx =
2509 &context->res_ctx.pipe_ctx[j];
2510
2511 if (pipe_ctx->plane_state != surface)
2512 continue;
2513
2514 resource_build_scaling_params(pipe_ctx);
2515 }
2516 }
2517 }
2518
2519 copy_stream_update_to_stream(dc, context, stream, stream_update);
2520
2521 commit_planes_for_stream(
2522 dc,
2523 srf_updates,
2524 surface_count,
2525 stream,
2526 stream_update,
2527 update_type,
2528 context);
2529 /* Update current_state */
2530 if (dc->current_state != context) {
2531
2532 struct dc_state *old = dc->current_state;
2533
2534 dc->current_state = context;
2535 dc_release_state(old);
2536
2537 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2538 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2539
2540 if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
2541 pipe_ctx->plane_state->force_full_update = false;
2542 }
2543 }
2544 /* Use current_state to update watermarks, etc. */
2545 if (update_type >= UPDATE_TYPE_FULL)
2546 dc_post_update_surfaces_to_stream(dc);
2547
2550 }
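/*
 * Flip sketch (illustrative, hypothetical DM-side locals): a page flip is
 * a single-surface update committed against the current state with no
 * stream-level changes.
 *
 *	struct dc_flip_addrs flip = { 0 };
 *	struct dc_surface_update update = { 0 };
 *
 *	flip.address = fb_address;	// hypothetical framebuffer address
 *	update.surface = plane_state;
 *	update.flip_addr = &flip;
 *
 *	dc_commit_updates_for_stream(dc, &update, 1, stream,
 *				     NULL,	// no stream update
 *				     dc->current_state);
 */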
2551
2552 uint8_t dc_get_current_stream_count(struct dc *dc)
2553 {
2554 return dc->current_state->stream_count;
2555 }
2556
2557 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
2558 {
2559 if (i < dc->current_state->stream_count)
2560 return dc->current_state->streams[i];
2561 return NULL;
2562 }
2563
2564 enum dc_irq_source dc_interrupt_to_irq_source(
2565 struct dc *dc,
2566 uint32_t src_id,
2567 uint32_t ext_id)
2568 {
2569 return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
2570 }
2571
2572 /**
2573 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
2574 */
2575 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
2576 {
2577
2578 if (dc == NULL)
2579 return false;
2580
2581 return dal_irq_service_set(dc->res_pool->irqs, src, enable);
2582 }
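/*
 * Example (illustrative): enable vblank interrupts on the first CRTC,
 * assuming the DC_IRQ_SOURCE_VBLANK1 source from irq_types.h.
 *
 *	if (!dc_interrupt_set(dc, DC_IRQ_SOURCE_VBLANK1, true))
 *		; // enabling failed or the source is invalid
 */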
2583
2584 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
2585 {
2586 dal_irq_service_ack(dc->res_pool->irqs, src);
2587 }
2588
2589 void dc_set_power_state(
2590 struct dc *dc,
2591 enum dc_acpi_cm_power_state power_state)
2592 {
2593 struct kref refcount;
2594 struct display_mode_lib *dml;
2595
2596 switch (power_state) {
2597 case DC_ACPI_CM_POWER_STATE_D0:
2598 dc_resource_state_construct(dc, dc->current_state);
2599
2600 if (dc->ctx->dmub_srv)
2601 dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);
2602
2603 dc->hwss.init_hw(dc);
2604
2605 if (dc->hwss.init_sys_ctx != NULL &&
2606 dc->vm_pa_config.valid) {
2607 dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
2608 }
2609
2610 break;
2611 default:
2612 ASSERT(dc->current_state->stream_count == 0);
2613 /* Zero out the current context so that on resume we start with
2614 * clean state, and dc hw programming optimizations will not
2615 * cause any trouble.
2616 */
2617 dml = kzalloc(sizeof(struct display_mode_lib),
2618 GFP_KERNEL);
2619
2620 ASSERT(dml);
2621 if (!dml)
2622 return;
2623
2624 /* Preserve refcount */
2625 refcount = dc->current_state->refcount;
2626 /* Preserve display mode lib */
2627 memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));
2628
2629 dc_resource_state_destruct(dc->current_state);
2630 memset(dc->current_state, 0,
2631 sizeof(*dc->current_state));
2632
2633 dc->current_state->refcount = refcount;
2634 dc->current_state->bw_ctx.dml = *dml;
2635
2636 kfree(dml);
2637
2638 break;
2639 }
2640 }
2641
2642 void dc_resume(struct dc *dc)
2643 {
2644
2645 uint32_t i;
2646
2647 for (i = 0; i < dc->link_count; i++)
2648 core_link_resume(dc->links[i]);
2649 }
2650
2651 unsigned int dc_get_current_backlight_pwm(struct dc *dc)
2652 {
2653 struct abm *abm = dc->res_pool->abm;
2654
2655 if (abm)
2656 return abm->funcs->get_current_backlight(abm);
2657
2658 return 0;
2659 }
2660
2661 unsigned int dc_get_target_backlight_pwm(struct dc *dc)
2662 {
2663 struct abm *abm = dc->res_pool->abm;
2664
2665 if (abm)
2666 return abm->funcs->get_target_backlight(abm);
2667
2668 return 0;
2669 }
2670
2671 bool dc_is_dmcu_initialized(struct dc *dc)
2672 {
2673 struct dmcu *dmcu = dc->res_pool->dmcu;
2674
2675 if (dmcu)
2676 return dmcu->funcs->is_dmcu_initialized(dmcu);
2677 return false;
2678 }
2679
2680 bool dc_submit_i2c(
2681 struct dc *dc,
2682 uint32_t link_index,
2683 struct i2c_command *cmd)
2684 {
2685
2686 struct dc_link *link = dc->links[link_index];
2687 struct ddc_service *ddc = link->ddc;
2688 return dce_i2c_submit_command(
2689 dc->res_pool,
2690 ddc->ddc_pin,
2691 cmd);
2692 }
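/*
 * Usage sketch (illustrative; the struct layout per dc_ddc_types.h is an
 * assumption, not guaranteed): a single-payload I2C write over the link's
 * DDC line.
 *
 *	uint8_t data[2] = { 0x00, 0x01 };	// hypothetical payload
 *	struct i2c_payload payload = {
 *		.write = true,
 *		.address = 0x37,		// hypothetical 7-bit address
 *		.length = sizeof(data),
 *		.data = data,
 *	};
 *	struct i2c_command cmd = {
 *		.payloads = &payload,
 *		.number_of_payloads = 1,
 *		.engine = I2C_COMMAND_ENGINE_DEFAULT,
 *		.speed = 100,			// kHz, hypothetical
 *	};
 *
 *	dc_submit_i2c(dc, link_index, &cmd);
 */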
2693
2694 bool dc_submit_i2c_oem(
2695 struct dc *dc,
2696 struct i2c_command *cmd)
2697 {
2698 struct ddc_service *ddc = dc->res_pool->oem_device;
2699 return dce_i2c_submit_command(
2700 dc->res_pool,
2701 ddc->ddc_pin,
2702 cmd);
2703 }
2704
2705 static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
2706 {
2707 if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
2708 BREAK_TO_DEBUGGER();
2709 return false;
2710 }
2711
2712 dc_sink_retain(sink);
2713
2714 dc_link->remote_sinks[dc_link->sink_count] = sink;
2715 dc_link->sink_count++;
2716
2717 return true;
2718 }
2719
2720 /**
2721 * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
2722 *
2723 * EDID length is in bytes
2724 */
2725 struct dc_sink *dc_link_add_remote_sink(
2726 struct dc_link *link,
2727 const uint8_t *edid,
2728 int len,
2729 struct dc_sink_init_data *init_data)
2730 {
2731 struct dc_sink *dc_sink;
2732 enum dc_edid_status edid_status;
2733
2734 if (len > DC_MAX_EDID_BUFFER_SIZE) {
2735 dm_error("Max EDID buffer size breached!\n");
2736 return NULL;
2737 }
2738
2739 if (!init_data) {
2740 BREAK_TO_DEBUGGER();
2741 return NULL;
2742 }
2743
2744 if (!init_data->link) {
2745 BREAK_TO_DEBUGGER();
2746 return NULL;
2747 }
2748
2749 dc_sink = dc_sink_create(init_data);
2750
2751 if (!dc_sink)
2752 return NULL;
2753
2754 memmove(dc_sink->dc_edid.raw_edid, edid, len);
2755 dc_sink->dc_edid.length = len;
2756
2757 if (!link_add_remote_sink_helper(
2758 link,
2759 dc_sink))
2760 goto fail_add_sink;
2761
2762 edid_status = dm_helpers_parse_edid_caps(
2763 link->ctx,
2764 &dc_sink->dc_edid,
2765 &dc_sink->edid_caps);
2766
2767 /*
2768 * Treat the device as having no EDID if
2769 * EDID parsing fails.
2770 */
2771 if (edid_status != EDID_OK) {
2772 dc_sink->dc_edid.length = 0;
2773 dm_error("Bad EDID, status%d!\n", edid_status);
2774 }
2775
2776 return dc_sink;
2777
2778 fail_add_sink:
2779 dc_sink_release(dc_sink);
2780 return NULL;
2781 }
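/*
 * MST hotplug sketch (illustrative, hypothetical DM-side locals): a remote
 * sink is created from the EDID read over sideband and attached to the
 * link that owns the MST topology.
 *
 *	struct dc_sink_init_data init = {
 *		.link = link,
 *		.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST,
 *	};
 *	struct dc_sink *sink =
 *		dc_link_add_remote_sink(link, edid_buf, edid_len, &init);
 *
 *	if (!sink)
 *		; // creation failed or the sink could not be attached
 */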
2782
2783 /**
2784 * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
2785 *
2786 * Note that this just removes the struct dc_sink - it doesn't
2787 * program hardware or alter other members of dc_link
2788 */
2789 void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
2790 {
2791 int i;
2792
2793 if (!link->sink_count) {
2794 BREAK_TO_DEBUGGER();
2795 return;
2796 }
2797
2798 for (i = 0; i < link->sink_count; i++) {
2799 if (link->remote_sinks[i] == sink) {
2800 dc_sink_release(sink);
2801 link->remote_sinks[i] = NULL;
2802
2803 /* Shrink the array to remove the empty slot */
2804 while (i < link->sink_count - 1) {
2805 link->remote_sinks[i] = link->remote_sinks[i+1];
2806 i++;
2807 }
2808 link->remote_sinks[i] = NULL;
2809 link->sink_count--;
2810 return;
2811 }
2812 }
2813 }
2814
2815 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
2816 {
2817 info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
2818 info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
2819 info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
2820 info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
2821 info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
2822 info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
2823 info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
2824 info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
2825 info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
2826 }
2827 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
2828 {
2829 if (dc->hwss.set_clock)
2830 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
2831 return DC_ERROR_UNEXPECTED;
2832 }
2833 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
2834 {
2835 if (dc->hwss.get_clock)
2836 dc->hwss.get_clock(dc, clock_type, clock_cfg);
2837 }