drivers/gpu/drm/i915/display/intel_dp.c
1 /*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28 #include <linux/export.h>
29 #include <linux/i2c.h>
30 #include <linux/notifier.h>
31 #include <linux/slab.h>
32 #include <linux/string_helpers.h>
33 #include <linux/timekeeping.h>
34 #include <linux/types.h>
35
36 #include <asm/byteorder.h>
37
38 #include <drm/display/drm_dp_helper.h>
39 #include <drm/display/drm_dsc_helper.h>
40 #include <drm/display/drm_hdmi_helper.h>
41 #include <drm/drm_atomic_helper.h>
42 #include <drm/drm_crtc.h>
43 #include <drm/drm_edid.h>
44 #include <drm/drm_probe_helper.h>
45
46 #include "g4x_dp.h"
47 #include "i915_debugfs.h"
48 #include "i915_drv.h"
49 #include "intel_atomic.h"
50 #include "intel_audio.h"
51 #include "intel_backlight.h"
52 #include "intel_combo_phy_regs.h"
53 #include "intel_connector.h"
54 #include "intel_crtc.h"
55 #include "intel_ddi.h"
56 #include "intel_de.h"
57 #include "intel_display_types.h"
58 #include "intel_dp.h"
59 #include "intel_dp_aux.h"
60 #include "intel_dp_hdcp.h"
61 #include "intel_dp_link_training.h"
62 #include "intel_dp_mst.h"
63 #include "intel_dpio_phy.h"
64 #include "intel_dpll.h"
65 #include "intel_fifo_underrun.h"
66 #include "intel_hdcp.h"
67 #include "intel_hdmi.h"
68 #include "intel_hotplug.h"
69 #include "intel_lspcon.h"
70 #include "intel_lvds.h"
71 #include "intel_panel.h"
72 #include "intel_pch_display.h"
73 #include "intel_pps.h"
74 #include "intel_psr.h"
75 #include "intel_tc.h"
76 #include "intel_vdsc.h"
77 #include "intel_vrr.h"
78
79 /* DP DSC throughput values used for slice count calculations KPixels/s */
80 #define DP_DSC_PEAK_PIXEL_RATE 2720000
81 #define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
82 #define DP_DSC_MAX_ENC_THROUGHPUT_1 400000
83
84 /* DP DSC FEC Overhead factor = 1/(0.972261) */
85 #define DP_DSC_FEC_OVERHEAD_FACTOR 972261
86
87 /* Compliance test status bits */
88 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0
89 #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
90 #define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
91 #define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
92
93
94 /* Constants for DP DSC configurations */
95 static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
96
97 /* With a single pipe configuration, HW is capable of supporting a maximum
98 * of 4 slices per line.
99 */
100 static const u8 valid_dsc_slicecount[] = {1, 2, 4};
101
102 /**
103 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
104 * @intel_dp: DP struct
105 *
106 * If a CPU or PCH DP output is attached to an eDP panel, this function
107 * will return true, and false otherwise.
108 *
109 * This function is not safe to use prior to encoder type being set.
110 */
111 bool intel_dp_is_edp(struct intel_dp *intel_dp)
112 {
113 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
114
115 return dig_port->base.type == INTEL_OUTPUT_EDP;
116 }
117
118 static void intel_dp_unset_edid(struct intel_dp *intel_dp);
119 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc);
120
121 /* Is link rate UHBR and thus 128b/132b? */
122 bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state)
123 {
124 return crtc_state->port_clock >= 1000000;
125 }
126
127 static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp)
128 {
129 intel_dp->sink_rates[0] = 162000;
130 intel_dp->num_sink_rates = 1;
131 }
132
133 /* update sink rates from dpcd */
134 static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp)
135 {
136 static const int dp_rates[] = {
137 162000, 270000, 540000, 810000
138 };
139 int i, max_rate;
140 int max_lttpr_rate;
141
142 if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
143 /* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
144 static const int quirk_rates[] = { 162000, 270000, 324000 };
145
146 memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
147 intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);
148
149 return;
150 }
151
152 /*
153 * Sink rates for 8b/10b.
154 */
155 max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
156 max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
157 if (max_lttpr_rate)
158 max_rate = min(max_rate, max_lttpr_rate);
159
160 for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
161 if (dp_rates[i] > max_rate)
162 break;
163 intel_dp->sink_rates[i] = dp_rates[i];
164 }
165
166 /*
167 * Sink rates for 128b/132b. If set, sink should support all 8b/10b
168 * rates and 10 Gbps.
169 */
170 if (intel_dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B) {
171 u8 uhbr_rates = 0;
172
173 BUILD_BUG_ON(ARRAY_SIZE(intel_dp->sink_rates) < ARRAY_SIZE(dp_rates) + 3);
174
175 drm_dp_dpcd_readb(&intel_dp->aux,
176 DP_128B132B_SUPPORTED_LINK_RATES, &uhbr_rates);
177
178 if (drm_dp_lttpr_count(intel_dp->lttpr_common_caps)) {
179 /* We have a repeater */
180 if (intel_dp->lttpr_common_caps[0] >= 0x20 &&
181 intel_dp->lttpr_common_caps[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
182 DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] &
183 DP_PHY_REPEATER_128B132B_SUPPORTED) {
184 /* Repeater supports 128b/132b, valid UHBR rates */
185 uhbr_rates &= intel_dp->lttpr_common_caps[DP_PHY_REPEATER_128B132B_RATES -
186 DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
187 } else {
188 /* Does not support 128b/132b */
189 uhbr_rates = 0;
190 }
191 }
192
193 if (uhbr_rates & DP_UHBR10)
194 intel_dp->sink_rates[i++] = 1000000;
195 if (uhbr_rates & DP_UHBR13_5)
196 intel_dp->sink_rates[i++] = 1350000;
197 if (uhbr_rates & DP_UHBR20)
198 intel_dp->sink_rates[i++] = 2000000;
199 }
200
201 intel_dp->num_sink_rates = i;
202 }
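/*
 * For example, assuming a DP 2.0 sink that reports DP_MAX_LINK_RATE = 0x1e
 * (HBR3), DP_CAP_ANSI_128B132B set in DP_MAIN_LINK_CHANNEL_CODING and
 * DP_UHBR10 set in DP_128B132B_SUPPORTED_LINK_RATES, with no LTTPRs in the
 * path, the code above ends up with
 *
 *   sink_rates[]   = { 162000, 270000, 540000, 810000, 1000000 }
 *   num_sink_rates = 5
 *
 * The DPCD values are illustrative, not taken from any particular panel.
 */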
203
204 static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
205 {
206 struct intel_connector *connector = intel_dp->attached_connector;
207 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
208 struct intel_encoder *encoder = &intel_dig_port->base;
209
210 intel_dp_set_dpcd_sink_rates(intel_dp);
211
212 if (intel_dp->num_sink_rates)
213 return;
214
215 drm_err(&dp_to_i915(intel_dp)->drm,
216 "[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD with no link rates, using defaults\n",
217 connector->base.base.id, connector->base.name,
218 encoder->base.base.id, encoder->base.name);
219
220 intel_dp_set_default_sink_rates(intel_dp);
221 }
222
223 static void intel_dp_set_default_max_sink_lane_count(struct intel_dp *intel_dp)
224 {
225 intel_dp->max_sink_lane_count = 1;
226 }
227
228 static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp)
229 {
230 struct intel_connector *connector = intel_dp->attached_connector;
231 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
232 struct intel_encoder *encoder = &intel_dig_port->base;
233
234 intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
235
236 switch (intel_dp->max_sink_lane_count) {
237 case 1:
238 case 2:
239 case 4:
240 return;
241 }
242
243 drm_err(&dp_to_i915(intel_dp)->drm,
244 "[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD max lane count (%d), using default\n",
245 connector->base.base.id, connector->base.name,
246 encoder->base.base.id, encoder->base.name,
247 intel_dp->max_sink_lane_count);
248
249 intel_dp_set_default_max_sink_lane_count(intel_dp);
250 }
251
252 /* Get length of rates array potentially limited by max_rate. */
253 static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
254 {
255 int i;
256
257 /* Limit results by potentially reduced max rate */
258 for (i = 0; i < len; i++) {
259 if (rates[len - i - 1] <= max_rate)
260 return len - i;
261 }
262
263 return 0;
264 }
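/*
 * For example, with rates[] = { 162000, 270000, 540000, 810000 } (len = 4)
 * and max_rate = 540000 (illustrative values), the scan from the top finds
 * 540000 <= 540000 at i = 1 and returns len - i = 3, i.e. only the first
 * three entries remain usable.
 */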
265
266 /* Get length of common rates array potentially limited by max_rate. */
267 static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
268 int max_rate)
269 {
270 return intel_dp_rate_limit_len(intel_dp->common_rates,
271 intel_dp->num_common_rates, max_rate);
272 }
273
274 static int intel_dp_common_rate(struct intel_dp *intel_dp, int index)
275 {
276 if (drm_WARN_ON(&dp_to_i915(intel_dp)->drm,
277 index < 0 || index >= intel_dp->num_common_rates))
278 return 162000;
279
280 return intel_dp->common_rates[index];
281 }
282
283 /* Theoretical max between source and sink */
284 static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
285 {
286 return intel_dp_common_rate(intel_dp, intel_dp->num_common_rates - 1);
287 }
288
289 /* Theoretical max between source and sink */
290 static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
291 {
292 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
293 int source_max = dig_port->max_lanes;
294 int sink_max = intel_dp->max_sink_lane_count;
295 int fia_max = intel_tc_port_fia_max_lane_count(dig_port);
296 int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps);
297
298 if (lttpr_max)
299 sink_max = min(sink_max, lttpr_max);
300
301 return min3(source_max, sink_max, fia_max);
302 }
303
304 int intel_dp_max_lane_count(struct intel_dp *intel_dp)
305 {
306 switch (intel_dp->max_link_lane_count) {
307 case 1:
308 case 2:
309 case 4:
310 return intel_dp->max_link_lane_count;
311 default:
312 MISSING_CASE(intel_dp->max_link_lane_count);
313 return 1;
314 }
315 }
316
317 /*
318 * The required data bandwidth for a mode with given pixel clock and bpp. This
319 * is the required net bandwidth independent of the data bandwidth efficiency.
320 */
321 int
322 intel_dp_link_required(int pixel_clock, int bpp)
323 {
324 /* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
325 return DIV_ROUND_UP(pixel_clock * bpp, 8);
326 }
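/*
 * For example (values chosen for illustration), a 533250 kHz pixel clock at
 * 24 bpp requires 533250 * 24 / 8 = 1599750 kBps of net link bandwidth.
 */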
327
328 /*
329 * Given a link rate and lanes, get the data bandwidth.
330 *
331 * Data bandwidth is the actual payload rate, which depends on the data
332 * bandwidth efficiency and the link rate.
333 *
334 * For 8b/10b channel encoding, SST and non-FEC, the data bandwidth efficiency
335 * is 80%. For example, for a 1.62 Gbps link, 1.62*10^9 bps * 0.80 * (1/8) =
336 * 162000 kBps. With 8-bit symbols, we have 162000 kHz symbol clock. Just by
337 * coincidence, the port clock in kHz matches the data bandwidth in kBps, and
338 * they equal the link bit rate in Gbps multiplied by 100000. (Note that this no
339 * longer holds for data bandwidth as soon as FEC or MST is taken into account!)
340 *
341 * For 128b/132b channel encoding, the data bandwidth efficiency is 96.71%. For
342 * example, for a 10 Gbps link, 10*10^9 bps * 0.9671 * (1/8) = 1208875
343 * kBps. With 32-bit symbols, we have 312500 kHz symbol clock. The value 1000000
344 * does not match the symbol clock, the port clock (not even if you think in
345 * terms of a byte clock), nor the data bandwidth. It only matches the link bit
346 * rate in units of 10000 bps.
347 */
348 int
349 intel_dp_max_data_rate(int max_link_rate, int max_lanes)
350 {
351 if (max_link_rate >= 1000000) {
352 /*
353 * UHBR rates always use 128b/132b channel encoding, and have
354 * 96.71% data bandwidth efficiency. Consider max_link_rate the
355 * link bit rate in units of 10000 bps.
356 */
357 int max_link_rate_kbps = max_link_rate * 10;
358
359 max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(max_link_rate_kbps, 9671), 10000);
360 max_link_rate = max_link_rate_kbps / 8;
361 }
362
363 /*
364 * Lower than UHBR rates always use 8b/10b channel encoding, and have
365 * 80% data bandwidth efficiency for SST non-FEC. However, this turns
366 * out to be a nop by coincidence, and can be skipped:
367 *
368 * int max_link_rate_kbps = max_link_rate * 10;
369 * max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(max_link_rate_kbps * 8, 10);
370 * max_link_rate = max_link_rate_kbps / 8;
371 */
372
373 return max_link_rate * max_lanes;
374 }
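/*
 * Worked examples (illustrative): an HBR2 link (540000) with 4 lanes gives
 * 540000 * 4 = 2160000 kBps, while a UHBR10 link with 4 lanes gives
 * 1000000 * 10 * 9671 / 10000 / 8 * 4 = 4835500 kBps. The mode from the
 * intel_dp_link_required() example above (1599750 kBps) therefore fits on
 * either link; intel_dp_mode_valid() and intel_dp_compute_link_config_wide()
 * below perform exactly this comparison.
 */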
375
376 bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
377 {
378 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
379 struct intel_encoder *encoder = &intel_dig_port->base;
380 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
381
382 return DISPLAY_VER(dev_priv) >= 12 ||
383 (DISPLAY_VER(dev_priv) == 11 &&
384 encoder->port != PORT_A);
385 }
386
387 static int dg2_max_source_rate(struct intel_dp *intel_dp)
388 {
389 return intel_dp_is_edp(intel_dp) ? 810000 : 1350000;
390 }
391
392 static int icl_max_source_rate(struct intel_dp *intel_dp)
393 {
394 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
395 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
396 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
397
398 if (intel_phy_is_combo(dev_priv, phy) && !intel_dp_is_edp(intel_dp))
399 return 540000;
400
401 return 810000;
402 }
403
404 static int ehl_max_source_rate(struct intel_dp *intel_dp)
405 {
406 if (intel_dp_is_edp(intel_dp))
407 return 540000;
408
409 return 810000;
410 }
411
412 static int vbt_max_link_rate(struct intel_dp *intel_dp)
413 {
414 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
415 int max_rate;
416
417 max_rate = intel_bios_dp_max_link_rate(encoder);
418
419 if (intel_dp_is_edp(intel_dp)) {
420 struct intel_connector *connector = intel_dp->attached_connector;
421 int edp_max_rate = connector->panel.vbt.edp.max_link_rate;
422
423 if (max_rate && edp_max_rate)
424 max_rate = min(max_rate, edp_max_rate);
425 else if (edp_max_rate)
426 max_rate = edp_max_rate;
427 }
428
429 return max_rate;
430 }
431
432 static void
433 intel_dp_set_source_rates(struct intel_dp *intel_dp)
434 {
435 /* The values must be in increasing order */
436 static const int icl_rates[] = {
437 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000,
438 1000000, 1350000,
439 };
440 static const int bxt_rates[] = {
441 162000, 216000, 243000, 270000, 324000, 432000, 540000
442 };
443 static const int skl_rates[] = {
444 162000, 216000, 270000, 324000, 432000, 540000
445 };
446 static const int hsw_rates[] = {
447 162000, 270000, 540000
448 };
449 static const int g4x_rates[] = {
450 162000, 270000
451 };
452 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
453 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
454 const int *source_rates;
455 int size, max_rate = 0, vbt_max_rate;
456
457 /* This should only be done once */
458 drm_WARN_ON(&dev_priv->drm,
459 intel_dp->source_rates || intel_dp->num_source_rates);
460
461 if (DISPLAY_VER(dev_priv) >= 11) {
462 source_rates = icl_rates;
463 size = ARRAY_SIZE(icl_rates);
464 if (IS_DG2(dev_priv))
465 max_rate = dg2_max_source_rate(intel_dp);
466 else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
467 IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
468 max_rate = 810000;
469 else if (IS_JSL_EHL(dev_priv))
470 max_rate = ehl_max_source_rate(intel_dp);
471 else
472 max_rate = icl_max_source_rate(intel_dp);
473 } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
474 source_rates = bxt_rates;
475 size = ARRAY_SIZE(bxt_rates);
476 } else if (DISPLAY_VER(dev_priv) == 9) {
477 source_rates = skl_rates;
478 size = ARRAY_SIZE(skl_rates);
479 } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
480 IS_BROADWELL(dev_priv)) {
481 source_rates = hsw_rates;
482 size = ARRAY_SIZE(hsw_rates);
483 } else {
484 source_rates = g4x_rates;
485 size = ARRAY_SIZE(g4x_rates);
486 }
487
488 vbt_max_rate = vbt_max_link_rate(intel_dp);
489 if (max_rate && vbt_max_rate)
490 max_rate = min(max_rate, vbt_max_rate);
491 else if (vbt_max_rate)
492 max_rate = vbt_max_rate;
493
494 if (max_rate)
495 size = intel_dp_rate_limit_len(source_rates, size, max_rate);
496
497 intel_dp->source_rates = source_rates;
498 intel_dp->num_source_rates = size;
499 }
500
501 static int intersect_rates(const int *source_rates, int source_len,
502 const int *sink_rates, int sink_len,
503 int *common_rates)
504 {
505 int i = 0, j = 0, k = 0;
506
507 while (i < source_len && j < sink_len) {
508 if (source_rates[i] == sink_rates[j]) {
509 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
510 return k;
511 common_rates[k] = source_rates[i];
512 ++k;
513 ++i;
514 ++j;
515 } else if (source_rates[i] < sink_rates[j]) {
516 ++i;
517 } else {
518 ++j;
519 }
520 }
521 return k;
522 }
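/*
 * For example, intersecting source_rates[] = { 162000, 270000, 540000 } with
 * sink_rates[] = { 162000, 270000, 324000, 540000 } (illustrative values)
 * yields common_rates[] = { 162000, 270000, 540000 } and a return value of 3.
 * Both input arrays must already be sorted in increasing order for the
 * two-pointer walk above to work.
 */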
523
524 /* return index of rate in rates array, or -1 if not found */
525 static int intel_dp_rate_index(const int *rates, int len, int rate)
526 {
527 int i;
528
529 for (i = 0; i < len; i++)
530 if (rate == rates[i])
531 return i;
532
533 return -1;
534 }
535
536 static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
537 {
538 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
539
540 drm_WARN_ON(&i915->drm,
541 !intel_dp->num_source_rates || !intel_dp->num_sink_rates);
542
543 intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
544 intel_dp->num_source_rates,
545 intel_dp->sink_rates,
546 intel_dp->num_sink_rates,
547 intel_dp->common_rates);
548
549 /* Paranoia, there should always be something in common. */
550 if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
551 intel_dp->common_rates[0] = 162000;
552 intel_dp->num_common_rates = 1;
553 }
554 }
555
556 static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
557 u8 lane_count)
558 {
559 /*
560 * FIXME: we need to synchronize the current link parameters with
561 * hardware readout. Currently fast link training doesn't work on
562 * boot-up.
563 */
564 if (link_rate == 0 ||
565 link_rate > intel_dp->max_link_rate)
566 return false;
567
568 if (lane_count == 0 ||
569 lane_count > intel_dp_max_lane_count(intel_dp))
570 return false;
571
572 return true;
573 }
574
575 static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
576 int link_rate,
577 u8 lane_count)
578 {
579 /* FIXME figure out what we actually want here */
580 const struct drm_display_mode *fixed_mode =
581 intel_panel_preferred_fixed_mode(intel_dp->attached_connector);
582 int mode_rate, max_rate;
583
584 mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
585 max_rate = intel_dp_max_data_rate(link_rate, lane_count);
586 if (mode_rate > max_rate)
587 return false;
588
589 return true;
590 }
591
592 int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
593 int link_rate, u8 lane_count)
594 {
595 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
596 int index;
597
598 /*
599 * TODO: Enable fallback on MST links once MST link compute can handle
600 * the fallback params.
601 */
602 if (intel_dp->is_mst) {
603 drm_err(&i915->drm, "Link Training Unsuccessful\n");
604 return -1;
605 }
606
607 if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) {
608 drm_dbg_kms(&i915->drm,
609 "Retrying Link training for eDP with max parameters\n");
610 intel_dp->use_max_params = true;
611 return 0;
612 }
613
614 index = intel_dp_rate_index(intel_dp->common_rates,
615 intel_dp->num_common_rates,
616 link_rate);
617 if (index > 0) {
618 if (intel_dp_is_edp(intel_dp) &&
619 !intel_dp_can_link_train_fallback_for_edp(intel_dp,
620 intel_dp_common_rate(intel_dp, index - 1),
621 lane_count)) {
622 drm_dbg_kms(&i915->drm,
623 "Retrying Link training for eDP with same parameters\n");
624 return 0;
625 }
626 intel_dp->max_link_rate = intel_dp_common_rate(intel_dp, index - 1);
627 intel_dp->max_link_lane_count = lane_count;
628 } else if (lane_count > 1) {
629 if (intel_dp_is_edp(intel_dp) &&
630 !intel_dp_can_link_train_fallback_for_edp(intel_dp,
631 intel_dp_max_common_rate(intel_dp),
632 lane_count >> 1)) {
633 drm_dbg_kms(&i915->drm,
634 "Retrying Link training for eDP with same parameters\n");
635 return 0;
636 }
637 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
638 intel_dp->max_link_lane_count = lane_count >> 1;
639 } else {
640 drm_err(&i915->drm, "Link Training Unsuccessful\n");
641 return -1;
642 }
643
644 return 0;
645 }
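/*
 * As an illustration of the resulting fallback order for an external DP sink
 * whose common rates are { 162000, 270000, 540000 } (illustrative), starting
 * from 540000 x4: each failure first drops the rate (270000 x4, then
 * 162000 x4); once the lowest rate has failed, the lane count is halved and
 * the rate reset to the maximum (540000 x2), and the sequence repeats down
 * to 162000 x1 before the error above is returned. For eDP the first
 * fallback is instead a retry with the max parameters (use_max_params), and
 * further fallbacks are only taken if the preferred fixed mode still fits at
 * the reduced parameters.
 */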
646
647 u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
648 {
649 return div_u64(mul_u32_u32(mode_clock, 1000000U),
650 DP_DSC_FEC_OVERHEAD_FACTOR);
651 }
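/*
 * For example, a 533250 kHz mode clock becomes
 * 533250 * 1000000 / 972261 ~= 548463 kHz, i.e. roughly 2.85% is added to
 * budget for FEC overhead in the DSC bpp computation below.
 */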
652
653 static int
654 small_joiner_ram_size_bits(struct drm_i915_private *i915)
655 {
656 if (DISPLAY_VER(i915) >= 13)
657 return 17280 * 8;
658 else if (DISPLAY_VER(i915) >= 11)
659 return 7680 * 8;
660 else
661 return 6144 * 8;
662 }
663
664 static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
665 u32 link_clock, u32 lane_count,
666 u32 mode_clock, u32 mode_hdisplay,
667 bool bigjoiner,
668 u32 pipe_bpp)
669 {
670 u32 bits_per_pixel, max_bpp_small_joiner_ram;
671 int i;
672
673 /*
674 * Available Link Bandwidth (Kbits/sec) = NumberOfLanes *
675 * LinkSymbolClock * 8 * TimeSlotsPerMTP,
676 * for SST -> TimeSlotsPerMTP is 1,
677 * for MST -> TimeSlotsPerMTP has to be calculated
678 */
679 bits_per_pixel = (link_clock * lane_count * 8) /
680 intel_dp_mode_to_fec_clock(mode_clock);
681
682 /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
683 max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
684 mode_hdisplay;
685
686 if (bigjoiner)
687 max_bpp_small_joiner_ram *= 2;
688
689 /*
690 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
691 * check, output bpp from small joiner RAM check)
692 */
693 bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
694
695 if (bigjoiner) {
696 u32 max_bpp_bigjoiner =
697 i915->max_cdclk_freq * 48 /
698 intel_dp_mode_to_fec_clock(mode_clock);
699
700 bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner);
701 }
702
703 /* Error out if the max bpp is less than smallest allowed valid bpp */
704 if (bits_per_pixel < valid_dsc_bpp[0]) {
705 drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
706 bits_per_pixel, valid_dsc_bpp[0]);
707 return 0;
708 }
709
710 /* From XE_LPD onwards we support compressed BPPs from bpc up to uncompressed bpp-1 */
711 if (DISPLAY_VER(i915) >= 13) {
712 bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1);
713 } else {
714 /* Find the nearest match in the array of known BPPs from VESA */
715 for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
716 if (bits_per_pixel < valid_dsc_bpp[i + 1])
717 break;
718 }
719 bits_per_pixel = valid_dsc_bpp[i];
720 }
721
722 /*
723 * Compressed BPP is in U6.4 format, so multiply by 16; for Gen 11
724 * the fractional part is 0.
725 */
726 return bits_per_pixel << 4;
727 }
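/*
 * Worked example (illustrative numbers, assuming ICL/TGL-class small joiner
 * RAM and no big joiner): link_clock = 540000, lane_count = 4,
 * mode_clock = 533250, mode_hdisplay = 3840, pipe_bpp = 24.
 *
 *   link BW limit:     540000 * 4 * 8 / 548463            ~= 31
 *   small joiner RAM:  7680 * 8 / 3840                     = 16
 *   min(31, 16)                                            = 16
 *   largest valid VESA bpp not above 16                    = 15
 *
 * so the function returns 15 << 4 = 240 in U6.4 format.
 */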
728
729 static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
730 int mode_clock, int mode_hdisplay,
731 bool bigjoiner)
732 {
733 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
734 u8 min_slice_count, i;
735 int max_slice_width;
736
737 if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
738 min_slice_count = DIV_ROUND_UP(mode_clock,
739 DP_DSC_MAX_ENC_THROUGHPUT_0);
740 else
741 min_slice_count = DIV_ROUND_UP(mode_clock,
742 DP_DSC_MAX_ENC_THROUGHPUT_1);
743
744 max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
745 if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
746 drm_dbg_kms(&i915->drm,
747 "Unsupported slice width %d by DP DSC Sink device\n",
748 max_slice_width);
749 return 0;
750 }
751 /* Also take into account max slice width */
752 min_slice_count = max_t(u8, min_slice_count,
753 DIV_ROUND_UP(mode_hdisplay,
754 max_slice_width));
755
756 /* Find the closest match to the valid slice count values */
757 for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
758 u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner;
759
760 if (test_slice_count >
761 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, false))
762 break;
763
764 /* big joiner needs small joiner to be enabled */
765 if (bigjoiner && test_slice_count < 4)
766 continue;
767
768 if (min_slice_count <= test_slice_count)
769 return test_slice_count;
770 }
771
772 drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
773 min_slice_count);
774 return 0;
775 }
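/*
 * Continuing the example above: with mode_clock = 533250 (below
 * DP_DSC_PEAK_PIXEL_RATE) the throughput limit gives
 * DIV_ROUND_UP(533250, 340000) = 2 slices, and assuming the sink reports a
 * 2560 pixel max slice width, the width limit gives
 * DIV_ROUND_UP(3840, 2560) = 2 as well, so 2 is returned provided the sink
 * supports at least 2 slices per line.
 */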
776
777 static enum intel_output_format
778 intel_dp_output_format(struct intel_connector *connector,
779 bool ycbcr_420_output)
780 {
781 struct intel_dp *intel_dp = intel_attached_dp(connector);
782
783 if (!connector->base.ycbcr_420_allowed || !ycbcr_420_output)
784 return INTEL_OUTPUT_FORMAT_RGB;
785
786 if (intel_dp->dfp.rgb_to_ycbcr &&
787 intel_dp->dfp.ycbcr_444_to_420)
788 return INTEL_OUTPUT_FORMAT_RGB;
789
790 if (intel_dp->dfp.ycbcr_444_to_420)
791 return INTEL_OUTPUT_FORMAT_YCBCR444;
792 else
793 return INTEL_OUTPUT_FORMAT_YCBCR420;
794 }
795
796 int intel_dp_min_bpp(enum intel_output_format output_format)
797 {
798 if (output_format == INTEL_OUTPUT_FORMAT_RGB)
799 return 6 * 3;
800 else
801 return 8 * 3;
802 }
803
804 static int intel_dp_output_bpp(enum intel_output_format output_format, int bpp)
805 {
806 /*
807 * The bpp value was assumed for RGB format. For YCbCr 4:2:0 output
808 * the number of bits per pixel on the link is half the number
809 * needed for an RGB pixel.
810 */
811 if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
812 bpp /= 2;
813
814 return bpp;
815 }
816
817 static int
818 intel_dp_mode_min_output_bpp(struct intel_connector *connector,
819 const struct drm_display_mode *mode)
820 {
821 const struct drm_display_info *info = &connector->base.display_info;
822 enum intel_output_format output_format =
823 intel_dp_output_format(connector, drm_mode_is_420_only(info, mode));
824
825 return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format));
826 }
827
828 static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
829 int hdisplay)
830 {
831 /*
832 * Older platforms don't like hdisplay==4096 with DP.
833 *
834 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
835 * and frame counter increment), but we don't get vblank interrupts,
836 * and the pipe underruns immediately. The link also doesn't seem
837 * to get trained properly.
838 *
839 * On CHV the vblank interrupts don't seem to disappear but
840 * otherwise the symptoms are similar.
841 *
842 * TODO: confirm the behaviour on HSW+
843 */
844 return hdisplay == 4096 && !HAS_DDI(dev_priv);
845 }
846
847 static int intel_dp_max_tmds_clock(struct intel_dp *intel_dp)
848 {
849 struct intel_connector *connector = intel_dp->attached_connector;
850 const struct drm_display_info *info = &connector->base.display_info;
851 int max_tmds_clock = intel_dp->dfp.max_tmds_clock;
852
853 /* Only consider the sink's max TMDS clock if we know this is a HDMI DFP */
854 if (max_tmds_clock && info->max_tmds_clock)
855 max_tmds_clock = min(max_tmds_clock, info->max_tmds_clock);
856
857 return max_tmds_clock;
858 }
859
860 static enum drm_mode_status
861 intel_dp_tmds_clock_valid(struct intel_dp *intel_dp,
862 int clock, int bpc, bool ycbcr420_output,
863 bool respect_downstream_limits)
864 {
865 int tmds_clock, min_tmds_clock, max_tmds_clock;
866
867 if (!respect_downstream_limits)
868 return MODE_OK;
869
870 tmds_clock = intel_hdmi_tmds_clock(clock, bpc, ycbcr420_output);
871
872 min_tmds_clock = intel_dp->dfp.min_tmds_clock;
873 max_tmds_clock = intel_dp_max_tmds_clock(intel_dp);
874
875 if (min_tmds_clock && tmds_clock < min_tmds_clock)
876 return MODE_CLOCK_LOW;
877
878 if (max_tmds_clock && tmds_clock > max_tmds_clock)
879 return MODE_CLOCK_HIGH;
880
881 return MODE_OK;
882 }
883
884 static enum drm_mode_status
885 intel_dp_mode_valid_downstream(struct intel_connector *connector,
886 const struct drm_display_mode *mode,
887 int target_clock)
888 {
889 struct intel_dp *intel_dp = intel_attached_dp(connector);
890 const struct drm_display_info *info = &connector->base.display_info;
891 enum drm_mode_status status;
892 bool ycbcr_420_only;
893
894 /* If PCON supports FRL MODE, check FRL bandwidth constraints */
895 if (intel_dp->dfp.pcon_max_frl_bw) {
896 int target_bw;
897 int max_frl_bw;
898 int bpp = intel_dp_mode_min_output_bpp(connector, mode);
899
900 target_bw = bpp * target_clock;
901
902 max_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
903
904 /* converting bw from Gbps to Kbps */
905 max_frl_bw = max_frl_bw * 1000000;
906
907 if (target_bw > max_frl_bw)
908 return MODE_CLOCK_HIGH;
909
910 return MODE_OK;
911 }
912
913 if (intel_dp->dfp.max_dotclock &&
914 target_clock > intel_dp->dfp.max_dotclock)
915 return MODE_CLOCK_HIGH;
916
917 ycbcr_420_only = drm_mode_is_420_only(info, mode);
918
919 /* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
920 status = intel_dp_tmds_clock_valid(intel_dp, target_clock,
921 8, ycbcr_420_only, true);
922
923 if (status != MODE_OK) {
924 if (ycbcr_420_only ||
925 !connector->base.ycbcr_420_allowed ||
926 !drm_mode_is_420_also(info, mode))
927 return status;
928
929 status = intel_dp_tmds_clock_valid(intel_dp, target_clock,
930 8, true, true);
931 if (status != MODE_OK)
932 return status;
933 }
934
935 return MODE_OK;
936 }
937
938 static bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp,
939 int hdisplay, int clock)
940 {
941 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
942
943 if (!intel_dp_can_bigjoiner(intel_dp))
944 return false;
945
946 return clock > i915->max_dotclk_freq || hdisplay > 5120;
947 }
948
949 static enum drm_mode_status
950 intel_dp_mode_valid(struct drm_connector *_connector,
951 struct drm_display_mode *mode)
952 {
953 struct intel_connector *connector = to_intel_connector(_connector);
954 struct intel_dp *intel_dp = intel_attached_dp(connector);
955 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
956 const struct drm_display_mode *fixed_mode;
957 int target_clock = mode->clock;
958 int max_rate, mode_rate, max_lanes, max_link_clock;
959 int max_dotclk = dev_priv->max_dotclk_freq;
960 u16 dsc_max_output_bpp = 0;
961 u8 dsc_slice_count = 0;
962 enum drm_mode_status status;
963 bool dsc = false, bigjoiner = false;
964
965 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
966 return MODE_NO_DBLESCAN;
967
968 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
969 return MODE_H_ILLEGAL;
970
971 fixed_mode = intel_panel_fixed_mode(connector, mode);
972 if (intel_dp_is_edp(intel_dp) && fixed_mode) {
973 status = intel_panel_mode_valid(connector, mode);
974 if (status != MODE_OK)
975 return status;
976
977 target_clock = fixed_mode->clock;
978 }
979
980 if (mode->clock < 10000)
981 return MODE_CLOCK_LOW;
982
983 if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) {
984 bigjoiner = true;
985 max_dotclk *= 2;
986 }
987 if (target_clock > max_dotclk)
988 return MODE_CLOCK_HIGH;
989
990 max_link_clock = intel_dp_max_link_rate(intel_dp);
991 max_lanes = intel_dp_max_lane_count(intel_dp);
992
993 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
994 mode_rate = intel_dp_link_required(target_clock,
995 intel_dp_mode_min_output_bpp(connector, mode));
996
997 if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
998 return MODE_H_ILLEGAL;
999
1000 /*
1001 * Output bpp is stored in 6.4 format so right shift by 4 to get the
1002 * integer value since we support only integer values of bpp.
1003 */
1004 if (DISPLAY_VER(dev_priv) >= 10 &&
1005 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
1006 /*
1007 * TBD pass the connector BPC,
1008 * for now U8_MAX so that max BPC on that platform would be picked
1009 */
1010 int pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, U8_MAX);
1011
1012 if (intel_dp_is_edp(intel_dp)) {
1013 dsc_max_output_bpp =
1014 drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
1015 dsc_slice_count =
1016 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
1017 true);
1018 } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
1019 dsc_max_output_bpp =
1020 intel_dp_dsc_get_output_bpp(dev_priv,
1021 max_link_clock,
1022 max_lanes,
1023 target_clock,
1024 mode->hdisplay,
1025 bigjoiner,
1026 pipe_bpp) >> 4;
1027 dsc_slice_count =
1028 intel_dp_dsc_get_slice_count(intel_dp,
1029 target_clock,
1030 mode->hdisplay,
1031 bigjoiner);
1032 }
1033
1034 dsc = dsc_max_output_bpp && dsc_slice_count;
1035 }
1036
1037 /*
1038 * Big joiner configuration needs DSC on TGL, which is not true for
1039 * XE_LPD where the uncompressed joiner is supported.
1040 */
1041 if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc)
1042 return MODE_CLOCK_HIGH;
1043
1044 if (mode_rate > max_rate && !dsc)
1045 return MODE_CLOCK_HIGH;
1046
1047 status = intel_dp_mode_valid_downstream(connector, mode, target_clock);
1048 if (status != MODE_OK)
1049 return status;
1050
1051 return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
1052 }
1053
1054 bool intel_dp_source_supports_tps3(struct drm_i915_private *i915)
1055 {
1056 return DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915) || IS_HASWELL(i915);
1057 }
1058
1059 bool intel_dp_source_supports_tps4(struct drm_i915_private *i915)
1060 {
1061 return DISPLAY_VER(i915) >= 10;
1062 }
1063
1064 static void snprintf_int_array(char *str, size_t len,
1065 const int *array, int nelem)
1066 {
1067 int i;
1068
1069 str[0] = '\0';
1070
1071 for (i = 0; i < nelem; i++) {
1072 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
1073 if (r >= len)
1074 return;
1075 str += r;
1076 len -= r;
1077 }
1078 }
1079
1080 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1081 {
1082 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1083 char str[128]; /* FIXME: too big for stack? */
1084
1085 if (!drm_debug_enabled(DRM_UT_KMS))
1086 return;
1087
1088 snprintf_int_array(str, sizeof(str),
1089 intel_dp->source_rates, intel_dp->num_source_rates);
1090 drm_dbg_kms(&i915->drm, "source rates: %s\n", str);
1091
1092 snprintf_int_array(str, sizeof(str),
1093 intel_dp->sink_rates, intel_dp->num_sink_rates);
1094 drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);
1095
1096 snprintf_int_array(str, sizeof(str),
1097 intel_dp->common_rates, intel_dp->num_common_rates);
1098 drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
1099 }
1100
1101 int
1102 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1103 {
1104 int len;
1105
1106 len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
1107
1108 return intel_dp_common_rate(intel_dp, len - 1);
1109 }
1110
1111 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1112 {
1113 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1114 int i = intel_dp_rate_index(intel_dp->sink_rates,
1115 intel_dp->num_sink_rates, rate);
1116
1117 if (drm_WARN_ON(&i915->drm, i < 0))
1118 i = 0;
1119
1120 return i;
1121 }
1122
1123 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1124 u8 *link_bw, u8 *rate_select)
1125 {
1126 /* eDP 1.4 rate select method. */
1127 if (intel_dp->use_rate_select) {
1128 *link_bw = 0;
1129 *rate_select =
1130 intel_dp_rate_select(intel_dp, port_clock);
1131 } else {
1132 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1133 *rate_select = 0;
1134 }
1135 }
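/*
 * For example, programming a 270000 kHz (HBR) link on a sink that does not
 * use the eDP 1.4 rate select method results in *link_bw = DP_LINK_BW_2_7
 * (0x0a) and *rate_select = 0; with rate select in use, *link_bw is 0 and
 * *rate_select is the index of 270000 in sink_rates[].
 */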
1136
1137 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
1138 const struct intel_crtc_state *pipe_config)
1139 {
1140 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1141
1142 /* On TGL, FEC is supported on all Pipes */
1143 if (DISPLAY_VER(dev_priv) >= 12)
1144 return true;
1145
1146 if (DISPLAY_VER(dev_priv) == 11 && pipe_config->cpu_transcoder != TRANSCODER_A)
1147 return true;
1148
1149 return false;
1150 }
1151
1152 static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
1153 const struct intel_crtc_state *pipe_config)
1154 {
1155 return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
1156 drm_dp_sink_supports_fec(intel_dp->fec_capable);
1157 }
1158
1159 static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
1160 const struct intel_crtc_state *crtc_state)
1161 {
1162 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable)
1163 return false;
1164
1165 return intel_dsc_source_support(crtc_state) &&
1166 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
1167 }
1168
1169 static bool intel_dp_is_ycbcr420(struct intel_dp *intel_dp,
1170 const struct intel_crtc_state *crtc_state)
1171 {
1172 return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
1173 (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
1174 intel_dp->dfp.ycbcr_444_to_420);
1175 }
1176
1177 static int intel_dp_hdmi_compute_bpc(struct intel_dp *intel_dp,
1178 const struct intel_crtc_state *crtc_state,
1179 int bpc, bool respect_downstream_limits)
1180 {
1181 bool ycbcr420_output = intel_dp_is_ycbcr420(intel_dp, crtc_state);
1182 int clock = crtc_state->hw.adjusted_mode.crtc_clock;
1183
1184 /*
1185 * Current bpc could already be below 8bpc due to
1186 * FDI bandwidth constraints or other limits.
1187 * HDMI minimum is 8bpc however.
1188 */
1189 bpc = max(bpc, 8);
1190
1191 /*
1192 * We will never exceed downstream TMDS clock limits while
1193 * attempting deep color. If the user insists on forcing an
1194 * out of spec mode they will have to be satisfied with 8bpc.
1195 */
1196 if (!respect_downstream_limits)
1197 bpc = 8;
1198
1199 for (; bpc >= 8; bpc -= 2) {
1200 if (intel_hdmi_bpc_possible(crtc_state, bpc,
1201 intel_dp->has_hdmi_sink, ycbcr420_output) &&
1202 intel_dp_tmds_clock_valid(intel_dp, clock, bpc, ycbcr420_output,
1203 respect_downstream_limits) == MODE_OK)
1204 return bpc;
1205 }
1206
1207 return -EINVAL;
1208 }
1209
1210 static int intel_dp_max_bpp(struct intel_dp *intel_dp,
1211 const struct intel_crtc_state *crtc_state,
1212 bool respect_downstream_limits)
1213 {
1214 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1215 struct intel_connector *intel_connector = intel_dp->attached_connector;
1216 int bpp, bpc;
1217
1218 bpc = crtc_state->pipe_bpp / 3;
1219
1220 if (intel_dp->dfp.max_bpc)
1221 bpc = min_t(int, bpc, intel_dp->dfp.max_bpc);
1222
1223 if (intel_dp->dfp.min_tmds_clock) {
1224 int max_hdmi_bpc;
1225
1226 max_hdmi_bpc = intel_dp_hdmi_compute_bpc(intel_dp, crtc_state, bpc,
1227 respect_downstream_limits);
1228 if (max_hdmi_bpc < 0)
1229 return 0;
1230
1231 bpc = min(bpc, max_hdmi_bpc);
1232 }
1233
1234 bpp = bpc * 3;
1235 if (intel_dp_is_edp(intel_dp)) {
1236 /* Get bpp from vbt only for panels that don't have bpp in edid */
1237 if (intel_connector->base.display_info.bpc == 0 &&
1238 intel_connector->panel.vbt.edp.bpp &&
1239 intel_connector->panel.vbt.edp.bpp < bpp) {
1240 drm_dbg_kms(&dev_priv->drm,
1241 "clamping bpp for eDP panel to BIOS-provided %i\n",
1242 intel_connector->panel.vbt.edp.bpp);
1243 bpp = intel_connector->panel.vbt.edp.bpp;
1244 }
1245 }
1246
1247 return bpp;
1248 }
1249
1250 /* Adjust link config limits based on compliance test requests. */
1251 void
1252 intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
1253 struct intel_crtc_state *pipe_config,
1254 struct link_config_limits *limits)
1255 {
1256 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1257
1258 /* For DP Compliance we override the computed bpp for the pipe */
1259 if (intel_dp->compliance.test_data.bpc != 0) {
1260 int bpp = 3 * intel_dp->compliance.test_data.bpc;
1261
1262 limits->min_bpp = limits->max_bpp = bpp;
1263 pipe_config->dither_force_disable = bpp == 6 * 3;
1264
1265 drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
1266 }
1267
1268 /* Use values requested by Compliance Test Request */
1269 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
1270 int index;
1271
1272 /* Validate the compliance test data since max values
1273 * might have changed due to link train fallback.
1274 */
1275 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
1276 intel_dp->compliance.test_lane_count)) {
1277 index = intel_dp_rate_index(intel_dp->common_rates,
1278 intel_dp->num_common_rates,
1279 intel_dp->compliance.test_link_rate);
1280 if (index >= 0)
1281 limits->min_rate = limits->max_rate =
1282 intel_dp->compliance.test_link_rate;
1283 limits->min_lane_count = limits->max_lane_count =
1284 intel_dp->compliance.test_lane_count;
1285 }
1286 }
1287 }
1288
1289 /* Optimize link config in order: max bpp, min clock, min lanes */
1290 static int
1291 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
1292 struct intel_crtc_state *pipe_config,
1293 const struct link_config_limits *limits)
1294 {
1295 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
1296 int bpp, i, lane_count;
1297 int mode_rate, link_rate, link_avail;
1298
1299 for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
1300 int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
1301
1302 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1303 output_bpp);
1304
1305 for (i = 0; i < intel_dp->num_common_rates; i++) {
1306 link_rate = intel_dp_common_rate(intel_dp, i);
1307 if (link_rate < limits->min_rate ||
1308 link_rate > limits->max_rate)
1309 continue;
1310
1311 for (lane_count = limits->min_lane_count;
1312 lane_count <= limits->max_lane_count;
1313 lane_count <<= 1) {
1314 link_avail = intel_dp_max_data_rate(link_rate,
1315 lane_count);
1316
1317 if (mode_rate <= link_avail) {
1318 pipe_config->lane_count = lane_count;
1319 pipe_config->pipe_bpp = bpp;
1320 pipe_config->port_clock = link_rate;
1321
1322 return 0;
1323 }
1324 }
1325 }
1326 }
1327
1328 return -EINVAL;
1329 }
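/*
 * To make the "max bpp, min clock, min lanes" order concrete: with limits of,
 * say, 18..30 bpp, 162000..540000 and 1..4 lanes, the combinations are tried
 * as
 *
 *   30 bpp: 162000 x1, 162000 x2, 162000 x4, 270000 x1, ..., 540000 x4
 *   24 bpp: 162000 x1, ...
 *   18 bpp: ...
 *
 * and the first combination whose data rate covers the mode rate is used.
 */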
1330
1331 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 max_req_bpc)
1332 {
1333 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1334 int i, num_bpc;
1335 u8 dsc_bpc[3] = {0};
1336 u8 dsc_max_bpc;
1337
1338 /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
1339 if (DISPLAY_VER(i915) >= 12)
1340 dsc_max_bpc = min_t(u8, 12, max_req_bpc);
1341 else
1342 dsc_max_bpc = min_t(u8, 10, max_req_bpc);
1343
1344 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
1345 dsc_bpc);
1346 for (i = 0; i < num_bpc; i++) {
1347 if (dsc_max_bpc >= dsc_bpc[i])
1348 return dsc_bpc[i] * 3;
1349 }
1350
1351 return 0;
1352 }
1353
1354 #define DSC_SUPPORTED_VERSION_MIN 1
1355
1356 static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
1357 struct intel_crtc_state *crtc_state)
1358 {
1359 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
1360 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1361 struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1362 u8 line_buf_depth;
1363 int ret;
1364
1365 /*
1366 * RC_MODEL_SIZE is currently a constant across all configurations.
1367 *
1368 * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and
1369 * DP_DSC_RC_BUF_SIZE for this.
1370 */
1371 vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
1372 vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1373
1374 /*
1375 * Slice Height of 8 works for all currently available panels. So start
1376 * with that if pic_height is an integral multiple of 8. Eventually add
1377 * logic to try multiple slice heights.
1378 */
1379 if (vdsc_cfg->pic_height % 8 == 0)
1380 vdsc_cfg->slice_height = 8;
1381 else if (vdsc_cfg->pic_height % 4 == 0)
1382 vdsc_cfg->slice_height = 4;
1383 else
1384 vdsc_cfg->slice_height = 2;
1385
1386 ret = intel_dsc_compute_params(crtc_state);
1387 if (ret)
1388 return ret;
1389
1390 vdsc_cfg->dsc_version_major =
1391 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
1392 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
1393 vdsc_cfg->dsc_version_minor =
1394 min(DSC_SUPPORTED_VERSION_MIN,
1395 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
1396 DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);
1397
1398 vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
1399 DP_DSC_RGB;
1400
1401 line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
1402 if (!line_buf_depth) {
1403 drm_dbg_kms(&i915->drm,
1404 "DSC Sink Line Buffer Depth invalid\n");
1405 return -EINVAL;
1406 }
1407
1408 if (vdsc_cfg->dsc_version_minor == 2)
1409 vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
1410 DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
1411 else
1412 vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
1413 DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;
1414
1415 vdsc_cfg->block_pred_enable =
1416 intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
1417 DP_DSC_BLK_PREDICTION_IS_SUPPORTED;
1418
1419 return drm_dsc_compute_rc_parameters(vdsc_cfg);
1420 }
1421
1422 static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
1423 struct intel_crtc_state *pipe_config,
1424 struct drm_connector_state *conn_state,
1425 struct link_config_limits *limits)
1426 {
1427 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1428 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
1429 const struct drm_display_mode *adjusted_mode =
1430 &pipe_config->hw.adjusted_mode;
1431 int pipe_bpp;
1432 int ret;
1433
1434 pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
1435 intel_dp_supports_fec(intel_dp, pipe_config);
1436
1437 if (!intel_dp_supports_dsc(intel_dp, pipe_config))
1438 return -EINVAL;
1439
1440 pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, conn_state->max_requested_bpc);
1441
1442 /* Min Input BPC for ICL+ is 8 */
1443 if (pipe_bpp < 8 * 3) {
1444 drm_dbg_kms(&dev_priv->drm,
1445 "No DSC support for less than 8bpc\n");
1446 return -EINVAL;
1447 }
1448
1449 /*
1450 * For now enable DSC for max bpp, max link rate, max lane count.
1451 * Optimize this later for the minimum possible link rate/lane count
1452 * with DSC enabled for the requested mode.
1453 */
1454 pipe_config->pipe_bpp = pipe_bpp;
1455 pipe_config->port_clock = limits->max_rate;
1456 pipe_config->lane_count = limits->max_lane_count;
1457
1458 if (intel_dp_is_edp(intel_dp)) {
1459 pipe_config->dsc.compressed_bpp =
1460 min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
1461 pipe_config->pipe_bpp);
1462 pipe_config->dsc.slice_count =
1463 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
1464 true);
1465 } else {
1466 u16 dsc_max_output_bpp;
1467 u8 dsc_dp_slice_count;
1468
1469 dsc_max_output_bpp =
1470 intel_dp_dsc_get_output_bpp(dev_priv,
1471 pipe_config->port_clock,
1472 pipe_config->lane_count,
1473 adjusted_mode->crtc_clock,
1474 adjusted_mode->crtc_hdisplay,
1475 pipe_config->bigjoiner_pipes,
1476 pipe_bpp);
1477 dsc_dp_slice_count =
1478 intel_dp_dsc_get_slice_count(intel_dp,
1479 adjusted_mode->crtc_clock,
1480 adjusted_mode->crtc_hdisplay,
1481 pipe_config->bigjoiner_pipes);
1482 if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
1483 drm_dbg_kms(&dev_priv->drm,
1484 "Compressed BPP/Slice Count not supported\n");
1485 return -EINVAL;
1486 }
1487 pipe_config->dsc.compressed_bpp = min_t(u16,
1488 dsc_max_output_bpp >> 4,
1489 pipe_config->pipe_bpp);
1490 pipe_config->dsc.slice_count = dsc_dp_slice_count;
1491 }
1492
1493 /* As of today we support DSC only for RGB */
1494 if (intel_dp->force_dsc_bpp) {
1495 if (intel_dp->force_dsc_bpp >= 8 &&
1496 intel_dp->force_dsc_bpp < pipe_bpp) {
1497 drm_dbg_kms(&dev_priv->drm,
1498 "DSC BPP forced to %d",
1499 intel_dp->force_dsc_bpp);
1500 pipe_config->dsc.compressed_bpp =
1501 intel_dp->force_dsc_bpp;
1502 } else {
1503 drm_dbg_kms(&dev_priv->drm,
1504 "Invalid DSC BPP %d",
1505 intel_dp->force_dsc_bpp);
1506 }
1507 }
1508
1509 /*
1510 * The VDSC engine operates at 1 pixel per clock, so if the peak pixel
1511 * rate is greater than the maximum CD clock, and if the slice count is
1512 * even, then we need to use 2 VDSC instances.
1513 */
1514 if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq ||
1515 pipe_config->bigjoiner_pipes) {
1516 if (pipe_config->dsc.slice_count < 2) {
1517 drm_dbg_kms(&dev_priv->drm,
1518 "Cannot split stream to use 2 VDSC instances\n");
1519 return -EINVAL;
1520 }
1521
1522 pipe_config->dsc.dsc_split = true;
1523 }
1524
1525 ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
1526 if (ret < 0) {
1527 drm_dbg_kms(&dev_priv->drm,
1528 "Cannot compute valid DSC parameters for Input Bpp = %d "
1529 "Compressed BPP = %d\n",
1530 pipe_config->pipe_bpp,
1531 pipe_config->dsc.compressed_bpp);
1532 return ret;
1533 }
1534
1535 pipe_config->dsc.compression_enable = true;
1536 drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
1537 "Compressed Bpp = %d Slice Count = %d\n",
1538 pipe_config->pipe_bpp,
1539 pipe_config->dsc.compressed_bpp,
1540 pipe_config->dsc.slice_count);
1541
1542 return 0;
1543 }
1544
1545 static int
1546 intel_dp_compute_link_config(struct intel_encoder *encoder,
1547 struct intel_crtc_state *pipe_config,
1548 struct drm_connector_state *conn_state,
1549 bool respect_downstream_limits)
1550 {
1551 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
1552 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
1553 const struct drm_display_mode *adjusted_mode =
1554 &pipe_config->hw.adjusted_mode;
1555 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1556 struct link_config_limits limits;
1557 bool joiner_needs_dsc = false;
1558 int ret;
1559
1560 limits.min_rate = intel_dp_common_rate(intel_dp, 0);
1561 limits.max_rate = intel_dp_max_link_rate(intel_dp);
1562
1563 limits.min_lane_count = 1;
1564 limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
1565
1566 limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format);
1567 limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config, respect_downstream_limits);
1568
1569 if (intel_dp->use_max_params) {
1570 /*
1571 * Use the maximum clock and number of lanes the eDP panel
1572 * advertises being capable of in case the initial fast
1573 * optimal params failed us. The panels are generally
1574 * designed to support only a single clock and lane
1575 * configuration, and typically on older panels these
1576 * values correspond to the native resolution of the panel.
1577 */
1578 limits.min_lane_count = limits.max_lane_count;
1579 limits.min_rate = limits.max_rate;
1580 }
1581
1582 intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
1583
1584 drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i "
1585 "max rate %d max bpp %d pixel clock %iKHz\n",
1586 limits.max_lane_count, limits.max_rate,
1587 limits.max_bpp, adjusted_mode->crtc_clock);
1588
1589 if (intel_dp_need_bigjoiner(intel_dp, adjusted_mode->crtc_hdisplay,
1590 adjusted_mode->crtc_clock))
1591 pipe_config->bigjoiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe);
1592
1593 /*
1594 * Pipe joiner needs compression up to display version 12 due to
1595 * bandwidth limitations. From DG2 onwards the pipe joiner can be
1596 * enabled without compression.
1597 */
1598 joiner_needs_dsc = DISPLAY_VER(i915) < 13 && pipe_config->bigjoiner_pipes;
1599
1600 /*
1601 * Optimize for slow and wide for everything, because there are some
1602 * eDP 1.3 and 1.4 panels that don't work well with fast and narrow.
1603 */
1604 ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
1605
1606 if (ret || joiner_needs_dsc || intel_dp->force_dsc_en) {
1607 drm_dbg_kms(&i915->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
1608 str_yes_no(ret), str_yes_no(joiner_needs_dsc),
1609 str_yes_no(intel_dp->force_dsc_en));
1610 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
1611 conn_state, &limits);
1612 if (ret < 0)
1613 return ret;
1614 }
1615
1616 if (pipe_config->dsc.compression_enable) {
1617 drm_dbg_kms(&i915->drm,
1618 "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
1619 pipe_config->lane_count, pipe_config->port_clock,
1620 pipe_config->pipe_bpp,
1621 pipe_config->dsc.compressed_bpp);
1622
1623 drm_dbg_kms(&i915->drm,
1624 "DP link rate required %i available %i\n",
1625 intel_dp_link_required(adjusted_mode->crtc_clock,
1626 pipe_config->dsc.compressed_bpp),
1627 intel_dp_max_data_rate(pipe_config->port_clock,
1628 pipe_config->lane_count));
1629 } else {
1630 drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n",
1631 pipe_config->lane_count, pipe_config->port_clock,
1632 pipe_config->pipe_bpp);
1633
1634 drm_dbg_kms(&i915->drm,
1635 "DP link rate required %i available %i\n",
1636 intel_dp_link_required(adjusted_mode->crtc_clock,
1637 pipe_config->pipe_bpp),
1638 intel_dp_max_data_rate(pipe_config->port_clock,
1639 pipe_config->lane_count));
1640 }
1641 return 0;
1642 }
1643
1644 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
1645 const struct drm_connector_state *conn_state)
1646 {
1647 const struct intel_digital_connector_state *intel_conn_state =
1648 to_intel_digital_connector_state(conn_state);
1649 const struct drm_display_mode *adjusted_mode =
1650 &crtc_state->hw.adjusted_mode;
1651
1652 /*
1653 * Our YCbCr output is always limited range.
1654 * crtc_state->limited_color_range only applies to RGB,
1655 * and it must never be set for YCbCr or we risk setting
1656 * some conflicting bits in PIPECONF which will mess up
1657 * the colors on the monitor.
1658 */
1659 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
1660 return false;
1661
1662 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
1663 /*
1664 * See:
1665 * CEA-861-E - 5.1 Default Encoding Parameters
1666 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1667 */
1668 return crtc_state->pipe_bpp != 18 &&
1669 drm_default_rgb_quant_range(adjusted_mode) ==
1670 HDMI_QUANTIZATION_RANGE_LIMITED;
1671 } else {
1672 return intel_conn_state->broadcast_rgb ==
1673 INTEL_BROADCAST_RGB_LIMITED;
1674 }
1675 }
1676
1677 static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
1678 enum port port)
1679 {
1680 if (IS_G4X(dev_priv))
1681 return false;
1682 if (DISPLAY_VER(dev_priv) < 12 && port == PORT_A)
1683 return false;
1684
1685 return true;
1686 }
1687
1688 static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state,
1689 const struct drm_connector_state *conn_state,
1690 struct drm_dp_vsc_sdp *vsc)
1691 {
1692 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1693 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1694
1695 /*
1696 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
1697 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
1698 * Colorimetry Format indication.
1699 */
1700 vsc->revision = 0x5;
1701 vsc->length = 0x13;
1702
1703 /* DP 1.4a spec, Table 2-120 */
1704 switch (crtc_state->output_format) {
1705 case INTEL_OUTPUT_FORMAT_YCBCR444:
1706 vsc->pixelformat = DP_PIXELFORMAT_YUV444;
1707 break;
1708 case INTEL_OUTPUT_FORMAT_YCBCR420:
1709 vsc->pixelformat = DP_PIXELFORMAT_YUV420;
1710 break;
1711 case INTEL_OUTPUT_FORMAT_RGB:
1712 default:
1713 vsc->pixelformat = DP_PIXELFORMAT_RGB;
1714 }
1715
1716 switch (conn_state->colorspace) {
1717 case DRM_MODE_COLORIMETRY_BT709_YCC:
1718 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
1719 break;
1720 case DRM_MODE_COLORIMETRY_XVYCC_601:
1721 vsc->colorimetry = DP_COLORIMETRY_XVYCC_601;
1722 break;
1723 case DRM_MODE_COLORIMETRY_XVYCC_709:
1724 vsc->colorimetry = DP_COLORIMETRY_XVYCC_709;
1725 break;
1726 case DRM_MODE_COLORIMETRY_SYCC_601:
1727 vsc->colorimetry = DP_COLORIMETRY_SYCC_601;
1728 break;
1729 case DRM_MODE_COLORIMETRY_OPYCC_601:
1730 vsc->colorimetry = DP_COLORIMETRY_OPYCC_601;
1731 break;
1732 case DRM_MODE_COLORIMETRY_BT2020_CYCC:
1733 vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC;
1734 break;
1735 case DRM_MODE_COLORIMETRY_BT2020_RGB:
1736 vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB;
1737 break;
1738 case DRM_MODE_COLORIMETRY_BT2020_YCC:
1739 vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC;
1740 break;
1741 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
1742 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
1743 vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB;
1744 break;
1745 default:
1746 /*
1747 * RGB->YCBCR color conversion uses the BT.709
1748 * color space.
1749 */
1750 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
1751 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
1752 else
1753 vsc->colorimetry = DP_COLORIMETRY_DEFAULT;
1754 break;
1755 }
1756
1757 vsc->bpc = crtc_state->pipe_bpp / 3;
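         /* e.g. a 30 bpp pipe (10 bpc per component) gives vsc->bpc = 10 */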
1758
1759 /* only RGB pixelformat supports 6 bpc */
1760 drm_WARN_ON(&dev_priv->drm,
1761 vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);
1762
1763 /* all YCbCr are always limited range */
1764 vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA;
1765 vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
1766 }
1767
1768 static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
1769 struct intel_crtc_state *crtc_state,
1770 const struct drm_connector_state *conn_state)
1771 {
1772 struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc;
1773
1774 /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */
1775 if (crtc_state->has_psr)
1776 return;
1777
1778 if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
1779 return;
1780
1781 crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1782 vsc->sdp_type = DP_SDP_VSC;
1783 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
1784 &crtc_state->infoframes.vsc);
1785 }
1786
1787 void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
1788 const struct intel_crtc_state *crtc_state,
1789 const struct drm_connector_state *conn_state,
1790 struct drm_dp_vsc_sdp *vsc)
1791 {
1792 vsc->sdp_type = DP_SDP_VSC;
1793
1794 if (crtc_state->has_psr2) {
1795 if (intel_dp->psr.colorimetry_support &&
1796 intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
1797 /* [PSR2, +Colorimetry] */
1798 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
1799 vsc);
1800 } else {
1801 /*
1802 * [PSR2, -Colorimetry]
1803 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
1804 * 3D stereo + PSR/PSR2 + Y-coordinate.
1805 */
1806 vsc->revision = 0x4;
1807 vsc->length = 0xe;
1808 }
1809 } else {
1810 /*
1811 * [PSR1]
1812 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
1813 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or
1814 * higher).
1815 */
1816 vsc->revision = 0x2;
1817 vsc->length = 0x8;
1818 }
1819 }
1820
1821 static void
1822 intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
1823 struct intel_crtc_state *crtc_state,
1824 const struct drm_connector_state *conn_state)
1825 {
1826 int ret;
1827 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1828 struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;
1829
1830 if (!conn_state->hdr_output_metadata)
1831 return;
1832
1833 ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);
1834
1835 if (ret) {
1836 drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
1837 return;
1838 }
1839
1840 crtc_state->infoframes.enable |=
1841 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
1842 }
1843
1844 static bool cpu_transcoder_has_drrs(struct drm_i915_private *i915,
1845 enum transcoder cpu_transcoder)
1846 {
1847 /* M1/N1 is double buffered */
1848 if (DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915))
1849 return true;
1850
1851 return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder);
1852 }
1853
1854 static bool can_enable_drrs(struct intel_connector *connector,
1855 const struct intel_crtc_state *pipe_config,
1856 const struct drm_display_mode *downclock_mode)
1857 {
1858 struct drm_i915_private *i915 = to_i915(connector->base.dev);
1859
1860 if (pipe_config->vrr.enable)
1861 return false;
1862
1863 /*
1864          * DRRS and PSR can't be enabled together, so give preference to PSR
1865          * as it allows more power savings by completely shutting down the
1866          * display. To guarantee this, intel_drrs_compute_config() must be
1867          * called after intel_psr_compute_config().
1868 */
1869 if (pipe_config->has_psr)
1870 return false;
1871
1872 /* FIXME missing FDI M2/N2 etc. */
1873 if (pipe_config->has_pch_encoder)
1874 return false;
1875
1876 if (!cpu_transcoder_has_drrs(i915, pipe_config->cpu_transcoder))
1877 return false;
1878
1879 return downclock_mode &&
1880 intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS;
1881 }
1882
1883 static void
1884 intel_dp_drrs_compute_config(struct intel_connector *connector,
1885 struct intel_crtc_state *pipe_config,
1886 int output_bpp, bool constant_n)
1887 {
1888 struct drm_i915_private *i915 = to_i915(connector->base.dev);
1889 const struct drm_display_mode *downclock_mode =
1890 intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
1891 int pixel_clock;
1892
1893 if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
1894 if (intel_cpu_transcoder_has_m2_n2(i915, pipe_config->cpu_transcoder))
1895 intel_zero_m_n(&pipe_config->dp_m2_n2);
1896 return;
1897 }
1898
1899 if (IS_IRONLAKE(i915) || IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915))
1900 pipe_config->msa_timing_delay = connector->panel.vbt.edp.drrs_msa_timing_delay;
1901
1902 pipe_config->has_drrs = true;
1903
1904 pixel_clock = downclock_mode->clock;
1905 if (pipe_config->splitter.enable)
1906 pixel_clock /= pipe_config->splitter.link_count;
1907
1908 intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock,
1909 pipe_config->port_clock, &pipe_config->dp_m2_n2,
1910 constant_n, pipe_config->fec_enable);
1911
1912 /* FIXME: abstract this better */
1913 if (pipe_config->splitter.enable)
1914 pipe_config->dp_m2_n2.data_m *= pipe_config->splitter.link_count;
1915 }
1916
1917 static bool intel_dp_has_audio(struct intel_encoder *encoder,
1918 const struct intel_crtc_state *crtc_state,
1919 const struct drm_connector_state *conn_state)
1920 {
1921 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
1922 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1923 const struct intel_digital_connector_state *intel_conn_state =
1924 to_intel_digital_connector_state(conn_state);
1925
1926 if (!intel_dp_port_has_audio(i915, encoder->port))
1927 return false;
1928
1929 if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
1930 return intel_dp->has_audio;
1931 else
1932 return intel_conn_state->force_audio == HDMI_AUDIO_ON;
1933 }
1934
1935 static int
1936 intel_dp_compute_output_format(struct intel_encoder *encoder,
1937 struct intel_crtc_state *crtc_state,
1938 struct drm_connector_state *conn_state,
1939 bool respect_downstream_limits)
1940 {
1941 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
1942 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1943 struct intel_connector *connector = intel_dp->attached_connector;
1944 const struct drm_display_info *info = &connector->base.display_info;
1945 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1946 bool ycbcr_420_only;
1947 int ret;
1948
1949 ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode);
1950
1951 crtc_state->output_format = intel_dp_output_format(connector, ycbcr_420_only);
1952
1953 if (ycbcr_420_only && !intel_dp_is_ycbcr420(intel_dp, crtc_state)) {
1954 drm_dbg_kms(&i915->drm,
1955 "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n");
1956 crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
1957 }
1958
1959 ret = intel_dp_compute_link_config(encoder, crtc_state, conn_state,
1960 respect_downstream_limits);
1961 if (ret) {
1962 if (intel_dp_is_ycbcr420(intel_dp, crtc_state) ||
1963 !connector->base.ycbcr_420_allowed ||
1964 !drm_mode_is_420_also(info, adjusted_mode))
1965 return ret;
1966
1967 crtc_state->output_format = intel_dp_output_format(connector, true);
1968 ret = intel_dp_compute_link_config(encoder, crtc_state, conn_state,
1969 respect_downstream_limits);
1970 }
1971
1972 return ret;
1973 }
1974
1975 int
1976 intel_dp_compute_config(struct intel_encoder *encoder,
1977 struct intel_crtc_state *pipe_config,
1978 struct drm_connector_state *conn_state)
1979 {
1980 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1981 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
1982 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1983 const struct drm_display_mode *fixed_mode;
1984 struct intel_connector *connector = intel_dp->attached_connector;
1985 bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N);
1986 int ret = 0, output_bpp;
1987
1988 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A)
1989 pipe_config->has_pch_encoder = true;
1990
1991 pipe_config->has_audio = intel_dp_has_audio(encoder, pipe_config, conn_state);
1992
1993 fixed_mode = intel_panel_fixed_mode(connector, adjusted_mode);
1994 if (intel_dp_is_edp(intel_dp) && fixed_mode) {
1995 ret = intel_panel_compute_config(connector, adjusted_mode);
1996 if (ret)
1997 return ret;
1998 }
1999
2000 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
2001 return -EINVAL;
2002
2003 if (HAS_GMCH(dev_priv) &&
2004 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
2005 return -EINVAL;
2006
2007 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
2008 return -EINVAL;
2009
2010 if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
2011 return -EINVAL;
2012
2013 /*
2014          * Try to respect downstream TMDS clock limits first; if
2015          * that fails, assume the user might know something we don't.
2016 */
2017 ret = intel_dp_compute_output_format(encoder, pipe_config, conn_state, true);
2018 if (ret)
2019 ret = intel_dp_compute_output_format(encoder, pipe_config, conn_state, false);
2020 if (ret)
2021 return ret;
2022
2023 if ((intel_dp_is_edp(intel_dp) && fixed_mode) ||
2024 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
2025 ret = intel_panel_fitting(pipe_config, conn_state);
2026 if (ret)
2027 return ret;
2028 }
2029
2030 pipe_config->limited_color_range =
2031 intel_dp_limited_color_range(pipe_config, conn_state);
2032
2033 if (pipe_config->dsc.compression_enable)
2034 output_bpp = pipe_config->dsc.compressed_bpp;
2035 else
2036 output_bpp = intel_dp_output_bpp(pipe_config->output_format,
2037 pipe_config->pipe_bpp);
2038
2039 if (intel_dp->mso_link_count) {
2040 int n = intel_dp->mso_link_count;
2041 int overlap = intel_dp->mso_pixel_overlap;
2042
2043 pipe_config->splitter.enable = true;
2044 pipe_config->splitter.link_count = n;
2045 pipe_config->splitter.pixel_overlap = overlap;
2046
2047 drm_dbg_kms(&dev_priv->drm, "MSO link count %d, pixel overlap %d\n",
2048 n, overlap);
2049
2050 adjusted_mode->crtc_hdisplay = adjusted_mode->crtc_hdisplay / n + overlap;
2051 adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hblank_start / n + overlap;
2052 adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_end / n + overlap;
2053 adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hsync_start / n + overlap;
2054 adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_end / n + overlap;
2055 adjusted_mode->crtc_htotal = adjusted_mode->crtc_htotal / n + overlap;
2056 adjusted_mode->crtc_clock /= n;
2057 }
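         /*
          * Worked example (illustrative numbers, not from any particular
          * panel): for an MSO 2x1 link (n = 2) with pixel_overlap = 0, a
          * 5120 pixel wide adjusted mode becomes 2560 pixels per segment
          * and crtc_clock is halved; a non-zero overlap is added back to
          * each per-segment horizontal timing above.
          */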
2058
2059 intel_link_compute_m_n(output_bpp,
2060 pipe_config->lane_count,
2061 adjusted_mode->crtc_clock,
2062 pipe_config->port_clock,
2063 &pipe_config->dp_m_n,
2064 constant_n, pipe_config->fec_enable);
2065
2066 /* FIXME: abstract this better */
2067 if (pipe_config->splitter.enable)
2068 pipe_config->dp_m_n.data_m *= pipe_config->splitter.link_count;
2069
2070 if (!HAS_DDI(dev_priv))
2071 g4x_dp_set_clock(encoder, pipe_config);
2072
2073 intel_vrr_compute_config(pipe_config, conn_state);
2074 intel_psr_compute_config(intel_dp, pipe_config, conn_state);
2075 intel_dp_drrs_compute_config(connector, pipe_config,
2076 output_bpp, constant_n);
2077 intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
2078 intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);
2079
2080 return 0;
2081 }
2082
2083 void intel_dp_set_link_params(struct intel_dp *intel_dp,
2084 int link_rate, int lane_count)
2085 {
2086 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
2087 intel_dp->link_trained = false;
2088 intel_dp->link_rate = link_rate;
2089 intel_dp->lane_count = lane_count;
2090 }
2091
2092 static void intel_dp_reset_max_link_params(struct intel_dp *intel_dp)
2093 {
2094 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
2095 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
2096 }
2097
2098 /* Enable backlight PWM and backlight PP control. */
2099 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
2100 const struct drm_connector_state *conn_state)
2101 {
2102 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
2103 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2104
2105 if (!intel_dp_is_edp(intel_dp))
2106 return;
2107
2108 drm_dbg_kms(&i915->drm, "\n");
2109
2110 intel_backlight_enable(crtc_state, conn_state);
2111 intel_pps_backlight_on(intel_dp);
2112 }
2113
2114 /* Disable backlight PP control and backlight PWM. */
2115 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
2116 {
2117 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
2118 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2119
2120 if (!intel_dp_is_edp(intel_dp))
2121 return;
2122
2123 drm_dbg_kms(&i915->drm, "\n");
2124
2125 intel_pps_backlight_off(intel_dp);
2126 intel_backlight_disable(old_conn_state);
2127 }
2128
2129 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
2130 {
2131 /*
2132 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
2133 * be capable of signalling downstream hpd with a long pulse.
2134 * Whether or not that means D3 is safe to use is not clear,
2135 * but let's assume so until proven otherwise.
2136 *
2137 * FIXME should really check all downstream ports...
2138 */
2139 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
2140 drm_dp_is_branch(intel_dp->dpcd) &&
2141 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
2142 }
2143
2144 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
2145 const struct intel_crtc_state *crtc_state,
2146 bool enable)
2147 {
2148 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2149 int ret;
2150
2151 if (!crtc_state->dsc.compression_enable)
2152 return;
2153
2154 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
2155 enable ? DP_DECOMPRESSION_EN : 0);
2156 if (ret < 0)
2157 drm_dbg_kms(&i915->drm,
2158 "Failed to %s sink decompression state\n",
2159 str_enable_disable(enable));
2160 }
2161
2162 static void
2163 intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
2164 {
2165 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2166 u8 oui[] = { 0x00, 0xaa, 0x01 };
2167 u8 buf[3] = { 0 };
2168
2169 /*
2170 * During driver init, we want to be careful and avoid changing the source OUI if it's
2171 * already set to what we want, so as to avoid clearing any state by accident
2172 */
2173 if (careful) {
2174 if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0)
2175 drm_err(&i915->drm, "Failed to read source OUI\n");
2176
2177 if (memcmp(oui, buf, sizeof(oui)) == 0)
2178 return;
2179 }
2180
2181 if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
2182 drm_err(&i915->drm, "Failed to write source OUI\n");
2183
2184 intel_dp->last_oui_write = jiffies;
2185 }
2186
2187 void intel_dp_wait_source_oui(struct intel_dp *intel_dp)
2188 {
2189 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2190
2191 drm_dbg_kms(&i915->drm, "Performing OUI wait\n");
2192 wait_remaining_ms_from_jiffies(intel_dp->last_oui_write, 30);
2193 }
2194
2195 /* If the device supports it, try to set the power state appropriately */
2196 void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
2197 {
2198 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
2199 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2200 int ret, i;
2201
2202 /* Should have a valid DPCD by this point */
2203 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2204 return;
2205
2206 if (mode != DP_SET_POWER_D0) {
2207 if (downstream_hpd_needs_d0(intel_dp))
2208 return;
2209
2210 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
2211 } else {
2212 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
2213
2214 lspcon_resume(dp_to_dig_port(intel_dp));
2215
2216 /* Write the source OUI as early as possible */
2217 if (intel_dp_is_edp(intel_dp))
2218 intel_edp_init_source_oui(intel_dp, false);
2219
2220 /*
2221                  * When turning on, retry the write a few times with a 1 ms
2222                  * sleep in between to give the sink time to wake up.
2223 */
2224 for (i = 0; i < 3; i++) {
2225 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
2226 if (ret == 1)
2227 break;
2228 msleep(1);
2229 }
2230
2231 if (ret == 1 && lspcon->active)
2232 lspcon_wait_pcon_mode(lspcon);
2233 }
2234
2235 if (ret != 1)
2236 drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n",
2237 encoder->base.base.id, encoder->base.name,
2238 mode == DP_SET_POWER_D0 ? "D0" : "D3");
2239 }
2240
2241 static bool
2242 intel_dp_get_dpcd(struct intel_dp *intel_dp);
2243
2244 /**
2245 * intel_dp_sync_state - sync the encoder state during init/resume
2246 * @encoder: intel encoder to sync
2247 * @crtc_state: state for the CRTC connected to the encoder
2248 *
2249 * Sync any state stored in the encoder wrt. HW state during driver init
2250 * and system resume.
2251 */
2252 void intel_dp_sync_state(struct intel_encoder *encoder,
2253 const struct intel_crtc_state *crtc_state)
2254 {
2255 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2256
2257 if (!crtc_state)
2258 return;
2259
2260 /*
2261          * Don't clobber DPCD if it's already been read out during output
2262 * setup (eDP) or detect.
2263 */
2264 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
2265 intel_dp_get_dpcd(intel_dp);
2266
2267 intel_dp_reset_max_link_params(intel_dp);
2268 }
2269
2270 bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
2271 struct intel_crtc_state *crtc_state)
2272 {
2273 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2274 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2275
2276 /*
2277 * If BIOS has set an unsupported or non-standard link rate for some
2278          * reason, force an encoder recompute and full modeset.
2279 */
2280 if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates,
2281 crtc_state->port_clock) < 0) {
2282 drm_dbg_kms(&i915->drm, "Forcing full modeset due to unsupported link rate\n");
2283 crtc_state->uapi.connectors_changed = true;
2284 return false;
2285 }
2286
2287 /*
2288 * FIXME hack to force full modeset when DSC is being used.
2289 *
2290 * As long as we do not have full state readout and config comparison
2291 * of crtc_state->dsc, we have no way to ensure reliable fastset.
2292 * Remove once we have readout for DSC.
2293 */
2294 if (crtc_state->dsc.compression_enable) {
2295 drm_dbg_kms(&i915->drm, "Forcing full modeset due to DSC being enabled\n");
2296 crtc_state->uapi.mode_changed = true;
2297 return false;
2298 }
2299
2300 if (CAN_PSR(intel_dp)) {
2301 drm_dbg_kms(&i915->drm, "Forcing full modeset to compute PSR state\n");
2302 crtc_state->uapi.mode_changed = true;
2303 return false;
2304 }
2305
2306 return true;
2307 }
2308
2309 static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
2310 {
2311 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2312
2313 /* Clear the cached register set to avoid using stale values */
2314
2315 memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd));
2316
2317 if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER,
2318 intel_dp->pcon_dsc_dpcd,
2319 sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
2320 drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n",
2321 DP_PCON_DSC_ENCODER);
2322
2323 drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n",
2324 (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd);
2325 }
2326
2327 static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask)
2328 {
2329 int bw_gbps[] = {9, 18, 24, 32, 40, 48};
2330 int i;
2331
2332 for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) {
2333 if (frl_bw_mask & (1 << i))
2334 return bw_gbps[i];
2335 }
2336 return 0;
2337 }
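 /*
  * For example (assuming the DP_PCON_FRL_BW_MASK_* bits follow the bw_gbps[]
  * ordering above), a frl_bw_mask with the 18 Gbps and 32 Gbps bits set makes
  * intel_dp_pcon_get_frl_mask() return 32, the highest rate in the mask.
  */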
2338
2339 static int intel_dp_pcon_set_frl_mask(int max_frl)
2340 {
2341 switch (max_frl) {
2342 case 48:
2343 return DP_PCON_FRL_BW_MASK_48GBPS;
2344 case 40:
2345 return DP_PCON_FRL_BW_MASK_40GBPS;
2346 case 32:
2347 return DP_PCON_FRL_BW_MASK_32GBPS;
2348 case 24:
2349 return DP_PCON_FRL_BW_MASK_24GBPS;
2350 case 18:
2351 return DP_PCON_FRL_BW_MASK_18GBPS;
2352 case 9:
2353 return DP_PCON_FRL_BW_MASK_9GBPS;
2354 }
2355
2356 return 0;
2357 }
2358
2359 static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp)
2360 {
2361 struct intel_connector *intel_connector = intel_dp->attached_connector;
2362 struct drm_connector *connector = &intel_connector->base;
2363 int max_frl_rate;
2364 int max_lanes, rate_per_lane;
2365 int max_dsc_lanes, dsc_rate_per_lane;
2366
2367 max_lanes = connector->display_info.hdmi.max_lanes;
2368 rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane;
2369 max_frl_rate = max_lanes * rate_per_lane;
2370
2371 if (connector->display_info.hdmi.dsc_cap.v_1p2) {
2372 max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes;
2373 dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane;
2374 if (max_dsc_lanes && dsc_rate_per_lane)
2375 max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane);
2376 }
2377
2378 return max_frl_rate;
2379 }
2380
2381 static bool
2382 intel_dp_pcon_is_frl_trained(struct intel_dp *intel_dp,
2383 u8 max_frl_bw_mask, u8 *frl_trained_mask)
2384 {
2385 if (drm_dp_pcon_hdmi_link_active(&intel_dp->aux) &&
2386 drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, frl_trained_mask) == DP_PCON_HDMI_MODE_FRL &&
2387 *frl_trained_mask >= max_frl_bw_mask)
2388 return true;
2389
2390 return false;
2391 }
2392
2393 static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
2394 {
2395 #define TIMEOUT_FRL_READY_MS 500
2396 #define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000
2397
2398 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2399 int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret;
2400 u8 max_frl_bw_mask = 0, frl_trained_mask;
2401 bool is_active;
2402
2403 max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
2404 drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);
2405
2406 max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp);
2407 drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw);
2408
2409 max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw);
2410
2411 if (max_frl_bw <= 0)
2412 return -EINVAL;
2413
2414 max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
2415 drm_dbg(&i915->drm, "MAX_FRL_BW_MASK = %u\n", max_frl_bw_mask);
2416
2417 if (intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask))
2418 goto frl_trained;
2419
2420 ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false);
2421 if (ret < 0)
2422 return ret;
2423 /* Wait for PCON to be FRL Ready */
2424 wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS);
2425
2426 if (!is_active)
2427 return -ETIMEDOUT;
2428
2429 ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw,
2430 DP_PCON_ENABLE_SEQUENTIAL_LINK);
2431 if (ret < 0)
2432 return ret;
2433 ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask,
2434 DP_PCON_FRL_LINK_TRAIN_NORMAL);
2435 if (ret < 0)
2436 return ret;
2437 ret = drm_dp_pcon_frl_enable(&intel_dp->aux);
2438 if (ret < 0)
2439 return ret;
2440 /*
2441          * Wait for FRL training to complete by checking that the
2442          * HDMI link is up and active.
2443 */
2444 wait_for(is_active =
2445 intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask),
2446 TIMEOUT_HDMI_LINK_ACTIVE_MS);
2447
2448 if (!is_active)
2449 return -ETIMEDOUT;
2450
2451 frl_trained:
2452 drm_dbg(&i915->drm, "FRL_TRAINED_MASK = %u\n", frl_trained_mask);
2453 intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
2454 intel_dp->frl.is_trained = true;
2455 drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps);
2456
2457 return 0;
2458 }
2459
2460 static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp)
2461 {
2462 if (drm_dp_is_branch(intel_dp->dpcd) &&
2463 intel_dp->has_hdmi_sink &&
2464 intel_dp_hdmi_sink_max_frl(intel_dp) > 0)
2465 return true;
2466
2467 return false;
2468 }
2469
2470 static
2471 int intel_dp_pcon_set_tmds_mode(struct intel_dp *intel_dp)
2472 {
2473 int ret;
2474 u8 buf = 0;
2475
2476 /* Set PCON source control mode */
2477 buf |= DP_PCON_ENABLE_SOURCE_CTL_MODE;
2478
2479 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
2480 if (ret < 0)
2481 return ret;
2482
2483 /* Set HDMI LINK ENABLE */
2484 buf |= DP_PCON_ENABLE_HDMI_LINK;
2485 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
2486 if (ret < 0)
2487 return ret;
2488
2489 return 0;
2490 }
2491
2492 void intel_dp_check_frl_training(struct intel_dp *intel_dp)
2493 {
2494 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2495
2496 /*
2497 * Always go for FRL training if:
2498          * - PCON supports SRC_CTL_MODE (VESA DP2.0-HDMI2.1 PCON Spec Draft-1 Sec-7)
2499          * - sink is HDMI 2.1
2500 */
2501 if (!(intel_dp->downstream_ports[2] & DP_PCON_SOURCE_CTL_MODE) ||
2502 !intel_dp_is_hdmi_2_1_sink(intel_dp) ||
2503 intel_dp->frl.is_trained)
2504 return;
2505
2506 if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
2507 int ret, mode;
2508
2509 drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n");
2510 ret = intel_dp_pcon_set_tmds_mode(intel_dp);
2511 mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);
2512
2513 if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS)
2514 drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n");
2515 } else {
2516 drm_dbg(&dev_priv->drm, "FRL training Completed\n");
2517 }
2518 }
2519
2520 static int
2521 intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state)
2522 {
2523 int vactive = crtc_state->hw.adjusted_mode.vdisplay;
2524
2525 return intel_hdmi_dsc_get_slice_height(vactive);
2526 }
2527
2528 static int
2529 intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp,
2530 const struct intel_crtc_state *crtc_state)
2531 {
2532 struct intel_connector *intel_connector = intel_dp->attached_connector;
2533 struct drm_connector *connector = &intel_connector->base;
2534 int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice;
2535 int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices;
2536 int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd);
2537 int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd);
2538
2539 return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices,
2540 pcon_max_slice_width,
2541 hdmi_max_slices, hdmi_throughput);
2542 }
2543
2544 static int
2545 intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp,
2546 const struct intel_crtc_state *crtc_state,
2547 int num_slices, int slice_width)
2548 {
2549 struct intel_connector *intel_connector = intel_dp->attached_connector;
2550 struct drm_connector *connector = &intel_connector->base;
2551 int output_format = crtc_state->output_format;
2552 bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp;
2553 int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd);
2554 int hdmi_max_chunk_bytes =
2555 connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024;
2556
2557 return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width,
2558 num_slices, output_format, hdmi_all_bpp,
2559 hdmi_max_chunk_bytes);
2560 }
2561
2562 void
2563 intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
2564 const struct intel_crtc_state *crtc_state)
2565 {
2566 u8 pps_param[6];
2567 int slice_height;
2568 int slice_width;
2569 int num_slices;
2570 int bits_per_pixel;
2571 int ret;
2572 struct intel_connector *intel_connector = intel_dp->attached_connector;
2573 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2574 struct drm_connector *connector;
2575 bool hdmi_is_dsc_1_2;
2576
2577 if (!intel_dp_is_hdmi_2_1_sink(intel_dp))
2578 return;
2579
2580 if (!intel_connector)
2581 return;
2582 connector = &intel_connector->base;
2583 hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2;
2584
2585 if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) ||
2586 !hdmi_is_dsc_1_2)
2587 return;
2588
2589 slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state);
2590 if (!slice_height)
2591 return;
2592
2593 num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state);
2594 if (!num_slices)
2595 return;
2596
2597 slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay,
2598 num_slices);
2599
2600 bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state,
2601 num_slices, slice_width);
2602 if (!bits_per_pixel)
2603 return;
2604
2605 pps_param[0] = slice_height & 0xFF;
2606 pps_param[1] = slice_height >> 8;
2607 pps_param[2] = slice_width & 0xFF;
2608 pps_param[3] = slice_width >> 8;
2609 pps_param[4] = bits_per_pixel & 0xFF;
2610 pps_param[5] = (bits_per_pixel >> 8) & 0x3;
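         /*
          * Illustrative packing example (hypothetical values): slice_height =
          * 108 (0x006c), slice_width = 1280 (0x0500) and bits_per_pixel = 12
          * yield pps_param[] = { 0x6c, 0x00, 0x00, 0x05, 0x0c, 0x00 }.
          */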
2611
2612 ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param);
2613 if (ret < 0)
2614 drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n");
2615 }
2616
2617 void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
2618 const struct intel_crtc_state *crtc_state)
2619 {
2620 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2621 u8 tmp;
2622
2623 if (intel_dp->dpcd[DP_DPCD_REV] < 0x13)
2624 return;
2625
2626 if (!drm_dp_is_branch(intel_dp->dpcd))
2627 return;
2628
2629 tmp = intel_dp->has_hdmi_sink ?
2630 DP_HDMI_DVI_OUTPUT_CONFIG : 0;
2631
2632 if (drm_dp_dpcd_writeb(&intel_dp->aux,
2633 DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1)
2634 drm_dbg_kms(&i915->drm, "Failed to %s protocol converter HDMI mode\n",
2635 str_enable_disable(intel_dp->has_hdmi_sink));
2636
2637 tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
2638 intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
2639
2640 if (drm_dp_dpcd_writeb(&intel_dp->aux,
2641 DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
2642 drm_dbg_kms(&i915->drm,
2643 "Failed to %s protocol converter YCbCr 4:2:0 conversion mode\n",
2644 str_enable_disable(intel_dp->dfp.ycbcr_444_to_420));
2645
2646 tmp = intel_dp->dfp.rgb_to_ycbcr ?
2647 DP_CONVERSION_BT709_RGB_YCBCR_ENABLE : 0;
2648
2649 if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0)
2650 drm_dbg_kms(&i915->drm,
2651 "Failed to %s protocol converter RGB->YCbCr conversion mode\n",
2652 str_enable_disable(tmp));
2653 }
2654
2655
2656 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
2657 {
2658 u8 dprx = 0;
2659
2660 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
2661 &dprx) != 1)
2662 return false;
2663 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
2664 }
2665
2666 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
2667 {
2668 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2669
2670 /*
2671 * Clear the cached register set to avoid using stale values
2672 * for the sinks that do not support DSC.
2673 */
2674 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
2675
2676 /* Clear fec_capable to avoid using stale values */
2677 intel_dp->fec_capable = 0;
2678
2679 /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
2680 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
2681 intel_dp->edp_dpcd[0] >= DP_EDP_14) {
2682 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
2683 intel_dp->dsc_dpcd,
2684 sizeof(intel_dp->dsc_dpcd)) < 0)
2685 drm_err(&i915->drm,
2686 "Failed to read DPCD register 0x%x\n",
2687 DP_DSC_SUPPORT);
2688
2689 drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n",
2690 (int)sizeof(intel_dp->dsc_dpcd),
2691 intel_dp->dsc_dpcd);
2692
2693 /* FEC is supported only on DP 1.4 */
2694 if (!intel_dp_is_edp(intel_dp) &&
2695 drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
2696 &intel_dp->fec_capable) < 0)
2697 drm_err(&i915->drm,
2698 "Failed to read FEC DPCD register\n");
2699
2700 drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
2701 intel_dp->fec_capable);
2702 }
2703 }
2704
2705 static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
2706 struct drm_display_mode *mode)
2707 {
2708 struct intel_dp *intel_dp = intel_attached_dp(connector);
2709 struct drm_i915_private *i915 = to_i915(connector->base.dev);
2710 int n = intel_dp->mso_link_count;
2711 int overlap = intel_dp->mso_pixel_overlap;
2712
2713 if (!mode || !n)
2714 return;
2715
2716 mode->hdisplay = (mode->hdisplay - overlap) * n;
2717 mode->hsync_start = (mode->hsync_start - overlap) * n;
2718 mode->hsync_end = (mode->hsync_end - overlap) * n;
2719 mode->htotal = (mode->htotal - overlap) * n;
2720 mode->clock *= n;
2721
2722 drm_mode_set_name(mode);
2723
2724 drm_dbg_kms(&i915->drm,
2725 "[CONNECTOR:%d:%s] using generated MSO mode: " DRM_MODE_FMT "\n",
2726 connector->base.base.id, connector->base.name,
2727 DRM_MODE_ARG(mode));
2728 }
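 /*
  * For example, a 2x1 MSO panel (mso_link_count == 2) reporting a 1920 pixel
  * wide per-segment mode with zero pixel overlap is expanded above into a
  * 3840 pixel wide full-panel mode at twice the pixel clock.
  */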
2729
2730 void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp)
2731 {
2732 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2733 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2734 struct intel_connector *connector = intel_dp->attached_connector;
2735
2736 if (connector->panel.vbt.edp.bpp && pipe_bpp > connector->panel.vbt.edp.bpp) {
2737 /*
2738 * This is a big fat ugly hack.
2739 *
2740 * Some machines in UEFI boot mode provide us a VBT that has 18
2741 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2742 * unknown we fail to light up. Yet the same BIOS boots up with
2743 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2744 * max, not what it tells us to use.
2745 *
2746 * Note: This will still be broken if the eDP panel is not lit
2747 * up by the BIOS, and thus we can't get the mode at module
2748 * load.
2749 */
2750 drm_dbg_kms(&dev_priv->drm,
2751 "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2752 pipe_bpp, connector->panel.vbt.edp.bpp);
2753 connector->panel.vbt.edp.bpp = pipe_bpp;
2754 }
2755 }
2756
2757 static void intel_edp_mso_init(struct intel_dp *intel_dp)
2758 {
2759 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2760 struct intel_connector *connector = intel_dp->attached_connector;
2761 struct drm_display_info *info = &connector->base.display_info;
2762 u8 mso;
2763
2764 if (intel_dp->edp_dpcd[0] < DP_EDP_14)
2765 return;
2766
2767 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_MSO_LINK_CAPABILITIES, &mso) != 1) {
2768 drm_err(&i915->drm, "Failed to read MSO cap\n");
2769 return;
2770 }
2771
2772 /* Valid configurations are SST or MSO 2x1, 2x2, 4x1 */
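         /*
          * For instance, mso == 2 on a 4-lane eDP sink describes a 2x2
          * configuration: two MSO links with two lanes each (see the debug
          * message below).
          */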
2773 mso &= DP_EDP_MSO_NUMBER_OF_LINKS_MASK;
2774 if (mso % 2 || mso > drm_dp_max_lane_count(intel_dp->dpcd)) {
2775 drm_err(&i915->drm, "Invalid MSO link count cap %u\n", mso);
2776 mso = 0;
2777 }
2778
2779 if (mso) {
2780 drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration, pixel overlap %u\n",
2781 mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso,
2782 info->mso_pixel_overlap);
2783 if (!HAS_MSO(i915)) {
2784 drm_err(&i915->drm, "No source MSO support, disabling\n");
2785 mso = 0;
2786 }
2787 }
2788
2789 intel_dp->mso_link_count = mso;
2790 intel_dp->mso_pixel_overlap = mso ? info->mso_pixel_overlap : 0;
2791 }
2792
2793 static bool
2794 intel_edp_init_dpcd(struct intel_dp *intel_dp)
2795 {
2796 struct drm_i915_private *dev_priv =
2797 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
2798
2799 /* this function is meant to be called only once */
2800 drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);
2801
2802 if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0)
2803 return false;
2804
2805 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
2806 drm_dp_is_branch(intel_dp->dpcd));
2807
2808 /*
2809 * Read the eDP display control registers.
2810 *
2811 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
2812 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
2813 * set, but require eDP 1.4+ detection (e.g. for supported link rates
2814 * method). The display control registers should read zero if they're
2815 * not supported anyway.
2816 */
2817 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
2818 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
2819 sizeof(intel_dp->edp_dpcd)) {
2820 drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
2821 (int)sizeof(intel_dp->edp_dpcd),
2822 intel_dp->edp_dpcd);
2823
2824 intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14;
2825 }
2826
2827 /*
2828          * This has to be called after intel_dp->edp_dpcd is filled; PSR checks
2829          * for the SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1].
2830 */
2831 intel_psr_init_dpcd(intel_dp);
2832
2833 /* Clear the default sink rates */
2834 intel_dp->num_sink_rates = 0;
2835
2836 /* Read the eDP 1.4+ supported link rates. */
2837 if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
2838 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
2839 int i;
2840
2841 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
2842 sink_rates, sizeof(sink_rates));
2843
2844 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
2845 int val = le16_to_cpu(sink_rates[i]);
2846
2847 if (val == 0)
2848 break;
2849
2850 /* Value read multiplied by 200kHz gives the per-lane
2851 * link rate in kHz. The source rates are, however,
2852 * stored in terms of LS_Clk kHz. The full conversion
2853 * back to symbols is
2854 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
2855 */
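                         /*
                          * e.g. a sink advertising 5.4 GHz (HBR2) reports
                          * 27000, which is stored as 27000 * 200 / 10 = 540000.
                          */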
2856 intel_dp->sink_rates[i] = (val * 200) / 10;
2857 }
2858 intel_dp->num_sink_rates = i;
2859 }
2860
2861 /*
2862 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
2863 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
2864 */
2865 if (intel_dp->num_sink_rates)
2866 intel_dp->use_rate_select = true;
2867 else
2868 intel_dp_set_sink_rates(intel_dp);
2869 intel_dp_set_max_sink_lane_count(intel_dp);
2870
2871 /* Read the eDP DSC DPCD registers */
2872 if (DISPLAY_VER(dev_priv) >= 10)
2873 intel_dp_get_dsc_sink_cap(intel_dp);
2874
2875 /*
2876 * If needed, program our source OUI so we can make various Intel-specific AUX services
2877 * available (such as HDR backlight controls)
2878 */
2879 intel_edp_init_source_oui(intel_dp, true);
2880
2881 return true;
2882 }
2883
2884 static bool
2885 intel_dp_has_sink_count(struct intel_dp *intel_dp)
2886 {
2887 if (!intel_dp->attached_connector)
2888 return false;
2889
2890 return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base,
2891 intel_dp->dpcd,
2892 &intel_dp->desc);
2893 }
2894
2895 static bool
2896 intel_dp_get_dpcd(struct intel_dp *intel_dp)
2897 {
2898 int ret;
2899
2900 if (intel_dp_init_lttpr_and_dprx_caps(intel_dp) < 0)
2901 return false;
2902
2903 /*
2904 * Don't clobber cached eDP rates. Also skip re-reading
2905 * the OUI/ID since we know it won't change.
2906 */
2907 if (!intel_dp_is_edp(intel_dp)) {
2908 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
2909 drm_dp_is_branch(intel_dp->dpcd));
2910
2911 intel_dp_set_sink_rates(intel_dp);
2912 intel_dp_set_max_sink_lane_count(intel_dp);
2913 intel_dp_set_common_rates(intel_dp);
2914 }
2915
2916 if (intel_dp_has_sink_count(intel_dp)) {
2917 ret = drm_dp_read_sink_count(&intel_dp->aux);
2918 if (ret < 0)
2919 return false;
2920
2921 /*
2922                  * Sink count can change between short pulse HPD interrupts,
2923                  * hence a member variable in intel_dp tracks any changes
2924                  * between them.
2925 */
2926 intel_dp->sink_count = ret;
2927
2928 /*
2929 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
2930                  * a dongle is present but no display. Unless we need to know
2931                  * whether a dongle is present, there is no need to update the
2932                  * downstream port information, so an early return here skips
2933                  * the remaining, unnecessary operations.
2934 */
2935 if (!intel_dp->sink_count)
2936 return false;
2937 }
2938
2939 return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd,
2940 intel_dp->downstream_ports) == 0;
2941 }
2942
2943 static bool
2944 intel_dp_can_mst(struct intel_dp *intel_dp)
2945 {
2946 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2947
2948 return i915->params.enable_dp_mst &&
2949 intel_dp_mst_source_support(intel_dp) &&
2950 drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
2951 }
2952
2953 static void
2954 intel_dp_configure_mst(struct intel_dp *intel_dp)
2955 {
2956 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2957 struct intel_encoder *encoder =
2958 &dp_to_dig_port(intel_dp)->base;
2959 bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
2960
2961 drm_dbg_kms(&i915->drm,
2962 "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
2963 encoder->base.base.id, encoder->base.name,
2964 str_yes_no(intel_dp_mst_source_support(intel_dp)),
2965 str_yes_no(sink_can_mst),
2966 str_yes_no(i915->params.enable_dp_mst));
2967
2968 if (!intel_dp_mst_source_support(intel_dp))
2969 return;
2970
2971 intel_dp->is_mst = sink_can_mst &&
2972 i915->params.enable_dp_mst;
2973
2974 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
2975 intel_dp->is_mst);
2976 }
2977
2978 static bool
2979 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *esi)
2980 {
2981 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 4) == 4;
2982 }
2983
2984 static bool intel_dp_ack_sink_irq_esi(struct intel_dp *intel_dp, u8 esi[4])
2985 {
2986 int retry;
2987
2988 for (retry = 0; retry < 3; retry++) {
2989 if (drm_dp_dpcd_write(&intel_dp->aux, DP_SINK_COUNT_ESI + 1,
2990 &esi[1], 3) == 3)
2991 return true;
2992 }
2993
2994 return false;
2995 }
2996
2997 bool
2998 intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
2999 const struct drm_connector_state *conn_state)
3000 {
3001 /*
3002 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
3003 * of Color Encoding Format and Content Color Gamut], in order to
3004          * send YCbCr 4:2:0 or HDR BT.2020 signals we should use a DP VSC SDP.
3005 */
3006 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
3007 return true;
3008
3009 switch (conn_state->colorspace) {
3010 case DRM_MODE_COLORIMETRY_SYCC_601:
3011 case DRM_MODE_COLORIMETRY_OPYCC_601:
3012 case DRM_MODE_COLORIMETRY_BT2020_YCC:
3013 case DRM_MODE_COLORIMETRY_BT2020_RGB:
3014 case DRM_MODE_COLORIMETRY_BT2020_CYCC:
3015 return true;
3016 default:
3017 break;
3018 }
3019
3020 return false;
3021 }
3022
3023 static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
3024 struct dp_sdp *sdp, size_t size)
3025 {
3026 size_t length = sizeof(struct dp_sdp);
3027
3028 if (size < length)
3029 return -ENOSPC;
3030
3031 memset(sdp, 0, size);
3032
3033 /*
3034 * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
3035 * VSC SDP Header Bytes
3036 */
3037 sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
3038 sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
3039 sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
3040 sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */
3041
3042 /*
3043 * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as
3044 * per DP 1.4a spec.
3045 */
3046 if (vsc->revision != 0x5)
3047 goto out;
3048
3049 /* VSC SDP Payload for DB16 through DB18 */
3050 /* Pixel Encoding and Colorimetry Formats */
3051 sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
3052 sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */
3053
3054 switch (vsc->bpc) {
3055 case 6:
3056 /* 6bpc: 0x0 */
3057 break;
3058 case 8:
3059 sdp->db[17] = 0x1; /* DB17[3:0] */
3060 break;
3061 case 10:
3062 sdp->db[17] = 0x2;
3063 break;
3064 case 12:
3065 sdp->db[17] = 0x3;
3066 break;
3067 case 16:
3068 sdp->db[17] = 0x4;
3069 break;
3070 default:
3071 MISSING_CASE(vsc->bpc);
3072 break;
3073 }
3074 /* Dynamic Range and Component Bit Depth */
3075 if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
3076 sdp->db[17] |= 0x80; /* DB17[7] */
3077
3078 /* Content Type */
3079 sdp->db[18] = vsc->content_type & 0x7;
3080
3081 out:
3082 return length;
3083 }
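 /*
  * Illustrative packed result (assuming DP_SDP_VSC == 0x07 and that the RGB
  * pixel format and default colorimetry encode as zero) for an 8 bpc RGB,
  * CTA dynamic range, revision 0x5 SDP: sdp_header = { 0x00, 0x07, 0x05,
  * 0x13 }, db[16] = 0x00, db[17] = 0x81 and db[18] = 0x00.
  */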
3084
3085 static ssize_t
3086 intel_dp_hdr_metadata_infoframe_sdp_pack(struct drm_i915_private *i915,
3087 const struct hdmi_drm_infoframe *drm_infoframe,
3088 struct dp_sdp *sdp,
3089 size_t size)
3090 {
3091 size_t length = sizeof(struct dp_sdp);
3092 const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
3093 unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
3094 ssize_t len;
3095
3096 if (size < length)
3097 return -ENOSPC;
3098
3099 memset(sdp, 0, size);
3100
3101 len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
3102 if (len < 0) {
3103 drm_dbg_kms(&i915->drm, "buffer size is smaller than hdr metadata infoframe\n");
3104 return -ENOSPC;
3105 }
3106
3107 if (len != infoframe_size) {
3108 drm_dbg_kms(&i915->drm, "wrong static hdr metadata size\n");
3109 return -ENOSPC;
3110 }
3111
3112 /*
3113 * Set up the infoframe sdp packet for HDR static metadata.
3114          * Prepare the infoframe SDP header as per DP 1.4a spec,
3115 * Table 2-100 and Table 2-101
3116 */
3117
3118 /* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
3119 sdp->sdp_header.HB0 = 0;
3120 /*
3121 * Packet Type 80h + Non-audio INFOFRAME Type value
3122 * HDMI_INFOFRAME_TYPE_DRM: 0x87
3123 * - 80h + Non-audio INFOFRAME Type value
3124 * - InfoFrame Type: 0x07
3125 * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
3126 */
3127 sdp->sdp_header.HB1 = drm_infoframe->type;
3128 /*
3129 * Least Significant Eight Bits of (Data Byte Count – 1)
3130 * infoframe_size - 1
3131 */
3132 sdp->sdp_header.HB2 = 0x1D;
3133 /* INFOFRAME SDP Version Number */
3134 sdp->sdp_header.HB3 = (0x13 << 2);
3135 /* CTA Header Byte 2 (INFOFRAME Version Number) */
3136 sdp->db[0] = drm_infoframe->version;
3137 /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
3138 sdp->db[1] = drm_infoframe->length;
3139 /*
3140 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
3141 * HDMI_INFOFRAME_HEADER_SIZE
3142 */
3143 BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
3144 memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
3145 HDMI_DRM_INFOFRAME_SIZE);
3146
3147 /*
3148          * Size of the DP infoframe SDP packet for HDR static metadata consists of
3149          * - DP SDP Header (struct dp_sdp_header): 4 bytes
3150          * - Two Data Blocks: 2 bytes
3151          *    CTA Header Byte2 (INFOFRAME Version Number)
3152          *    CTA Header Byte3 (Length of INFOFRAME)
3153          * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
3154          * i.e. 4 + 2 + 26 = 32 bytes in total.
3155          * Prior to GEN11 the GMP register size is identical to the DP HDR static
3156          * metadata infoframe size, but on GEN11+ the register is larger and
3157          * write_infoframe() will pad the rest of the size.
3158 */
3159 return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
3160 }
3161
3162 static void intel_write_dp_sdp(struct intel_encoder *encoder,
3163 const struct intel_crtc_state *crtc_state,
3164 unsigned int type)
3165 {
3166 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
3167 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3168 struct dp_sdp sdp = {};
3169 ssize_t len;
3170
3171 if ((crtc_state->infoframes.enable &
3172 intel_hdmi_infoframe_enable(type)) == 0)
3173 return;
3174
3175 switch (type) {
3176 case DP_SDP_VSC:
3177 len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp,
3178 sizeof(sdp));
3179 break;
3180 case HDMI_PACKET_TYPE_GAMUT_METADATA:
3181 len = intel_dp_hdr_metadata_infoframe_sdp_pack(dev_priv,
3182 &crtc_state->infoframes.drm.drm,
3183 &sdp, sizeof(sdp));
3184 break;
3185 default:
3186 MISSING_CASE(type);
3187 return;
3188 }
3189
3190 if (drm_WARN_ON(&dev_priv->drm, len < 0))
3191 return;
3192
3193 dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
3194 }
3195
3196 void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
3197 const struct intel_crtc_state *crtc_state,
3198 const struct drm_dp_vsc_sdp *vsc)
3199 {
3200 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
3201 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3202 struct dp_sdp sdp = {};
3203 ssize_t len;
3204
3205 len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));
3206
3207 if (drm_WARN_ON(&dev_priv->drm, len < 0))
3208 return;
3209
3210 dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
3211 &sdp, len);
3212 }
3213
3214 void intel_dp_set_infoframes(struct intel_encoder *encoder,
3215 bool enable,
3216 const struct intel_crtc_state *crtc_state,
3217 const struct drm_connector_state *conn_state)
3218 {
3219 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3220 i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
3221 u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
3222 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
3223 VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
3224 u32 val = intel_de_read(dev_priv, reg) & ~dip_enable;
3225
3226 /* TODO: Add DSC case (DIP_ENABLE_PPS) */
3227 /* When PSR is enabled, this routine doesn't disable VSC DIP */
3228 if (!crtc_state->has_psr)
3229 val &= ~VIDEO_DIP_ENABLE_VSC_HSW;
3230
3231 intel_de_write(dev_priv, reg, val);
3232 intel_de_posting_read(dev_priv, reg);
3233
3234 if (!enable)
3235 return;
3236
3237 /* When PSR is enabled, VSC SDP is handled by PSR routine */
3238 if (!crtc_state->has_psr)
3239 intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);
3240
3241 intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
3242 }
3243
3244 static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
3245 const void *buffer, size_t size)
3246 {
3247 const struct dp_sdp *sdp = buffer;
3248
3249 if (size < sizeof(struct dp_sdp))
3250 return -EINVAL;
3251
3252 memset(vsc, 0, sizeof(*vsc));
3253
3254 if (sdp->sdp_header.HB0 != 0)
3255 return -EINVAL;
3256
3257 if (sdp->sdp_header.HB1 != DP_SDP_VSC)
3258 return -EINVAL;
3259
3260 vsc->sdp_type = sdp->sdp_header.HB1;
3261 vsc->revision = sdp->sdp_header.HB2;
3262 vsc->length = sdp->sdp_header.HB3;
3263
3264 if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) ||
3265 (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) {
3266 /*
3267 * - HB2 = 0x2, HB3 = 0x8
3268 * VSC SDP supporting 3D stereo + PSR
3269 * - HB2 = 0x4, HB3 = 0xe
3270 * VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of
3271 * first scan line of the SU region (applies to eDP v1.4b
3272 * and higher).
3273 */
3274 return 0;
3275 } else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) {
3276 /*
3277 * - HB2 = 0x5, HB3 = 0x13
3278 * VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry
3279 * Format.
3280 */
3281 vsc->pixelformat = (sdp->db[16] >> 4) & 0xf;
3282 vsc->colorimetry = sdp->db[16] & 0xf;
3283 vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1;
3284
3285 switch (sdp->db[17] & 0x7) {
3286 case 0x0:
3287 vsc->bpc = 6;
3288 break;
3289 case 0x1:
3290 vsc->bpc = 8;
3291 break;
3292 case 0x2:
3293 vsc->bpc = 10;
3294 break;
3295 case 0x3:
3296 vsc->bpc = 12;
3297 break;
3298 case 0x4:
3299 vsc->bpc = 16;
3300 break;
3301 default:
3302 MISSING_CASE(sdp->db[17] & 0x7);
3303 return -EINVAL;
3304 }
3305
3306 vsc->content_type = sdp->db[18] & 0x7;
3307 } else {
3308 return -EINVAL;
3309 }
3310
3311 return 0;
3312 }
3313
3314 static int
3315 intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
3316 const void *buffer, size_t size)
3317 {
3318 int ret;
3319
3320 const struct dp_sdp *sdp = buffer;
3321
3322 if (size < sizeof(struct dp_sdp))
3323 return -EINVAL;
3324
3325 if (sdp->sdp_header.HB0 != 0)
3326 return -EINVAL;
3327
3328 if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM)
3329 return -EINVAL;
3330
3331 /*
3332 * Least Significant Eight Bits of (Data Byte Count – 1)
3333 * 1Dh (i.e., Data Byte Count = 30 bytes).
3334 */
3335 if (sdp->sdp_header.HB2 != 0x1D)
3336 return -EINVAL;
3337
3338 /* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. */
3339 if ((sdp->sdp_header.HB3 & 0x3) != 0)
3340 return -EINVAL;
3341
3342 /* INFOFRAME SDP Version Number */
3343 if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13)
3344 return -EINVAL;
3345
3346 /* CTA Header Byte 2 (INFOFRAME Version Number) */
3347 if (sdp->db[0] != 1)
3348 return -EINVAL;
3349
3350 /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
3351 if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE)
3352 return -EINVAL;
3353
3354 ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2],
3355 HDMI_DRM_INFOFRAME_SIZE);
3356
3357 return ret;
3358 }
3359
3360 static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
3361 struct intel_crtc_state *crtc_state,
3362 struct drm_dp_vsc_sdp *vsc)
3363 {
3364 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
3365 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3366 unsigned int type = DP_SDP_VSC;
3367 struct dp_sdp sdp = {};
3368 int ret;
3369
3370 /* When PSR is enabled, VSC SDP is handled by PSR routine */
3371 if (crtc_state->has_psr)
3372 return;
3373
3374 if ((crtc_state->infoframes.enable &
3375 intel_hdmi_infoframe_enable(type)) == 0)
3376 return;
3377
3378 dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));
3379
3380 ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));
3381
3382 if (ret)
3383 drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
3384 }
3385
3386 static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
3387 struct intel_crtc_state *crtc_state,
3388 struct hdmi_drm_infoframe *drm_infoframe)
3389 {
3390 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
3391 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3392 unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
3393 struct dp_sdp sdp = {};
3394 int ret;
3395
3396 if ((crtc_state->infoframes.enable &
3397 intel_hdmi_infoframe_enable(type)) == 0)
3398 return;
3399
3400 dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
3401 sizeof(sdp));
3402
3403 ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
3404 sizeof(sdp));
3405
3406 if (ret)
3407 drm_dbg_kms(&dev_priv->drm,
3408 "Failed to unpack DP HDR Metadata Infoframe SDP\n");
3409 }
3410
3411 void intel_read_dp_sdp(struct intel_encoder *encoder,
3412 struct intel_crtc_state *crtc_state,
3413 unsigned int type)
3414 {
3415 switch (type) {
3416 case DP_SDP_VSC:
3417 intel_read_dp_vsc_sdp(encoder, crtc_state,
3418 &crtc_state->infoframes.vsc);
3419 break;
3420 case HDMI_PACKET_TYPE_GAMUT_METADATA:
3421 intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
3422 &crtc_state->infoframes.drm.drm);
3423 break;
3424 default:
3425 MISSING_CASE(type);
3426 break;
3427 }
3428 }
3429
3430 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
3431 {
3432 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3433 int status = 0;
3434 int test_link_rate;
3435 u8 test_lane_count, test_link_bw;
3436 /* (DP CTS 1.2)
3437 * 4.3.1.11
3438 */
3439         /* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
3440 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
3441 &test_lane_count);
3442
3443 if (status <= 0) {
3444 drm_dbg_kms(&i915->drm, "Lane count read failed\n");
3445 return DP_TEST_NAK;
3446 }
3447 test_lane_count &= DP_MAX_LANE_COUNT_MASK;
3448
3449 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
3450 &test_link_bw);
3451 if (status <= 0) {
3452 drm_dbg_kms(&i915->drm, "Link Rate read failed\n");
3453 return DP_TEST_NAK;
3454 }
3455 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
3456
3457 /* Validate the requested link rate and lane count */
3458 if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
3459 test_lane_count))
3460 return DP_TEST_NAK;
3461
3462 intel_dp->compliance.test_lane_count = test_lane_count;
3463 intel_dp->compliance.test_link_rate = test_link_rate;
3464
3465 return DP_TEST_ACK;
3466 }
3467
3468 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
3469 {
3470 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3471 u8 test_pattern;
3472 u8 test_misc;
3473 __be16 h_width, v_height;
3474 int status = 0;
3475
3476 /* Read the TEST_PATTERN (DP CTS 3.1.5) */
3477 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
3478 &test_pattern);
3479 if (status <= 0) {
3480 drm_dbg_kms(&i915->drm, "Test pattern read failed\n");
3481 return DP_TEST_NAK;
3482 }
3483 if (test_pattern != DP_COLOR_RAMP)
3484 return DP_TEST_NAK;
3485
3486 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
3487 &h_width, 2);
3488 if (status <= 0) {
3489 drm_dbg_kms(&i915->drm, "H Width read failed\n");
3490 return DP_TEST_NAK;
3491 }
3492
3493 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
3494 &v_height, 2);
3495 if (status <= 0) {
3496 drm_dbg_kms(&i915->drm, "V Height read failed\n");
3497 return DP_TEST_NAK;
3498 }
3499
3500 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
3501 &test_misc);
3502 if (status <= 0) {
3503 drm_dbg_kms(&i915->drm, "TEST MISC read failed\n");
3504 return DP_TEST_NAK;
3505 }
3506 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
3507 return DP_TEST_NAK;
3508 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
3509 return DP_TEST_NAK;
3510 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
3511 case DP_TEST_BIT_DEPTH_6:
3512 intel_dp->compliance.test_data.bpc = 6;
3513 break;
3514 case DP_TEST_BIT_DEPTH_8:
3515 intel_dp->compliance.test_data.bpc = 8;
3516 break;
3517 default:
3518 return DP_TEST_NAK;
3519 }
3520
3521 intel_dp->compliance.test_data.video_pattern = test_pattern;
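/*
 * TEST_H_WIDTH/TEST_V_HEIGHT are stored MSB first in the DPCD
 * (HI byte, then LO byte), hence the be16_to_cpu() conversions below.
 */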
3522 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
3523 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
3524 /* Set test active flag here so userspace doesn't interrupt things */
3525 intel_dp->compliance.test_active = true;
3526
3527 return DP_TEST_ACK;
3528 }
3529
3530 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
3531 {
3532 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3533 u8 test_result = DP_TEST_ACK;
3534 struct intel_connector *intel_connector = intel_dp->attached_connector;
3535 struct drm_connector *connector = &intel_connector->base;
3536
3537 if (intel_connector->detect_edid == NULL ||
3538 connector->edid_corrupt ||
3539 intel_dp->aux.i2c_defer_count > 6) {
3540 /* Check EDID read for NACKs, DEFERs and corruption
3541 * (DP CTS 1.2 Core r1.1)
3542 * 4.2.2.4 : Failed EDID read, I2C_NAK
3543 * 4.2.2.5 : Failed EDID read, I2C_DEFER
3544 * 4.2.2.6 : EDID corruption detected
3545 * Use failsafe mode for all cases
3546 */
3547 if (intel_dp->aux.i2c_nack_count > 0 ||
3548 intel_dp->aux.i2c_defer_count > 0)
3549 drm_dbg_kms(&i915->drm,
3550 "EDID read had %d NACKs, %d DEFERs\n",
3551 intel_dp->aux.i2c_nack_count,
3552 intel_dp->aux.i2c_defer_count);
3553 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
3554 } else {
3555 struct edid *block = intel_connector->detect_edid;
3556
3557 /* We have to write the checksum
3558 * of the last block read
3559 */
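/*
 * struct edid is exactly one 128-byte block, so advancing the pointer
 * by 'extensions' lands on the last extension block that was read.
 */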
3560 block += intel_connector->detect_edid->extensions;
3561
3562 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
3563 block->checksum) <= 0)
3564 drm_dbg_kms(&i915->drm,
3565 "Failed to write EDID checksum\n");
3566
3567 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
3568 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
3569 }
3570
3571 /* Set test active flag here so userspace doesn't interrupt things */
3572 intel_dp->compliance.test_active = true;
3573
3574 return test_result;
3575 }
3576
3577 static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
3578 const struct intel_crtc_state *crtc_state)
3579 {
3580 struct drm_i915_private *dev_priv =
3581 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
3582 struct drm_dp_phy_test_params *data =
3583 &intel_dp->compliance.test_data.phytest;
3584 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3585 enum pipe pipe = crtc->pipe;
3586 u32 pattern_val;
3587
3588 switch (data->phy_pattern) {
3589 case DP_PHY_TEST_PATTERN_NONE:
3590 drm_dbg_kms(&dev_priv->drm, "Disable Phy Test Pattern\n");
3591 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
3592 break;
3593 case DP_PHY_TEST_PATTERN_D10_2:
3594 drm_dbg_kms(&dev_priv->drm, "Set D10.2 Phy Test Pattern\n");
3595 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
3596 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
3597 break;
3598 case DP_PHY_TEST_PATTERN_ERROR_COUNT:
3599 drm_dbg_kms(&dev_priv->drm, "Set Error Count Phy Test Pattern\n");
3600 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
3601 DDI_DP_COMP_CTL_ENABLE |
3602 DDI_DP_COMP_CTL_SCRAMBLED_0);
3603 break;
3604 case DP_PHY_TEST_PATTERN_PRBS7:
3605 drm_dbg_kms(&dev_priv->drm, "Set PRBS7 Phy Test Pattern\n");
3606 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
3607 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
3608 break;
3609 case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
3610 /*
3611 * FIXME: Ideally the pattern should come from DPCD 0x250. Since the
3612 * current DPR-100 firmware cannot set it, hardcode it here for now
3613 * for the compliance test.
3614 */
3615 drm_dbg_kms(&dev_priv->drm,
3616 "Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
3617 pattern_val = 0x3e0f83e0;
3618 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
3619 pattern_val = 0x0f83e0f8;
3620 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
3621 pattern_val = 0x0000f83e;
3622 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
3623 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
3624 DDI_DP_COMP_CTL_ENABLE |
3625 DDI_DP_COMP_CTL_CUSTOM80);
3626 break;
3627 case DP_PHY_TEST_PATTERN_CP2520:
3628 /*
3629 * FIXME: Ideally the pattern should come from DPCD 0x24A. Since the
3630 * current DPR-100 firmware cannot set it, hardcode it here for now
3631 * for the compliance test.
3632 */
3633 drm_dbg_kms(&dev_priv->drm, "Set HBR2 compliance Phy Test Pattern\n");
3634 pattern_val = 0xFB;
3635 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
3636 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
3637 pattern_val);
3638 break;
3639 default:
3640 WARN(1, "Invalid Phy Test Pattern\n");
3641 }
3642 }
3643
3644 static void
3645 intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp,
3646 const struct intel_crtc_state *crtc_state)
3647 {
3648 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3649 struct drm_device *dev = dig_port->base.base.dev;
3650 struct drm_i915_private *dev_priv = to_i915(dev);
3651 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
3652 enum pipe pipe = crtc->pipe;
3653 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
3654
3655 trans_ddi_func_ctl_value = intel_de_read(dev_priv,
3656 TRANS_DDI_FUNC_CTL(pipe));
3657 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
3658 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));
3659
3660 trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE |
3661 TGL_TRANS_DDI_PORT_MASK);
3662 trans_conf_value &= ~PIPECONF_ENABLE;
3663 dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE;
3664
3665 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
3666 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
3667 trans_ddi_func_ctl_value);
3668 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
3669 }
3670
3671 static void
3672 intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp,
3673 const struct intel_crtc_state *crtc_state)
3674 {
3675 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3676 struct drm_device *dev = dig_port->base.base.dev;
3677 struct drm_i915_private *dev_priv = to_i915(dev);
3678 enum port port = dig_port->base.port;
3679 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
3680 enum pipe pipe = crtc->pipe;
3681 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
3682
3683 trans_ddi_func_ctl_value = intel_de_read(dev_priv,
3684 TRANS_DDI_FUNC_CTL(pipe));
3685 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
3686 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));
3687
3688 trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE |
3689 TGL_TRANS_DDI_SELECT_PORT(port);
3690 trans_conf_value |= PIPECONF_ENABLE;
3691 dp_tp_ctl_value |= DP_TP_CTL_ENABLE;
3692
3693 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
3694 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
3695 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
3696 trans_ddi_func_ctl_value);
3697 }
3698
3699 static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
3700 const struct intel_crtc_state *crtc_state)
3701 {
3702 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3703 struct drm_dp_phy_test_params *data =
3704 &intel_dp->compliance.test_data.phytest;
3705 u8 link_status[DP_LINK_STATUS_SIZE];
3706
3707 if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
3708 link_status) < 0) {
3709 drm_dbg_kms(&i915->drm, "failed to get link status\n");
3710 return;
3711 }
3712
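/*
 * Sequence: mirror the requested vswing/pre-emphasis, briefly disable
 * the transcoder/DDI, program the source pattern generator, re-enable,
 * and finally tell the sink via DPCD which pattern to expect.
 */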
3713 /* retrieve vswing & pre-emphasis setting */
3714 intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX,
3715 link_status);
3716
3717 intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state);
3718
3719 intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);
3720
3721 intel_dp_phy_pattern_update(intel_dp, crtc_state);
3722
3723 intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state);
3724
3725 drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3726 intel_dp->train_set, crtc_state->lane_count);
3727
3728 drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
3729 link_status[DP_DPCD_REV]);
3730 }
3731
3732 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
3733 {
3734 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3735 struct drm_dp_phy_test_params *data =
3736 &intel_dp->compliance.test_data.phytest;
3737
3738 if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
3739 drm_dbg_kms(&i915->drm, "DP Phy Test pattern AUX read failure\n");
3740 return DP_TEST_NAK;
3741 }
3742
3743 /* Set test active flag here so userspace doesn't interrupt things */
3744 intel_dp->compliance.test_active = true;
3745
3746 return DP_TEST_ACK;
3747 }
3748
3749 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
3750 {
3751 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3752 u8 response = DP_TEST_NAK;
3753 u8 request = 0;
3754 int status;
3755
3756 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
3757 if (status <= 0) {
3758 drm_dbg_kms(&i915->drm,
3759 "Could not read test request from sink\n");
3760 goto update_status;
3761 }
3762
3763 switch (request) {
3764 case DP_TEST_LINK_TRAINING:
3765 drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n");
3766 response = intel_dp_autotest_link_training(intel_dp);
3767 break;
3768 case DP_TEST_LINK_VIDEO_PATTERN:
3769 drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n");
3770 response = intel_dp_autotest_video_pattern(intel_dp);
3771 break;
3772 case DP_TEST_LINK_EDID_READ:
3773 drm_dbg_kms(&i915->drm, "EDID test requested\n");
3774 response = intel_dp_autotest_edid(intel_dp);
3775 break;
3776 case DP_TEST_LINK_PHY_TEST_PATTERN:
3777 drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n");
3778 response = intel_dp_autotest_phy_pattern(intel_dp);
3779 break;
3780 default:
3781 drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n",
3782 request);
3783 break;
3784 }
3785
3786 if (response & DP_TEST_ACK)
3787 intel_dp->compliance.test_type = request;
3788
3789 update_status:
3790 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
3791 if (status <= 0)
3792 drm_dbg_kms(&i915->drm,
3793 "Could not write test response to sink\n");
3794 }
3795
3796 static bool intel_dp_link_ok(struct intel_dp *intel_dp,
3797 u8 link_status[DP_LINK_STATUS_SIZE])
3798 {
3799 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3800 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
3801 bool uhbr = intel_dp->link_rate >= 1000000;
3802 bool ok;
3803
3804 if (uhbr)
3805 ok = drm_dp_128b132b_lane_channel_eq_done(link_status,
3806 intel_dp->lane_count);
3807 else
3808 ok = drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
3809
3810 if (ok)
3811 return true;
3812
3813 intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
3814 drm_dbg_kms(&i915->drm,
3815 "[ENCODER:%d:%s] %s link not ok, retraining\n",
3816 encoder->base.base.id, encoder->base.name,
3817 uhbr ? "128b/132b" : "8b/10b");
3818
3819 return false;
3820 }
3821
3822 static void
3823 intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack)
3824 {
3825 bool handled = false;
3826
3827 drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
3828 if (handled)
3829 ack[1] |= esi[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
3830
3831 if (esi[1] & DP_CP_IRQ) {
3832 intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
3833 ack[1] |= DP_CP_IRQ;
3834 }
3835 }
3836
3837 static bool intel_dp_mst_link_status(struct intel_dp *intel_dp)
3838 {
3839 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3840 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
3841 u8 link_status[DP_LINK_STATUS_SIZE] = {};
3842 const size_t esi_link_status_size = DP_LINK_STATUS_SIZE - 2;
3843
3844 if (drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS_ESI, link_status,
3845 esi_link_status_size) != esi_link_status_size) {
3846 drm_err(&i915->drm,
3847 "[ENCODER:%d:%s] Failed to read link status\n",
3848 encoder->base.base.id, encoder->base.name);
3849 return false;
3850 }
3851
3852 return intel_dp_link_ok(intel_dp, link_status);
3853 }
3854
3855 /**
3856 * intel_dp_check_mst_status - service any pending MST interrupts, check link status
3857 * @intel_dp: Intel DP struct
3858 *
3859 * Read any pending MST interrupts, call MST core to handle these and ack the
3860 * interrupts. Check whether the main link and AUX link states are ok.
3861 *
3862 * Returns:
3863 * - %true if pending interrupts were serviced (or no interrupts were
3864 * pending) w/o detecting an error condition.
3865 * - %false if an error condition - like AUX failure or a loss of link - is
3866 * detected, which needs servicing from the hotplug work.
3867 */
3868 static bool
3869 intel_dp_check_mst_status(struct intel_dp *intel_dp)
3870 {
3871 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3872 bool link_ok = true;
3873
3874 drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);
3875
3876 for (;;) {
3877 u8 esi[4] = {};
3878 u8 ack[4] = {};
3879
3880 if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
3881 drm_dbg_kms(&i915->drm,
3882 "failed to get ESI - device may have failed\n");
3883 link_ok = false;
3884
3885 break;
3886 }
3887
3888 drm_dbg_kms(&i915->drm, "DPRX ESI: %4ph\n", esi);
3889
3890 if (intel_dp->active_mst_links > 0 && link_ok &&
3891 esi[3] & LINK_STATUS_CHANGED) {
3892 if (!intel_dp_mst_link_status(intel_dp))
3893 link_ok = false;
3894 ack[3] |= LINK_STATUS_CHANGED;
3895 }
3896
3897 intel_dp_mst_hpd_irq(intel_dp, esi, ack);
3898
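/* memchr_inv() returns NULL when all ack bytes are zero, i.e. nothing left to ack */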
3899 if (!memchr_inv(ack, 0, sizeof(ack)))
3900 break;
3901
3902 if (!intel_dp_ack_sink_irq_esi(intel_dp, ack))
3903 drm_dbg_kms(&i915->drm, "Failed to ack ESI\n");
3904 }
3905
3906 return link_ok;
3907 }
3908
3909 static void
3910 intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
3911 {
3912 bool is_active;
3913 u8 buf = 0;
3914
3915 is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux);
3916 if (intel_dp->frl.is_trained && !is_active) {
3917 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0)
3918 return;
3919
3920 buf &= ~DP_PCON_ENABLE_HDMI_LINK;
3921 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0)
3922 return;
3923
3924 drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base);
3925
3926 /* Restart FRL training or fall back to TMDS mode */
3927 intel_dp_check_frl_training(intel_dp);
3928 }
3929 }
3930
3931 static bool
3932 intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
3933 {
3934 u8 link_status[DP_LINK_STATUS_SIZE];
3935
3936 if (!intel_dp->link_trained)
3937 return false;
3938
3939 /*
3940 * While the PSR source HW is enabled it controls the main link on its
3941 * own, enabling and disabling frame transmission, so trying to retrain
3942 * here would fail: the link may not be up, or training patterns could
3943 * get mixed with frame data.
3944 * Also, when exiting PSR the HW retrains the link anyway, fixing any
3945 * link status error.
3946 */
3947 if (intel_psr_enabled(intel_dp))
3948 return false;
3949
3950 if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
3951 link_status) < 0)
3952 return false;
3953
3954 /*
3955 * Validate the cached values of intel_dp->link_rate and
3956 * intel_dp->lane_count before attempting to retrain.
3957 *
3958 * FIXME would be nice to use the crtc state here, but since
3959 * we need to call this from the short HPD handler that seems
3960 * a bit hard.
3961 */
3962 if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
3963 intel_dp->lane_count))
3964 return false;
3965
3966 /* Retrain if link not ok */
3967 return !intel_dp_link_ok(intel_dp, link_status);
3968 }
3969
3970 static bool intel_dp_has_connector(struct intel_dp *intel_dp,
3971 const struct drm_connector_state *conn_state)
3972 {
3973 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3974 struct intel_encoder *encoder;
3975 enum pipe pipe;
3976
3977 if (!conn_state->best_encoder)
3978 return false;
3979
3980 /* SST */
3981 encoder = &dp_to_dig_port(intel_dp)->base;
3982 if (conn_state->best_encoder == &encoder->base)
3983 return true;
3984
3985 /* MST */
3986 for_each_pipe(i915, pipe) {
3987 encoder = &intel_dp->mst_encoders[pipe]->base;
3988 if (conn_state->best_encoder == &encoder->base)
3989 return true;
3990 }
3991
3992 return false;
3993 }
3994
3995 static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp,
3996 struct drm_modeset_acquire_ctx *ctx,
3997 u8 *pipe_mask)
3998 {
3999 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4000 struct drm_connector_list_iter conn_iter;
4001 struct intel_connector *connector;
4002 int ret = 0;
4003
4004 *pipe_mask = 0;
4005
4006 if (!intel_dp_needs_link_retrain(intel_dp))
4007 return 0;
4008
4009 drm_connector_list_iter_begin(&i915->drm, &conn_iter);
4010 for_each_intel_connector_iter(connector, &conn_iter) {
4011 struct drm_connector_state *conn_state =
4012 connector->base.state;
4013 struct intel_crtc_state *crtc_state;
4014 struct intel_crtc *crtc;
4015
4016 if (!intel_dp_has_connector(intel_dp, conn_state))
4017 continue;
4018
4019 crtc = to_intel_crtc(conn_state->crtc);
4020 if (!crtc)
4021 continue;
4022
4023 ret = drm_modeset_lock(&crtc->base.mutex, ctx);
4024 if (ret)
4025 break;
4026
4027 crtc_state = to_intel_crtc_state(crtc->base.state);
4028
4029 drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));
4030
4031 if (!crtc_state->hw.active)
4032 continue;
4033
4034 if (conn_state->commit &&
4035 !try_wait_for_completion(&conn_state->commit->hw_done))
4036 continue;
4037
4038 *pipe_mask |= BIT(crtc->pipe);
4039 }
4040 drm_connector_list_iter_end(&conn_iter);
4041
4042 if (!intel_dp_needs_link_retrain(intel_dp))
4043 *pipe_mask = 0;
4044
4045 return ret;
4046 }
4047
4048 static bool intel_dp_is_connected(struct intel_dp *intel_dp)
4049 {
4050 struct intel_connector *connector = intel_dp->attached_connector;
4051
4052 return connector->base.status == connector_status_connected ||
4053 intel_dp->is_mst;
4054 }
4055
4056 int intel_dp_retrain_link(struct intel_encoder *encoder,
4057 struct drm_modeset_acquire_ctx *ctx)
4058 {
4059 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4060 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4061 struct intel_crtc *crtc;
4062 u8 pipe_mask;
4063 int ret;
4064
4065 if (!intel_dp_is_connected(intel_dp))
4066 return 0;
4067
4068 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
4069 ctx);
4070 if (ret)
4071 return ret;
4072
4073 ret = intel_dp_prep_link_retrain(intel_dp, ctx, &pipe_mask);
4074 if (ret)
4075 return ret;
4076
4077 if (pipe_mask == 0)
4078 return 0;
4079
4080 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n",
4081 encoder->base.base.id, encoder->base.name);
4082
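/*
 * Retrain sequence: suppress FIFO underrun reporting on the affected
 * pipes, retrain the link once (on the MST master transcoder where
 * applicable), then wait for a vblank before re-enabling the reporting.
 */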
4083 for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
4084 const struct intel_crtc_state *crtc_state =
4085 to_intel_crtc_state(crtc->base.state);
4086
4087 /* Suppress underruns caused by re-training */
4088 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
4089 if (crtc_state->has_pch_encoder)
4090 intel_set_pch_fifo_underrun_reporting(dev_priv,
4091 intel_crtc_pch_transcoder(crtc), false);
4092 }
4093
4094 for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
4095 const struct intel_crtc_state *crtc_state =
4096 to_intel_crtc_state(crtc->base.state);
4097
4098 /* retrain on the MST master transcoder */
4099 if (DISPLAY_VER(dev_priv) >= 12 &&
4100 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
4101 !intel_dp_mst_is_master_trans(crtc_state))
4102 continue;
4103
4104 intel_dp_check_frl_training(intel_dp);
4105 intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
4106 intel_dp_start_link_train(intel_dp, crtc_state);
4107 intel_dp_stop_link_train(intel_dp, crtc_state);
4108 break;
4109 }
4110
4111 for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
4112 const struct intel_crtc_state *crtc_state =
4113 to_intel_crtc_state(crtc->base.state);
4114
4115 /* Keep underrun reporting disabled until things are stable */
4116 intel_crtc_wait_for_next_vblank(crtc);
4117
4118 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
4119 if (crtc_state->has_pch_encoder)
4120 intel_set_pch_fifo_underrun_reporting(dev_priv,
4121 intel_crtc_pch_transcoder(crtc), true);
4122 }
4123
4124 return 0;
4125 }
4126
4127 static int intel_dp_prep_phy_test(struct intel_dp *intel_dp,
4128 struct drm_modeset_acquire_ctx *ctx,
4129 u8 *pipe_mask)
4130 {
4131 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4132 struct drm_connector_list_iter conn_iter;
4133 struct intel_connector *connector;
4134 int ret = 0;
4135
4136 *pipe_mask = 0;
4137
4138 drm_connector_list_iter_begin(&i915->drm, &conn_iter);
4139 for_each_intel_connector_iter(connector, &conn_iter) {
4140 struct drm_connector_state *conn_state =
4141 connector->base.state;
4142 struct intel_crtc_state *crtc_state;
4143 struct intel_crtc *crtc;
4144
4145 if (!intel_dp_has_connector(intel_dp, conn_state))
4146 continue;
4147
4148 crtc = to_intel_crtc(conn_state->crtc);
4149 if (!crtc)
4150 continue;
4151
4152 ret = drm_modeset_lock(&crtc->base.mutex, ctx);
4153 if (ret)
4154 break;
4155
4156 crtc_state = to_intel_crtc_state(crtc->base.state);
4157
4158 drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));
4159
4160 if (!crtc_state->hw.active)
4161 continue;
4162
4163 if (conn_state->commit &&
4164 !try_wait_for_completion(&conn_state->commit->hw_done))
4165 continue;
4166
4167 *pipe_mask |= BIT(crtc->pipe);
4168 }
4169 drm_connector_list_iter_end(&conn_iter);
4170
4171 return ret;
4172 }
4173
4174 static int intel_dp_do_phy_test(struct intel_encoder *encoder,
4175 struct drm_modeset_acquire_ctx *ctx)
4176 {
4177 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4178 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4179 struct intel_crtc *crtc;
4180 u8 pipe_mask;
4181 int ret;
4182
4183 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
4184 ctx);
4185 if (ret)
4186 return ret;
4187
4188 ret = intel_dp_prep_phy_test(intel_dp, ctx, &pipe_mask);
4189 if (ret)
4190 return ret;
4191
4192 if (pipe_mask == 0)
4193 return 0;
4194
4195 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n",
4196 encoder->base.base.id, encoder->base.name);
4197
4198 for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
4199 const struct intel_crtc_state *crtc_state =
4200 to_intel_crtc_state(crtc->base.state);
4201
4202 /* test on the MST master transcoder */
4203 if (DISPLAY_VER(dev_priv) >= 12 &&
4204 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
4205 !intel_dp_mst_is_master_trans(crtc_state))
4206 continue;
4207
4208 intel_dp_process_phy_request(intel_dp, crtc_state);
4209 break;
4210 }
4211
4212 return 0;
4213 }
4214
4215 void intel_dp_phy_test(struct intel_encoder *encoder)
4216 {
4217 struct drm_modeset_acquire_ctx ctx;
4218 int ret;
4219
4220 drm_modeset_acquire_init(&ctx, 0);
4221
4222 for (;;) {
4223 ret = intel_dp_do_phy_test(encoder, &ctx);
4224
4225 if (ret == -EDEADLK) {
4226 drm_modeset_backoff(&ctx);
4227 continue;
4228 }
4229
4230 break;
4231 }
4232
4233 drm_modeset_drop_locks(&ctx);
4234 drm_modeset_acquire_fini(&ctx);
4235 drm_WARN(encoder->base.dev, ret,
4236 "Acquiring modeset locks failed with %i\n", ret);
4237 }
4238
4239 static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
4240 {
4241 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4242 u8 val;
4243
4244 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
4245 return;
4246
4247 if (drm_dp_dpcd_readb(&intel_dp->aux,
4248 DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
4249 return;
4250
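/* Writing the read value back clears the asserted bits in the sink's IRQ vector */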
4251 drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
4252
4253 if (val & DP_AUTOMATED_TEST_REQUEST)
4254 intel_dp_handle_test_request(intel_dp);
4255
4256 if (val & DP_CP_IRQ)
4257 intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
4258
4259 if (val & DP_SINK_SPECIFIC_IRQ)
4260 drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
4261 }
4262
4263 static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
4264 {
4265 u8 val;
4266
4267 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
4268 return;
4269
4270 if (drm_dp_dpcd_readb(&intel_dp->aux,
4271 DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val)
4272 return;
4273
4274 if (drm_dp_dpcd_writeb(&intel_dp->aux,
4275 DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1)
4276 return;
4277
4278 if (val & HDMI_LINK_STATUS_CHANGED)
4279 intel_dp_handle_hdmi_link_status_change(intel_dp);
4280 }
4281
4282 /*
4283 * According to DP spec
4284 * 5.1.2:
4285 * 1. Read DPCD
4286 * 2. Configure link according to Receiver Capabilities
4287 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4288 * 4. Check link status on receipt of hot-plug interrupt
4289 *
4290 * intel_dp_short_pulse - handles short pulse interrupts
4291 * when full detection is not required.
4292 * Returns %true if the short pulse was handled and full detection
4293 * is NOT required, and %false otherwise.
4294 */
4295 static bool
4296 intel_dp_short_pulse(struct intel_dp *intel_dp)
4297 {
4298 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4299 u8 old_sink_count = intel_dp->sink_count;
4300 bool ret;
4301
4302 /*
4303 * Clear the compliance test variables so that values for the next
4304 * automated test request can be captured.
4305 */
4306 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
4307
4308 /*
4309 * Now read the DPCD to see if the sink is actually running.
4310 * If the current sink count doesn't match the value that was
4311 * stored earlier, or if the DPCD read failed, we need to do a
4312 * full detection.
4313 */
4314 ret = intel_dp_get_dpcd(intel_dp);
4315
4316 if ((old_sink_count != intel_dp->sink_count) || !ret) {
4317 /* No need to proceed if we are going to do full detect */
4318 return false;
4319 }
4320
4321 intel_dp_check_device_service_irq(intel_dp);
4322 intel_dp_check_link_service_irq(intel_dp);
4323
4324 /* Handle CEC interrupts, if any */
4325 drm_dp_cec_irq(&intel_dp->aux);
4326
4327 /* defer to the hotplug work for link retraining if needed */
4328 if (intel_dp_needs_link_retrain(intel_dp))
4329 return false;
4330
4331 intel_psr_short_pulse(intel_dp);
4332
4333 switch (intel_dp->compliance.test_type) {
4334 case DP_TEST_LINK_TRAINING:
4335 drm_dbg_kms(&dev_priv->drm,
4336 "Link Training Compliance Test requested\n");
4337 /* Send a Hotplug Uevent to userspace to start modeset */
4338 drm_kms_helper_hotplug_event(&dev_priv->drm);
4339 break;
4340 case DP_TEST_LINK_PHY_TEST_PATTERN:
4341 drm_dbg_kms(&dev_priv->drm,
4342 "PHY test pattern Compliance Test requested\n");
4343 /*
4344 * Schedule long hpd to do the test
4345 *
4346 * FIXME get rid of the ad-hoc phy test modeset code
4347 * and properly incorporate it into the normal modeset.
4348 */
4349 return false;
4350 }
4351
4352 return true;
4353 }
4354
4355 /* XXX this is probably wrong for multiple downstream ports */
4356 static enum drm_connector_status
4357 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4358 {
4359 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4360 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4361 u8 *dpcd = intel_dp->dpcd;
4362 u8 type;
4363
4364 if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
4365 return connector_status_connected;
4366
4367 lspcon_resume(dig_port);
4368
4369 if (!intel_dp_get_dpcd(intel_dp))
4370 return connector_status_disconnected;
4371
4372 /* if there's no downstream port, we're done */
4373 if (!drm_dp_is_branch(dpcd))
4374 return connector_status_connected;
4375
4376 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4377 if (intel_dp_has_sink_count(intel_dp) &&
4378 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4379 return intel_dp->sink_count ?
4380 connector_status_connected : connector_status_disconnected;
4381 }
4382
4383 if (intel_dp_can_mst(intel_dp))
4384 return connector_status_connected;
4385
4386 /* If no HPD, poke DDC gently */
4387 if (drm_probe_ddc(&intel_dp->aux.ddc))
4388 return connector_status_connected;
4389
4390 /* Well we tried, say unknown for unreliable port types */
4391 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4392 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4393 if (type == DP_DS_PORT_TYPE_VGA ||
4394 type == DP_DS_PORT_TYPE_NON_EDID)
4395 return connector_status_unknown;
4396 } else {
4397 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4398 DP_DWN_STRM_PORT_TYPE_MASK;
4399 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4400 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4401 return connector_status_unknown;
4402 }
4403
4404 /* Anything else is out of spec, warn and ignore */
4405 drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
4406 return connector_status_disconnected;
4407 }
4408
4409 static enum drm_connector_status
4410 edp_detect(struct intel_dp *intel_dp)
4411 {
4412 return connector_status_connected;
4413 }
4414
4415 /*
4416 * intel_digital_port_connected - is the specified port connected?
4417 * @encoder: intel_encoder
4418 *
4419 * In cases where there's a connector physically connected but it can't be used
4420 * by our hardware, we also return false, since the rest of the driver should
4421 * pretty much treat the port as disconnected. This is relevant for type-C
4422 * (starting on ICL) where there's ownership involved.
4423 *
4424 * Return %true if port is connected, %false otherwise.
4425 */
4426 bool intel_digital_port_connected(struct intel_encoder *encoder)
4427 {
4428 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4429 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
4430 bool is_connected = false;
4431 intel_wakeref_t wakeref;
4432
4433 with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
4434 is_connected = dig_port->connected(encoder);
4435
4436 return is_connected;
4437 }
4438
4439 static struct edid *
4440 intel_dp_get_edid(struct intel_dp *intel_dp)
4441 {
4442 struct intel_connector *intel_connector = intel_dp->attached_connector;
4443
4444 /* use cached edid if we have one */
4445 if (intel_connector->edid) {
4446 /* invalid edid */
4447 if (IS_ERR(intel_connector->edid))
4448 return NULL;
4449
4450 return drm_edid_duplicate(intel_connector->edid);
4451 } else
4452 return drm_get_edid(&intel_connector->base,
4453 &intel_dp->aux.ddc);
4454 }
4455
4456 static void
4457 intel_dp_update_dfp(struct intel_dp *intel_dp,
4458 const struct edid *edid)
4459 {
4460 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4461 struct intel_connector *connector = intel_dp->attached_connector;
4462
4463 intel_dp->dfp.max_bpc =
4464 drm_dp_downstream_max_bpc(intel_dp->dpcd,
4465 intel_dp->downstream_ports, edid);
4466
4467 intel_dp->dfp.max_dotclock =
4468 drm_dp_downstream_max_dotclock(intel_dp->dpcd,
4469 intel_dp->downstream_ports);
4470
4471 intel_dp->dfp.min_tmds_clock =
4472 drm_dp_downstream_min_tmds_clock(intel_dp->dpcd,
4473 intel_dp->downstream_ports,
4474 edid);
4475 intel_dp->dfp.max_tmds_clock =
4476 drm_dp_downstream_max_tmds_clock(intel_dp->dpcd,
4477 intel_dp->downstream_ports,
4478 edid);
4479
4480 intel_dp->dfp.pcon_max_frl_bw =
4481 drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd,
4482 intel_dp->downstream_ports);
4483
4484 drm_dbg_kms(&i915->drm,
4485 "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n",
4486 connector->base.base.id, connector->base.name,
4487 intel_dp->dfp.max_bpc,
4488 intel_dp->dfp.max_dotclock,
4489 intel_dp->dfp.min_tmds_clock,
4490 intel_dp->dfp.max_tmds_clock,
4491 intel_dp->dfp.pcon_max_frl_bw);
4492
4493 intel_dp_get_pcon_dsc_cap(intel_dp);
4494 }
4495
4496 static void
4497 intel_dp_update_420(struct intel_dp *intel_dp)
4498 {
4499 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4500 struct intel_connector *connector = intel_dp->attached_connector;
4501 bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420, rgb_to_ycbcr;
4502
4503 /* No YCbCr output support on gmch platforms */
4504 if (HAS_GMCH(i915))
4505 return;
4506
4507 /*
4508 * ILK doesn't seem capable of DP YCbCr output. The
4509 * displayed image is severely corrupted. SNB+ is fine.
4510 */
4511 if (IS_IRONLAKE(i915))
4512 return;
4513
4514 is_branch = drm_dp_is_branch(intel_dp->dpcd);
4515 ycbcr_420_passthrough =
4516 drm_dp_downstream_420_passthrough(intel_dp->dpcd,
4517 intel_dp->downstream_ports);
4518 /* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */
4519 ycbcr_444_to_420 =
4520 dp_to_dig_port(intel_dp)->lspcon.active ||
4521 drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
4522 intel_dp->downstream_ports);
4523 rgb_to_ycbcr = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
4524 intel_dp->downstream_ports,
4525 DP_DS_HDMI_BT709_RGB_YCBCR_CONV);
4526
4527 if (DISPLAY_VER(i915) >= 11) {
4528 /* Let PCON convert from RGB->YCbCr if possible */
4529 if (is_branch && rgb_to_ycbcr && ycbcr_444_to_420) {
4530 intel_dp->dfp.rgb_to_ycbcr = true;
4531 intel_dp->dfp.ycbcr_444_to_420 = true;
4532 connector->base.ycbcr_420_allowed = true;
4533 } else {
4534 /* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */
4535 intel_dp->dfp.ycbcr_444_to_420 =
4536 ycbcr_444_to_420 && !ycbcr_420_passthrough;
4537
4538 connector->base.ycbcr_420_allowed =
4539 !is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough;
4540 }
4541 } else {
4542 /* 4:4:4->4:2:0 conversion is the only way */
4543 intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420;
4544
4545 connector->base.ycbcr_420_allowed = ycbcr_444_to_420;
4546 }
4547
4548 drm_dbg_kms(&i915->drm,
4549 "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
4550 connector->base.base.id, connector->base.name,
4551 str_yes_no(intel_dp->dfp.rgb_to_ycbcr),
4552 str_yes_no(connector->base.ycbcr_420_allowed),
4553 str_yes_no(intel_dp->dfp.ycbcr_444_to_420));
4554 }
4555
4556 static void
4557 intel_dp_set_edid(struct intel_dp *intel_dp)
4558 {
4559 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4560 struct intel_connector *connector = intel_dp->attached_connector;
4561 struct edid *edid;
4562 bool vrr_capable;
4563
4564 intel_dp_unset_edid(intel_dp);
4565 edid = intel_dp_get_edid(intel_dp);
4566 connector->detect_edid = edid;
4567
4568 vrr_capable = intel_vrr_is_capable(connector);
4569 drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] VRR capable: %s\n",
4570 connector->base.base.id, connector->base.name, str_yes_no(vrr_capable));
4571 drm_connector_set_vrr_capable_property(&connector->base, vrr_capable);
4572
4573 intel_dp_update_dfp(intel_dp, edid);
4574 intel_dp_update_420(intel_dp);
4575
4576 if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
4577 intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
4578 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4579 }
4580
4581 drm_dp_cec_set_edid(&intel_dp->aux, edid);
4582 }
4583
4584 static void
4585 intel_dp_unset_edid(struct intel_dp *intel_dp)
4586 {
4587 struct intel_connector *connector = intel_dp->attached_connector;
4588
4589 drm_dp_cec_unset_edid(&intel_dp->aux);
4590 kfree(connector->detect_edid);
4591 connector->detect_edid = NULL;
4592
4593 intel_dp->has_hdmi_sink = false;
4594 intel_dp->has_audio = false;
4595
4596 intel_dp->dfp.max_bpc = 0;
4597 intel_dp->dfp.max_dotclock = 0;
4598 intel_dp->dfp.min_tmds_clock = 0;
4599 intel_dp->dfp.max_tmds_clock = 0;
4600
4601 intel_dp->dfp.pcon_max_frl_bw = 0;
4602
4603 intel_dp->dfp.ycbcr_444_to_420 = false;
4604 connector->base.ycbcr_420_allowed = false;
4605
4606 drm_connector_set_vrr_capable_property(&connector->base,
4607 false);
4608 }
4609
4610 static int
4611 intel_dp_detect(struct drm_connector *connector,
4612 struct drm_modeset_acquire_ctx *ctx,
4613 bool force)
4614 {
4615 struct drm_i915_private *dev_priv = to_i915(connector->dev);
4616 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
4617 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4618 struct intel_encoder *encoder = &dig_port->base;
4619 enum drm_connector_status status;
4620
4621 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
4622 connector->base.id, connector->name);
4623 drm_WARN_ON(&dev_priv->drm,
4624 !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
4625
4626 if (!INTEL_DISPLAY_ENABLED(dev_priv))
4627 return connector_status_disconnected;
4628
4629 /* Can't disconnect eDP */
4630 if (intel_dp_is_edp(intel_dp))
4631 status = edp_detect(intel_dp);
4632 else if (intel_digital_port_connected(encoder))
4633 status = intel_dp_detect_dpcd(intel_dp);
4634 else
4635 status = connector_status_disconnected;
4636
4637 if (status == connector_status_disconnected) {
4638 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
4639 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
4640
4641 if (intel_dp->is_mst) {
4642 drm_dbg_kms(&dev_priv->drm,
4643 "MST device may have disappeared %d vs %d\n",
4644 intel_dp->is_mst,
4645 intel_dp->mst_mgr.mst_state);
4646 intel_dp->is_mst = false;
4647 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4648 intel_dp->is_mst);
4649 }
4650
4651 goto out;
4652 }
4653
4654 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
4655 if (DISPLAY_VER(dev_priv) >= 11)
4656 intel_dp_get_dsc_sink_cap(intel_dp);
4657
4658 intel_dp_configure_mst(intel_dp);
4659
4660 /*
4661 * TODO: Reset link params when switching to MST mode, until MST
4662 * supports link training fallback params.
4663 */
4664 if (intel_dp->reset_link_params || intel_dp->is_mst) {
4665 intel_dp_reset_max_link_params(intel_dp);
4666 intel_dp->reset_link_params = false;
4667 }
4668
4669 intel_dp_print_rates(intel_dp);
4670
4671 if (intel_dp->is_mst) {
4672 /*
4673 * If we are in MST mode then this connector
4674 * won't appear connected and won't have any
4675 * EDID on it.
4676 */
4677 status = connector_status_disconnected;
4678 goto out;
4679 }
4680
4681 /*
4682 * Some external monitors do not signal loss of link synchronization
4683 * with an IRQ_HPD, so force a link status check.
4684 */
4685 if (!intel_dp_is_edp(intel_dp)) {
4686 int ret;
4687
4688 ret = intel_dp_retrain_link(encoder, ctx);
4689 if (ret)
4690 return ret;
4691 }
4692
4693 /*
4694 * Clear the NACK and defer counts so we get their exact values for
4695 * the upcoming EDID read; they are required by Compliance tests
4696 * 4.2.2.4 and 4.2.2.5.
4697 */
4698 intel_dp->aux.i2c_nack_count = 0;
4699 intel_dp->aux.i2c_defer_count = 0;
4700
4701 intel_dp_set_edid(intel_dp);
4702 if (intel_dp_is_edp(intel_dp) ||
4703 to_intel_connector(connector)->detect_edid)
4704 status = connector_status_connected;
4705
4706 intel_dp_check_device_service_irq(intel_dp);
4707
4708 out:
4709 if (status != connector_status_connected && !intel_dp->is_mst)
4710 intel_dp_unset_edid(intel_dp);
4711
4712 /*
4713 * Make sure the refs for power wells enabled during detect are
4714 * dropped to avoid a new detect cycle triggered by HPD polling.
4715 */
4716 intel_display_power_flush_work(dev_priv);
4717
4718 if (!intel_dp_is_edp(intel_dp))
4719 drm_dp_set_subconnector_property(connector,
4720 status,
4721 intel_dp->dpcd,
4722 intel_dp->downstream_ports);
4723 return status;
4724 }
4725
4726 static void
4727 intel_dp_force(struct drm_connector *connector)
4728 {
4729 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
4730 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4731 struct intel_encoder *intel_encoder = &dig_port->base;
4732 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
4733 enum intel_display_power_domain aux_domain =
4734 intel_aux_power_domain(dig_port);
4735 intel_wakeref_t wakeref;
4736
4737 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
4738 connector->base.id, connector->name);
4739 intel_dp_unset_edid(intel_dp);
4740
4741 if (connector->status != connector_status_connected)
4742 return;
4743
4744 wakeref = intel_display_power_get(dev_priv, aux_domain);
4745
4746 intel_dp_set_edid(intel_dp);
4747
4748 intel_display_power_put(dev_priv, aux_domain, wakeref);
4749 }
4750
4751 static int intel_dp_get_modes(struct drm_connector *connector)
4752 {
4753 struct intel_connector *intel_connector = to_intel_connector(connector);
4754 struct edid *edid;
4755 int num_modes = 0;
4756
4757 edid = intel_connector->detect_edid;
4758 if (edid)
4759 num_modes = intel_connector_update_modes(connector, edid);
4760
4761 /* Also add fixed mode, which may or may not be present in EDID */
4762 if (intel_dp_is_edp(intel_attached_dp(intel_connector)))
4763 num_modes += intel_panel_get_modes(intel_connector);
4764
4765 if (num_modes)
4766 return num_modes;
4767
4768 if (!edid) {
4769 struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
4770 struct drm_display_mode *mode;
4771
4772 mode = drm_dp_downstream_mode(connector->dev,
4773 intel_dp->dpcd,
4774 intel_dp->downstream_ports);
4775 if (mode) {
4776 drm_mode_probed_add(connector, mode);
4777 num_modes++;
4778 }
4779 }
4780
4781 return num_modes;
4782 }
4783
4784 static int
4785 intel_dp_connector_register(struct drm_connector *connector)
4786 {
4787 struct drm_i915_private *i915 = to_i915(connector->dev);
4788 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
4789 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4790 struct intel_lspcon *lspcon = &dig_port->lspcon;
4791 int ret;
4792
4793 ret = intel_connector_register(connector);
4794 if (ret)
4795 return ret;
4796
4797 drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
4798 intel_dp->aux.name, connector->kdev->kobj.name);
4799
4800 intel_dp->aux.dev = connector->kdev;
4801 ret = drm_dp_aux_register(&intel_dp->aux);
4802 if (!ret)
4803 drm_dp_cec_register_connector(&intel_dp->aux, connector);
4804
4805 if (!intel_bios_is_lspcon_present(i915, dig_port->base.port))
4806 return ret;
4807
4808 /*
4809 * TODO: Clean this up to handle lspcon init and resume in a more
4810 * efficient and streamlined way.
4811 */
4812 if (lspcon_init(dig_port)) {
4813 lspcon_detect_hdr_capability(lspcon);
4814 if (lspcon->hdr_supported)
4815 drm_connector_attach_hdr_output_metadata_property(connector);
4816 }
4817
4818 return ret;
4819 }
4820
4821 static void
4822 intel_dp_connector_unregister(struct drm_connector *connector)
4823 {
4824 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
4825
4826 drm_dp_cec_unregister_connector(&intel_dp->aux);
4827 drm_dp_aux_unregister(&intel_dp->aux);
4828 intel_connector_unregister(connector);
4829 }
4830
4831 void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
4832 {
4833 struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
4834 struct intel_dp *intel_dp = &dig_port->dp;
4835
4836 intel_dp_mst_encoder_cleanup(dig_port);
4837
4838 intel_pps_vdd_off_sync(intel_dp);
4839
4840 intel_dp_aux_fini(intel_dp);
4841 }
4842
4843 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4844 {
4845 struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
4846
4847 intel_pps_vdd_off_sync(intel_dp);
4848 }
4849
4850 void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
4851 {
4852 struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
4853
4854 intel_pps_wait_power_cycle(intel_dp);
4855 }
4856
4857 static int intel_modeset_tile_group(struct intel_atomic_state *state,
4858 int tile_group_id)
4859 {
4860 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4861 struct drm_connector_list_iter conn_iter;
4862 struct drm_connector *connector;
4863 int ret = 0;
4864
4865 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
4866 drm_for_each_connector_iter(connector, &conn_iter) {
4867 struct drm_connector_state *conn_state;
4868 struct intel_crtc_state *crtc_state;
4869 struct intel_crtc *crtc;
4870
4871 if (!connector->has_tile ||
4872 connector->tile_group->id != tile_group_id)
4873 continue;
4874
4875 conn_state = drm_atomic_get_connector_state(&state->base,
4876 connector);
4877 if (IS_ERR(conn_state)) {
4878 ret = PTR_ERR(conn_state);
4879 break;
4880 }
4881
4882 crtc = to_intel_crtc(conn_state->crtc);
4883
4884 if (!crtc)
4885 continue;
4886
4887 crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
4888 crtc_state->uapi.mode_changed = true;
4889
4890 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
4891 if (ret)
4892 break;
4893 }
4894 drm_connector_list_iter_end(&conn_iter);
4895
4896 return ret;
4897 }
4898
4899 static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
4900 {
4901 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4902 struct intel_crtc *crtc;
4903
4904 if (transcoders == 0)
4905 return 0;
4906
4907 for_each_intel_crtc(&dev_priv->drm, crtc) {
4908 struct intel_crtc_state *crtc_state;
4909 int ret;
4910
4911 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
4912 if (IS_ERR(crtc_state))
4913 return PTR_ERR(crtc_state);
4914
4915 if (!crtc_state->hw.enable)
4916 continue;
4917
4918 if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
4919 continue;
4920
4921 crtc_state->uapi.mode_changed = true;
4922
4923 ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
4924 if (ret)
4925 return ret;
4926
4927 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
4928 if (ret)
4929 return ret;
4930
4931 transcoders &= ~BIT(crtc_state->cpu_transcoder);
4932 }
4933
4934 drm_WARN_ON(&dev_priv->drm, transcoders != 0);
4935
4936 return 0;
4937 }
4938
4939 static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
4940 struct drm_connector *connector)
4941 {
4942 const struct drm_connector_state *old_conn_state =
4943 drm_atomic_get_old_connector_state(&state->base, connector);
4944 const struct intel_crtc_state *old_crtc_state;
4945 struct intel_crtc *crtc;
4946 u8 transcoders;
4947
4948 crtc = to_intel_crtc(old_conn_state->crtc);
4949 if (!crtc)
4950 return 0;
4951
4952 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
4953
4954 if (!old_crtc_state->hw.active)
4955 return 0;
4956
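/*
 * Force a modeset on every transcoder synced with this one: all the
 * slaves plus the master, if there is one.
 */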
4957 transcoders = old_crtc_state->sync_mode_slaves_mask;
4958 if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
4959 transcoders |= BIT(old_crtc_state->master_transcoder);
4960
4961 return intel_modeset_affected_transcoders(state,
4962 transcoders);
4963 }
4964
4965 static int intel_dp_connector_atomic_check(struct drm_connector *conn,
4966 struct drm_atomic_state *_state)
4967 {
4968 struct drm_i915_private *dev_priv = to_i915(conn->dev);
4969 struct intel_atomic_state *state = to_intel_atomic_state(_state);
4970 int ret;
4971
4972 ret = intel_digital_connector_atomic_check(conn, &state->base);
4973 if (ret)
4974 return ret;
4975
4976 /*
4977 * We don't enable port sync on BDW due to missing w/as and
4978 * due to not having adjusted the modeset sequence appropriately.
4979 */
4980 if (DISPLAY_VER(dev_priv) < 9)
4981 return 0;
4982
4983 if (!intel_connector_needs_modeset(state, conn))
4984 return 0;
4985
4986 if (conn->has_tile) {
4987 ret = intel_modeset_tile_group(state, conn->tile_group->id);
4988 if (ret)
4989 return ret;
4990 }
4991
4992 return intel_modeset_synced_crtcs(state, conn);
4993 }
4994
4995 static void intel_dp_oob_hotplug_event(struct drm_connector *connector)
4996 {
4997 struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
4998 struct drm_i915_private *i915 = to_i915(connector->dev);
4999
5000 spin_lock_irq(&i915->irq_lock);
5001 i915->hotplug.event_bits |= BIT(encoder->hpd_pin);
5002 spin_unlock_irq(&i915->irq_lock);
5003 queue_delayed_work(system_wq, &i915->hotplug.hotplug_work, 0);
5004 }
5005
5006 static const struct drm_connector_funcs intel_dp_connector_funcs = {
5007 .force = intel_dp_force,
5008 .fill_modes = drm_helper_probe_single_connector_modes,
5009 .atomic_get_property = intel_digital_connector_atomic_get_property,
5010 .atomic_set_property = intel_digital_connector_atomic_set_property,
5011 .late_register = intel_dp_connector_register,
5012 .early_unregister = intel_dp_connector_unregister,
5013 .destroy = intel_connector_destroy,
5014 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5015 .atomic_duplicate_state = intel_digital_connector_duplicate_state,
5016 .oob_hotplug_event = intel_dp_oob_hotplug_event,
5017 };
5018
5019 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
5020 .detect_ctx = intel_dp_detect,
5021 .get_modes = intel_dp_get_modes,
5022 .mode_valid = intel_dp_mode_valid,
5023 .atomic_check = intel_dp_connector_atomic_check,
5024 };
5025
5026 enum irqreturn
5027 intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
5028 {
5029 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
5030 struct intel_dp *intel_dp = &dig_port->dp;
5031
5032 if (dig_port->base.type == INTEL_OUTPUT_EDP &&
5033 (long_hpd || !intel_pps_have_panel_power_or_vdd(intel_dp))) {
5034 /*
5035 * vdd off can generate a long/short pulse on eDP which
5036 * would require vdd on to handle it, and thus we
5037 * would end up in an endless cycle of
5038 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
5039 */
5040 drm_dbg_kms(&i915->drm,
5041 "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
5042 long_hpd ? "long" : "short",
5043 dig_port->base.base.base.id,
5044 dig_port->base.base.name);
5045 return IRQ_HANDLED;
5046 }
5047
5048 drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
5049 dig_port->base.base.base.id,
5050 dig_port->base.base.name,
5051 long_hpd ? "long" : "short");
5052
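/*
 * A long pulse means a full detect cycle is needed: flag the link
 * params for reset and return IRQ_NONE so the hotplug code does it.
 */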
5053 if (long_hpd) {
5054 intel_dp->reset_link_params = true;
5055 return IRQ_NONE;
5056 }
5057
5058 if (intel_dp->is_mst) {
5059 if (!intel_dp_check_mst_status(intel_dp))
5060 return IRQ_NONE;
5061 } else if (!intel_dp_short_pulse(intel_dp)) {
5062 return IRQ_NONE;
5063 }
5064
5065 return IRQ_HANDLED;
5066 }
5067
5068 /* Check the VBT (and platform) to see whether this port is eDP */
5069 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
5070 {
5071 /*
5072 * eDP is not supported on g4x, so bail out early just
5073 * for a bit of extra safety in case the VBT is bonkers.
5074 */
5075 if (DISPLAY_VER(dev_priv) < 5)
5076 return false;
5077
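/* Before display version 9, port A is assumed to always be eDP */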
5078 if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A)
5079 return true;
5080
5081 return intel_bios_is_port_edp(dev_priv, port);
5082 }
5083
5084 static bool
5085 has_gamut_metadata_dip(struct drm_i915_private *i915, enum port port)
5086 {
5087 if (intel_bios_is_lspcon_present(i915, port))
5088 return false;
5089
5090 if (DISPLAY_VER(i915) >= 11)
5091 return true;
5092
5093 if (port == PORT_A)
5094 return false;
5095
5096 if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
5097 DISPLAY_VER(i915) >= 9)
5098 return true;
5099
5100 return false;
5101 }
5102
5103 static void
5104 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5105 {
5106 struct drm_i915_private *dev_priv = to_i915(connector->dev);
5107 enum port port = dp_to_dig_port(intel_dp)->base.port;
5108
5109 if (!intel_dp_is_edp(intel_dp))
5110 drm_connector_attach_dp_subconnector_property(connector);
5111
5112 if (!IS_G4X(dev_priv) && port != PORT_A)
5113 intel_attach_force_audio_property(connector);
5114
5115 intel_attach_broadcast_rgb_property(connector);
5116 if (HAS_GMCH(dev_priv))
5117 drm_connector_attach_max_bpc_property(connector, 6, 10);
5118 else if (DISPLAY_VER(dev_priv) >= 5)
5119 drm_connector_attach_max_bpc_property(connector, 6, 12);
5120
5121 /* Register HDMI colorspace for the lspcon case */
5122 if (intel_bios_is_lspcon_present(dev_priv, port)) {
5123 drm_connector_attach_content_type_property(connector);
5124 intel_attach_hdmi_colorspace_property(connector);
5125 } else {
5126 intel_attach_dp_colorspace_property(connector);
5127 }
5128
5129 if (has_gamut_metadata_dip(dev_priv, port))
5130 drm_connector_attach_hdr_output_metadata_property(connector);
5131
5132 if (intel_dp_is_edp(intel_dp)) {
5133 u32 allowed_scalers;
5134
5135 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
5136 if (!HAS_GMCH(dev_priv))
5137 allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
5138
5139 drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
5140
5141 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
5142
5143 }
5144
5145 if (HAS_VRR(dev_priv))
5146 drm_connector_attach_vrr_capable_property(connector);
5147 }
5148
5149 static void
5150 intel_edp_add_properties(struct intel_dp *intel_dp)
5151 {
5152 struct intel_connector *connector = intel_dp->attached_connector;
5153 struct drm_i915_private *i915 = to_i915(connector->base.dev);
5154 const struct drm_display_mode *fixed_mode =
5155 intel_panel_preferred_fixed_mode(connector);
5156
5157 if (!fixed_mode)
5158 return;
5159
5160 drm_connector_set_panel_orientation_with_quirk(&connector->base,
5161 i915->vbt.orientation,
5162 fixed_mode->hdisplay,
5163 fixed_mode->vdisplay);
5164 }
5165
5166 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5167 struct intel_connector *intel_connector)
5168 {
5169 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
5170 struct drm_device *dev = &dev_priv->drm;
5171 struct drm_connector *connector = &intel_connector->base;
5172 struct drm_display_mode *fixed_mode;
5173 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
5174 bool has_dpcd;
5175 enum pipe pipe = INVALID_PIPE;
5176 struct edid *edid;
5177
5178 if (!intel_dp_is_edp(intel_dp))
5179 return true;
5180
5181 /*
5182 * On IBX/CPT we may get here with LVDS already registered. Since the
5183 * driver uses the only internal power sequencer available for both
5184 * eDP and LVDS, bail out early in this case to prevent interfering
5185 * with an already powered-on LVDS power sequencer.
5186 */
5187 if (intel_get_lvds_encoder(dev_priv)) {
5188 drm_WARN_ON(dev,
5189 !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
5190 drm_info(&dev_priv->drm,
5191 "LVDS was detected, not registering eDP\n");
5192
5193 return false;
5194 }
5195
5196 intel_pps_init(intel_dp);
5197
5198 /* Cache DPCD and EDID for edp. */
5199 has_dpcd = intel_edp_init_dpcd(intel_dp);
5200
5201 if (!has_dpcd) {
5202 /* if this fails, presume the device is a ghost */
5203 drm_info(&dev_priv->drm,
5204 "failed to retrieve link info, disabling eDP\n");
5205 goto out_vdd_off;
5206 }
5207
5208 mutex_lock(&dev->mode_config.mutex);
5209 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5210 if (!edid) {
5211 /* Fallback to EDID from ACPI OpRegion, if any */
5212 edid = intel_opregion_get_edid(intel_connector);
5213 if (edid)
5214 drm_dbg_kms(&dev_priv->drm,
5215 "[CONNECTOR:%d:%s] Using OpRegion EDID\n",
5216 connector->base.id, connector->name);
5217 }
5218 if (edid) {
5219 if (drm_add_edid_modes(connector, edid)) {
5220 drm_connector_update_edid_property(connector, edid);
5221 } else {
5222 kfree(edid);
5223 edid = ERR_PTR(-EINVAL);
5224 }
5225 } else {
5226 edid = ERR_PTR(-ENOENT);
5227 }
5228 intel_connector->edid = edid;
5229
5230 intel_bios_init_panel(dev_priv, &intel_connector->panel,
5231 encoder->devdata, IS_ERR(edid) ? NULL : edid);
5232
5233 intel_panel_add_edid_fixed_modes(intel_connector,
5234 intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE,
5235 intel_vrr_is_capable(intel_connector));
5236
5237 /* MSO requires information from the EDID */
5238 intel_edp_mso_init(intel_dp);
5239
5240 /* multiply the mode clock and horizontal timings for MSO */
5241 list_for_each_entry(fixed_mode, &intel_connector->panel.fixed_modes, head)
5242 intel_edp_mso_mode_fixup(intel_connector, fixed_mode);
5243
5244 /* fallback to VBT if available for eDP */
5245 if (!intel_panel_preferred_fixed_mode(intel_connector))
5246 intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
5247
5248 mutex_unlock(&dev->mode_config.mutex);
5249
5250 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
5251 /*
5252 * Figure out the current pipe for the initial backlight setup.
5253 * If the current pipe isn't valid, try the PPS pipe, and if that
5254 * fails just assume pipe A.
5255 */
5256 pipe = vlv_active_pipe(intel_dp);
5257
5258 if (pipe != PIPE_A && pipe != PIPE_B)
5259 pipe = intel_dp->pps.pps_pipe;
5260
5261 if (pipe != PIPE_A && pipe != PIPE_B)
5262 pipe = PIPE_A;
5263
5264 drm_dbg_kms(&dev_priv->drm,
5265 "using pipe %c for initial backlight setup\n",
5266 pipe_name(pipe));
5267 }
5268
5269 intel_panel_init(intel_connector);
5270
5271 intel_backlight_setup(intel_connector, pipe);
5272
5273 intel_edp_add_properties(intel_dp);
5274
5275 intel_pps_init_late(intel_dp);
5276
5277 return true;
5278
5279 out_vdd_off:
5280 intel_pps_vdd_off_sync(intel_dp);
5281
5282 return false;
5283 }
5284
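/*
 * Worker scheduled when link training fails: mark the connector's link
 * status as BAD and send a hotplug uevent so userspace re-probes the
 * connector and performs a new modeset.
 */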
5285 static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
5286 {
5287 struct intel_connector *intel_connector;
5288 struct drm_connector *connector;
5289
5290 intel_connector = container_of(work, typeof(*intel_connector),
5291 modeset_retry_work);
5292 connector = &intel_connector->base;
5293 drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s]\n", connector->base.id,
5294 connector->name);
5295
5296 /* Grab the locks before changing the connector property */
5297 mutex_lock(&connector->dev->mode_config.mutex);
5298 /* Set the connector link status to BAD and send a uevent to notify
5299 * userspace to do a modeset.
5300 */
5301 drm_connector_set_link_status_property(connector,
5302 DRM_MODE_LINK_STATUS_BAD);
5303 mutex_unlock(&connector->dev->mode_config.mutex);
5304 /* Send a hotplug uevent so userspace can reprobe */
5305 drm_kms_helper_connector_hotplug_event(connector);
5306 }
5307
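/*
 * Create the DP/eDP connector for a digital port: register the DRM
 * connector and AUX channel, determine whether the port drives eDP or
 * external DP, initialize sink/source rates, panel and backlight (eDP),
 * MST, connector properties, HDCP (external DP only) and PSR. Returns
 * false and cleans up if the port cannot be used.
 */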
5308 bool
5309 intel_dp_init_connector(struct intel_digital_port *dig_port,
5310 struct intel_connector *intel_connector)
5311 {
5312 struct drm_connector *connector = &intel_connector->base;
5313 struct intel_dp *intel_dp = &dig_port->dp;
5314 struct intel_encoder *intel_encoder = &dig_port->base;
5315 struct drm_device *dev = intel_encoder->base.dev;
5316 struct drm_i915_private *dev_priv = to_i915(dev);
5317 enum port port = intel_encoder->port;
5318 enum phy phy = intel_port_to_phy(dev_priv, port);
5319 int type;
5320
5321 /* Initialize the work used to retry the modeset if link training fails */
5322 INIT_WORK(&intel_connector->modeset_retry_work,
5323 intel_dp_modeset_retry_work_fn);
5324
5325 if (drm_WARN(dev, dig_port->max_lanes < 1,
5326 "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
5327 dig_port->max_lanes, intel_encoder->base.base.id,
5328 intel_encoder->base.name))
5329 return false;
5330
5331 intel_dp->reset_link_params = true;
5332 intel_dp->pps.pps_pipe = INVALID_PIPE;
5333 intel_dp->pps.active_pipe = INVALID_PIPE;
5334
5335 /* Preserve the current hw state. */
5336 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
5337 intel_dp->attached_connector = intel_connector;
5338
5339 if (intel_dp_is_port_edp(dev_priv, port)) {
5340 /*
5341 * Currently we don't support eDP on TypeC ports, although in
5342 * theory it could work on TypeC legacy ports.
5343 */
5344 drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
5345 type = DRM_MODE_CONNECTOR_eDP;
5346 intel_encoder->type = INTEL_OUTPUT_EDP;
5347
5348 /* eDP is only supported on ports B and C on VLV/CHV */
5349 if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
5350 IS_CHERRYVIEW(dev_priv)) &&
5351 port != PORT_B && port != PORT_C))
5352 return false;
5353 } else {
5354 type = DRM_MODE_CONNECTOR_DisplayPort;
5355 }
5356
5357 intel_dp_set_default_sink_rates(intel_dp);
5358 intel_dp_set_default_max_sink_lane_count(intel_dp);
5359
5360 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5361 intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);
5362
5363 drm_dbg_kms(&dev_priv->drm,
5364 "Adding %s connector on [ENCODER:%d:%s]\n",
5365 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5366 intel_encoder->base.base.id, intel_encoder->base.name);
5367
5368 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5369 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5370
5371 if (!HAS_GMCH(dev_priv))
5372 connector->interlace_allowed = true;
5373 connector->doublescan_allowed = false;
5374
5375 intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
5376
5377 intel_dp_aux_init(intel_dp);
5378
5379 intel_connector_attach_encoder(intel_connector, intel_encoder);
5380
5381 if (HAS_DDI(dev_priv))
5382 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5383 else
5384 intel_connector->get_hw_state = intel_connector_get_hw_state;
5385
5386 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5387 intel_dp_aux_fini(intel_dp);
5388 goto fail;
5389 }
5390
5391 intel_dp_set_source_rates(intel_dp);
5392 intel_dp_set_common_rates(intel_dp);
5393 intel_dp_reset_max_link_params(intel_dp);
5394
5395 /* init MST on ports that can support it */
5396 intel_dp_mst_encoder_init(dig_port,
5397 intel_connector->base.base.id);
5398
5399 intel_dp_add_properties(intel_dp, connector);
5400
5401 if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
5402 int ret = intel_dp_hdcp_init(dig_port, intel_connector);
5403 if (ret)
5404 drm_dbg_kms(&dev_priv->drm,
5405 "HDCP init failed, skipping.\n");
5406 }
5407
5408 /* For the G4X desktop chip, PEG_BAND_GAP_DATA bits 3:0 must first be
5409 * written with 0xd. Failing to do so results in spurious interrupts
5410 * being generated on the port when no cable is attached.
5411 */
5412 if (IS_G45(dev_priv)) {
5413 u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
5414 intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
5415 (temp & ~0xf) | 0xd);
5416 }
5417
5418 intel_dp->frl.is_trained = false;
5419 intel_dp->frl.trained_rate_gbps = 0;
5420
5421 intel_psr_init(intel_dp);
5422
5423 return true;
5424
5425 fail:
5426 drm_connector_cleanup(connector);
5427
5428 return false;
5429 }
5430
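/*
 * Suspend the DP MST topology manager of every DDI encoder that has MST
 * source support and an active MST link.
 */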
5431 void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
5432 {
5433 struct intel_encoder *encoder;
5434
5435 if (!HAS_DISPLAY(dev_priv))
5436 return;
5437
5438 for_each_intel_encoder(&dev_priv->drm, encoder) {
5439 struct intel_dp *intel_dp;
5440
5441 if (encoder->type != INTEL_OUTPUT_DDI)
5442 continue;
5443
5444 intel_dp = enc_to_intel_dp(encoder);
5445
5446 if (!intel_dp_mst_source_support(intel_dp))
5447 continue;
5448
5449 if (intel_dp->is_mst)
5450 drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
5451 }
5452 }
5453
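/*
 * Resume the DP MST topology manager of every MST-capable DDI encoder;
 * if a topology fails to resume, disable MST on that encoder.
 */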
5454 void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
5455 {
5456 struct intel_encoder *encoder;
5457
5458 if (!HAS_DISPLAY(dev_priv))
5459 return;
5460
5461 for_each_intel_encoder(&dev_priv->drm, encoder) {
5462 struct intel_dp *intel_dp;
5463 int ret;
5464
5465 if (encoder->type != INTEL_OUTPUT_DDI)
5466 continue;
5467
5468 intel_dp = enc_to_intel_dp(encoder);
5469
5470 if (!intel_dp_mst_source_support(intel_dp))
5471 continue;
5472
5473 ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
5474 true);
5475 if (ret) {
5476 intel_dp->is_mst = false;
5477 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
5478 false);
5479 }
5480 }
5481 }