/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <asm/byteorder.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_DPRX_ESI_LEN 14

/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER	61440
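/* i.e. 2 FIFOs * 640 entries * 6 bytes = 7680 bytes = 61440 bits, so the
 * limit above appears to be expressed in bits.
 */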
#define DP_DSC_MIN_SUPPORTED_BPC	8
#define DP_DSC_MAX_SUPPORTED_BPC	10

/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE		2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0	340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1	400000

/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */
#define DP_DSC_FEC_OVERHEAD_FACTOR	976
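/* i.e. FEC consumes 2.4% of the link, leaving a factor of 0.976; the value
 * is presumably stored scaled by 1000 so the bandwidth math stays integer.
 */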

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below we only provide the fixed rates; the variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming fractional division for m2.
	 * m2 is stored in fixed-point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};

/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/*
 * With a single-pipe configuration, the HW can support a maximum of
 * 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_encoder *encoder,
			       const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state);
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);

/* update sink rates from dpcd */
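/* e.g. a DPCD max link rate code of 0x14 (HBR2, 5.4 Gbps) yields
 * sink_rates = { 162000, 270000, 540000 } (kHz) and num_sink_rates = 3.
 */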
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;

	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);

	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	intel_dp->num_sink_rates = i;
}

/* Get length of rates array potentially limited by max_rate. */
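/* e.g. rates = { 162000, 270000, 540000 } with max_rate = 270000 returns 2 */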
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* Limit results by potentially reduced max rate */
	for (i = 0; i < len; i++) {
		if (rates[len - i - 1] <= max_rate)
			return len - i;
	}

	return 0;
}

/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}

static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
	u32 lane_info;

	if (tc_port == PORT_TC_NONE || dig_port->tc_type != TC_PORT_TYPEC)
		return 4;

	lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
		     DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
		    DP_LANE_ASSIGNMENT_SHIFT(tc_port);

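	/*
	 * lane_info is a four-bit mask from the Type-C FIA that appears to
	 * carry one bit per lane assigned to this port: a single bit set
	 * gives 1 lane, bits 0-1 (0x3) or 2-3 (0xc) give 2 lanes, and all
	 * four bits (0xf) give 4 lanes.
	 */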
	switch (lane_info) {
	default:
		MISSING_CASE(lane_info);
	case 1:
	case 2:
	case 4:
	case 8:
		return 1;
	case 3:
	case 12:
		return 2;
	case 15:
		return 4;
	}
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	int source_max = intel_dig_port->max_lanes;
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
	int fia_max = intel_dp_get_fia_supported_lane_count(intel_dp);

	return min3(source_max, sink_max, fia_max);
}

int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}

int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
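	/* e.g. 148500 kHz * 24 bpp / 8 = 445500 kB/s for 1080p60 at 24 bpp */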
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}

int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
	 * link rate that is generally expressed in Gbps. Since 8 bits of data
	 * are transmitted every LS_Clk per lane, there is no need to account
	 * for the channel encoding that is done in the PHY layer here.
	 */
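	/* e.g. HBR2: 540000 kHz * 4 lanes = 2160000 kB/s of link bandwidth */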

	return max_link_clock * max_lanes;
}

static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	int max_dotclk = dev_priv->max_dotclk_freq;
	int ds_max_dotclk;

	int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;

	if (type != DP_DS_PORT_TYPE_VGA)
		return max_dotclk;

	ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
						    intel_dp->downstream_ports);

	if (ds_max_dotclk != 0)
		max_dotclk = min(max_dotclk, ds_max_dotclk);

	return max_dotclk;
}

static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return 540000;

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return 810000;

	/* For other SKUs, max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return 540000;

	return 810000;
}

static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	if (intel_port_is_combophy(dev_priv, port) &&
	    !intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}

static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct ddi_vbt_port_info *info =
		&dev_priv->vbt.ddi_port_info[dig_port->base.port];
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate;

	/* This should only be done once */
	WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);

	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}

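/*
 * Merge-style intersection of two ascending rate arrays: walk both with an
 * index each, advance the side holding the smaller rate, and copy every
 * match into common_rates. Returns the number of common rates found.
 */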
static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i;

	for (i = 0; i < len; i++)
		if (rate == rates[i])
			return i;

	return -1;
}

static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (WARN_ON(intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}

static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
					u8 lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->max_link_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}

static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
						     int link_rate,
						     u8 lane_count)
{
	const struct drm_display_mode *fixed_mode =
		intel_dp->attached_connector->panel.fixed_mode;
	int mode_rate, max_rate;

	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
	max_rate = intel_dp_max_data_rate(link_rate, lane_count);
	if (mode_rate > max_rate)
		return false;

	return true;
}

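/*
 * Fallback order after a failed link training: first step down to the next
 * lower common link rate at the current lane count; once at the lowest
 * rate, halve the lane count and start over from the maximum common rate.
 * For eDP, a fallback that could no longer drive the fixed panel mode is
 * rejected and training is retried with the same parameters instead.
 */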
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	int index;

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		DRM_ERROR("Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
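	/* e.g. a 6.4-format value of 0x10A (16.625) becomes 16 after the shift */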
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay);
		}
	}

	if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
	    target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

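/*
 * Pack up to four bytes MSB-first into one AUX channel data register word,
 * e.g. { 0x11, 0x22 } becomes 0x11220000.
 */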
u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
	int i;
	u32 v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((u32)src[i]) << ((3 - i) * 8);
	return v;
}

static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}

static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd);
static void
intel_dp_pps_init(struct intel_dp *intel_dp);

static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * See intel_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(dp_to_dig_port(intel_dp)));

	mutex_lock(&dev_priv->pps_mutex);

	return wakeref;
}

static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dp_to_dig_port(intel_dp)),
				wakeref);
	return 0;
}

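/*
 * for-loop wrapper that runs its body exactly once with the pps mutex held:
 * pps_lock() returns a non-zero wakeref which keeps the loop condition true,
 * and pps_unlock() returns 0, terminating the loop.
 */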
#define with_pps_lock(dp, wf) \
	for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->base.port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->base.port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			DRM_ERROR("Failed to force on pll for pipe %c!\n",
				  pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}

static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
				intel_dp->active_pipe != intel_dp->pps_pipe);

			if (intel_dp->pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps_pipe);
		} else {
			WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);

			if (intel_dp->active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	return ffs(pipes) - 1;
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!intel_dp_is_edp(intel_dp));

	WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
		intel_dp->active_pipe != intel_dp->pps_pipe);

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->base.port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int backlight_controller = dev_priv->vbt.backlight.controller;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!intel_dp_is_edp(intel_dp));

	if (!intel_dp->pps_reset)
		return backlight_controller;

	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been set up during connector init.
	 */
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

	return backlight_controller;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}

void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
		    !IS_GEN9_LP(dev_priv)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should always use them.
	 */

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

		WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}

struct pps_registers {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_stat;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;
};

static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);
	if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv) &&
	    !HAS_PCH_ICP(dev_priv))
		regs->pp_div = PP_DIVISOR(pps_idx);
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_ctrl;
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_stat;
}

/* Reboot notifier handler to shut down panel power to guarantee T12 timing.
   This function is only applicable when the panel PM state is not to be
   tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(*intel_dp),
						 edp_notifier);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
			i915_reg_t pp_ctrl_reg, pp_div_reg;
			u32 pp_div;

			pp_ctrl_reg = PP_CONTROL(pipe);
			pp_div_reg = PP_DIVISOR(pipe);
			pp_div = I915_READ(pp_div_reg);
			pp_div &= PP_REFERENCE_DIVIDER_MASK;

			/* 0x1F write to PP_DIV_REG sets max cycle delay */
			I915_WRITE(pp_div_reg, pp_div | 0x1F);
			I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS);
			msleep(intel_dp->panel_power_cycle_delay);
		}
	}

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	u32 status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(10));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout!\n");
#undef C

	return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based on hrawclk and should run at 2 MHz,
	 * so take the hrawclk value, divide it by 2000 and use that.
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based on the cdclk or PCH rawclk and should
	 * run at 2 MHz, so take the cdclk or PCH rawclk value, divide it by
	 * 2000 and use that.
	 */
	if (dig_port->aux_ch == AUX_CH_A)
		return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
	else
		return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}

static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(intel_dig_port->base.base.dev);
	u32 precharge, timeout;

	if (IS_GEN(dev_priv, 6))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev_priv))
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 unused)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	u32 ret;

	ret = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

	if (intel_dig_port->tc_type == TC_PORT_TBT)
		ret |= DP_AUX_CH_CTL_TBT_IO;

	return ret;
}

static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(intel_dig_port->base.base.dev);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	intel_wakeref_t wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	wakeref = pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to the upper
	 * layers to turn it off. But e.g. for i2c-dev access we need to turn
	 * it on/off ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/*
	 * DP AUX is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going
	 * into deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data[i >> 2],
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea what happened, so we return -EBUSY so the
	 * drm layer takes care of the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp, wakeref);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)

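/*
 * Build the four-byte AUX header: the 4-bit request opcode and the 20-bit
 * address packed MSB-first into the first three bytes, followed by the
 * transfer length minus one.
 */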
static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}

static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		WARN_ON(!msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}

static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}

static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	kfree(intel_dp->aux.name);
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c",
				       port_name(encoder->port));
	intel_dp->aux.transfer = intel_dp_aux_transfer;
}

bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 540000;
}

bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 810000;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev_priv)) {
		divisor = g4x_dpll;
		count = ARRAY_SIZE(g4x_dpll);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int len;

	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
	if (WARN_ON(len <= 0))
		return 162000;

	return intel_dp->common_rates[len - 1];
}

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	int i = intel_dp_rate_index(intel_dp->sink_rates,
				    intel_dp->num_sink_rates, rate);

	if (WARN_ON(i < 0))
		i = 0;

	return i;
}

void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
			   u8 *link_bw, u8 *rate_select)
{
	/* eDP 1.4 rate select method. */
	if (intel_dp->use_rate_select) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}

struct link_config_limits {
	int min_clock, max_clock;
	int min_lane_count, max_lane_count;
	int min_bpp, max_bpp;
};

static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	return INTEL_GEN(dev_priv) >= 11 &&
		pipe_config->cpu_transcoder != TRANSCODER_A;
}

static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *pipe_config)
{
	return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
		drm_dp_sink_supports_fec(intel_dp->fec_capable);
}

static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	return INTEL_GEN(dev_priv) >= 10 &&
		pipe_config->cpu_transcoder != TRANSCODER_A;
}

static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *pipe_config)
{
	if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable)
		return false;

	return intel_dp_source_supports_dsc(intel_dp, pipe_config) &&
		drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
}

static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpp = pipe_config->pipe_bpp;
	bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);

	if (bpc > 0)
		bpp = min(bpp, 3*bpc);

	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from VBT only for panels that don't have bpp in EDID */
		if (intel_connector->base.display_info.bpc == 0 &&
		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp.bpp);
			bpp = dev_priv->vbt.edp.bpp;
		}
	}

	return bpp;
}

/* Adjust link config limits based on compliance test requests. */
static void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		limits->min_bpp = limits->max_bpp = bpp;
		pipe_config->dither_force_disable = bpp == 6 * 3;

		DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/* Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
					       intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(intel_dp->common_rates,
						    intel_dp->num_common_rates,
						    intel_dp->compliance.test_link_rate);
			if (index >= 0)
				limits->min_clock = limits->max_clock = index;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}

/* Optimize link config in order: max bpp, min clock, min lanes */
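/*
 * i.e. step bpp down from the maximum and, for each bpp, pick the lowest
 * common link rate and then the smallest (power-of-two) lane count that
 * still satisfies the mode's bandwidth requirement.
 */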
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct link_config_limits *limits)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int bpp, clock, lane_count;
	int mode_rate, link_clock, link_avail;

	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
			for (lane_count = limits->min_lane_count;
			     lane_count <= limits->max_lane_count;
			     lane_count <<= 1) {
				link_clock = intel_dp->common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_clock;

					return 0;
				}
			}
		}
	}

	return -EINVAL;
}

/* Optimize link config in order: max bpp, min lanes, min clock */
static int
intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct link_config_limits *limits)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int bpp, clock, lane_count;
	int mode_rate, link_clock, link_avail;

	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (lane_count = limits->min_lane_count;
		     lane_count <= limits->max_lane_count;
		     lane_count <<= 1) {
			for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
				link_clock = intel_dp->common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_clock;

					return 0;
				}
			}
		}
	}

	return -EINVAL;
}

1898 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
1899 {
1900 int i, num_bpc;
1901 u8 dsc_bpc[3] = {0};
1902
1903 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
1904 dsc_bpc);
1905 for (i = 0; i < num_bpc; i++) {
1906 if (dsc_max_bpc >= dsc_bpc[i])
1907 return dsc_bpc[i] * 3;
1908 }
1909
1910 return 0;
1911 }
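/*
 * Illustrative example (assuming drm_dp_dsc_sink_supported_input_bpcs()
 * fills dsc_bpc[] in descending order): for a sink supporting 12, 10
 * and 8 bpc and dsc_max_bpc == 10, the loop above skips 12, matches 10
 * and returns 10 * 3 = 30 bpp.
 */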
1912
1913 static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
1914 struct intel_crtc_state *pipe_config,
1915 struct drm_connector_state *conn_state,
1916 struct link_config_limits *limits)
1917 {
1918 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1919 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
1920 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1921 u8 dsc_max_bpc;
1922 int pipe_bpp;
1923 int ret;
1924
1925 if (!intel_dp_supports_dsc(intel_dp, pipe_config))
1926 return -EINVAL;
1927
1928 dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
1929 conn_state->max_requested_bpc);
1930
1931 pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
1932 if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
1933 DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
1934 return -EINVAL;
1935 }
1936
1937 /*
1938 * For now enable DSC for max bpp, max link rate, max lane count.
1939 * Optimize this later for the minimum possible link rate/lane count
1940 * with DSC enabled for the requested mode.
1941 */
1942 pipe_config->pipe_bpp = pipe_bpp;
1943 pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
1944 pipe_config->lane_count = limits->max_lane_count;
1945
1946 if (intel_dp_is_edp(intel_dp)) {
1947 pipe_config->dsc_params.compressed_bpp =
1948 min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
1949 pipe_config->pipe_bpp);
1950 pipe_config->dsc_params.slice_count =
1951 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
1952 true);
1953 } else {
1954 u16 dsc_max_output_bpp;
1955 u8 dsc_dp_slice_count;
1956
1957 dsc_max_output_bpp =
1958 intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
1959 pipe_config->lane_count,
1960 adjusted_mode->crtc_clock,
1961 adjusted_mode->crtc_hdisplay);
1962 dsc_dp_slice_count =
1963 intel_dp_dsc_get_slice_count(intel_dp,
1964 adjusted_mode->crtc_clock,
1965 adjusted_mode->crtc_hdisplay);
1966 if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
1967 DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
1968 return -EINVAL;
1969 }
1970 pipe_config->dsc_params.compressed_bpp = min_t(u16,
1971 dsc_max_output_bpp >> 4,
1972 pipe_config->pipe_bpp);
1973 pipe_config->dsc_params.slice_count = dsc_dp_slice_count;
1974 }
1975 /*
1976 * The VDSC engine operates at 1 pixel per clock, so if the peak pixel
1977 * rate is greater than the maximum cdclk frequency we need to split the
1978 * stream across 2 VDSC instances, which requires a slice count > 1.
1979 */
1980 if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
1981 if (pipe_config->dsc_params.slice_count > 1) {
1982 pipe_config->dsc_params.dsc_split = true;
1983 } else {
1984 DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
1985 return -EINVAL;
1986 }
1987 }
1988
1989 ret = intel_dp_compute_dsc_params(intel_dp, pipe_config);
1990 if (ret < 0) {
1991 DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
1992 "Compressed BPP = %d\n",
1993 pipe_config->pipe_bpp,
1994 pipe_config->dsc_params.compressed_bpp);
1995 return ret;
1996 }
1997
1998 pipe_config->dsc_params.compression_enable = true;
1999 DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
2000 "Compressed Bpp = %d Slice Count = %d\n",
2001 pipe_config->pipe_bpp,
2002 pipe_config->dsc_params.compressed_bpp,
2003 pipe_config->dsc_params.slice_count);
2004
2005 return 0;
2006 }
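/*
 * Note on units (editorial): the sink's maximum DSC output bpp in the
 * DPCD is expressed in 1/16 bpp increments, which is why the values
 * above are shifted right by 4 before use; e.g. a raw value of 384
 * corresponds to 384 / 16 = 24 bpp.
 */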
2007
2008 static int
2009 intel_dp_compute_link_config(struct intel_encoder *encoder,
2010 struct intel_crtc_state *pipe_config,
2011 struct drm_connector_state *conn_state)
2012 {
2013 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2014 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2015 struct link_config_limits limits;
2016 int common_len;
2017 int ret;
2018
2019 common_len = intel_dp_common_len_rate_limit(intel_dp,
2020 intel_dp->max_link_rate);
2021
2022 /* No common link rates between source and sink */
2023 WARN_ON(common_len <= 0);
2024
2025 limits.min_clock = 0;
2026 limits.max_clock = common_len - 1;
2027
2028 limits.min_lane_count = 1;
2029 limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
2030
2031 limits.min_bpp = 6 * 3;
2032 limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
2033
2034 if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) {
2035 /*
2036 * Use the maximum clock and number of lanes the eDP panel
2037 * advertises being capable of. The eDP 1.3 and earlier panels
2038 * are generally designed to support only a single clock and
2039 * lane configuration, and typically these values correspond to
2040 * the native resolution of the panel. With eDP 1.4 rate select
2041 * and DSC, this is decreasingly the case, and we need to be
2042 * able to select less than maximum link config.
2043 */
2044 limits.min_lane_count = limits.max_lane_count;
2045 limits.min_clock = limits.max_clock;
2046 }
2047
2048 intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
2049
2050 DRM_DEBUG_KMS("DP link computation with max lane count %i "
2051 "max rate %d max bpp %d pixel clock %iKHz\n",
2052 limits.max_lane_count,
2053 intel_dp->common_rates[limits.max_clock],
2054 limits.max_bpp, adjusted_mode->crtc_clock);
2055
2056 if (intel_dp_is_edp(intel_dp))
2057 /*
2058 * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
2059 * section A.1: "It is recommended that the minimum number of
2060 * lanes be used, using the minimum link rate allowed for that
2061 * lane configuration."
2062 *
2063 * Note that we use the max clock and lane count for eDP 1.3 and
2064 * earlier, and fast vs. wide is irrelevant.
2065 */
2066 ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config,
2067 &limits);
2068 else
2069 /* Optimize for slow and wide. */
2070 ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
2071 &limits);
2072
2073 /* enable compression if the mode doesn't fit the available link bandwidth */
2074 DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
2075 if (ret || intel_dp->force_dsc_en) {
2076 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
2077 conn_state, &limits);
2078 if (ret < 0)
2079 return ret;
2080 }
2081
2082 if (pipe_config->dsc_params.compression_enable) {
2083 DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
2084 pipe_config->lane_count, pipe_config->port_clock,
2085 pipe_config->pipe_bpp,
2086 pipe_config->dsc_params.compressed_bpp);
2087
2088 DRM_DEBUG_KMS("DP link rate required %i available %i\n",
2089 intel_dp_link_required(adjusted_mode->crtc_clock,
2090 pipe_config->dsc_params.compressed_bpp),
2091 intel_dp_max_data_rate(pipe_config->port_clock,
2092 pipe_config->lane_count));
2093 } else {
2094 DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
2095 pipe_config->lane_count, pipe_config->port_clock,
2096 pipe_config->pipe_bpp);
2097
2098 DRM_DEBUG_KMS("DP link rate required %i available %i\n",
2099 intel_dp_link_required(adjusted_mode->crtc_clock,
2100 pipe_config->pipe_bpp),
2101 intel_dp_max_data_rate(pipe_config->port_clock,
2102 pipe_config->lane_count));
2103 }
2104 return 0;
2105 }
2106
2107 int
2108 intel_dp_compute_config(struct intel_encoder *encoder,
2109 struct intel_crtc_state *pipe_config,
2110 struct drm_connector_state *conn_state)
2111 {
2112 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2113 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2114 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2115 struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
2116 enum port port = encoder->port;
2117 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
2118 struct intel_connector *intel_connector = intel_dp->attached_connector;
2119 struct intel_digital_connector_state *intel_conn_state =
2120 to_intel_digital_connector_state(conn_state);
2121 bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
2122 DP_DPCD_QUIRK_CONSTANT_N);
2123 int ret;
2124
2125 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
2126 pipe_config->has_pch_encoder = true;
2127
2128 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
2129 if (lspcon->active)
2130 lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
2131
2132 pipe_config->has_drrs = false;
2133 if (IS_G4X(dev_priv) || port == PORT_A)
2134 pipe_config->has_audio = false;
2135 else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
2136 pipe_config->has_audio = intel_dp->has_audio;
2137 else
2138 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
2139
2140 if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
2141 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
2142 adjusted_mode);
2143
2144 if (INTEL_GEN(dev_priv) >= 9) {
2145 ret = skl_update_scaler_crtc(pipe_config);
2146 if (ret)
2147 return ret;
2148 }
2149
2150 if (HAS_GMCH(dev_priv))
2151 intel_gmch_panel_fitting(intel_crtc, pipe_config,
2152 conn_state->scaling_mode);
2153 else
2154 intel_pch_panel_fitting(intel_crtc, pipe_config,
2155 conn_state->scaling_mode);
2156 }
2157
2158 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
2159 return -EINVAL;
2160
2161 if (HAS_GMCH(dev_priv) &&
2162 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
2163 return -EINVAL;
2164
2165 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
2166 return -EINVAL;
2167
2168 pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
2169 intel_dp_supports_fec(intel_dp, pipe_config);
2170
2171 ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
2172 if (ret < 0)
2173 return ret;
2174
2175 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
2176 /*
2177 * See:
2178 * CEA-861-E - 5.1 Default Encoding Parameters
2179 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
2180 */
2181 pipe_config->limited_color_range =
2182 pipe_config->pipe_bpp != 18 &&
2183 drm_default_rgb_quant_range(adjusted_mode) ==
2184 HDMI_QUANTIZATION_RANGE_LIMITED;
2185 } else {
2186 pipe_config->limited_color_range =
2187 intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
2188 }
2189
2190 if (!pipe_config->dsc_params.compression_enable)
2191 intel_link_compute_m_n(pipe_config->pipe_bpp,
2192 pipe_config->lane_count,
2193 adjusted_mode->crtc_clock,
2194 pipe_config->port_clock,
2195 &pipe_config->dp_m_n,
2196 constant_n);
2197 else
2198 intel_link_compute_m_n(pipe_config->dsc_params.compressed_bpp,
2199 pipe_config->lane_count,
2200 adjusted_mode->crtc_clock,
2201 pipe_config->port_clock,
2202 &pipe_config->dp_m_n,
2203 constant_n);
2204
2205 if (intel_connector->panel.downclock_mode != NULL &&
2206 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
2207 pipe_config->has_drrs = true;
2208 intel_link_compute_m_n(pipe_config->pipe_bpp,
2209 pipe_config->lane_count,
2210 intel_connector->panel.downclock_mode->clock,
2211 pipe_config->port_clock,
2212 &pipe_config->dp_m2_n2,
2213 constant_n);
2214 }
2215
2216 if (!HAS_DDI(dev_priv))
2217 intel_dp_set_clock(encoder, pipe_config);
2218
2219 intel_psr_compute_config(intel_dp, pipe_config);
2220
2221 return 0;
2222 }
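/*
 * Illustrative M/N arithmetic (assuming intel_link_compute_m_n()
 * derives the data M/N ratio as bpp * pixel_clock versus
 * lane_count * link_clock * 8): for 148500 kHz at 24 bpp over HBR x2,
 * the ratio is (24 * 148500) / (2 * 270000 * 8) = 0.825, i.e. the
 * stream occupies 82.5% of the link payload bandwidth.
 */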
2223
2224 void intel_dp_set_link_params(struct intel_dp *intel_dp,
2225 int link_rate, u8 lane_count,
2226 bool link_mst)
2227 {
2228 intel_dp->link_trained = false;
2229 intel_dp->link_rate = link_rate;
2230 intel_dp->lane_count = lane_count;
2231 intel_dp->link_mst = link_mst;
2232 }
2233
2234 static void intel_dp_prepare(struct intel_encoder *encoder,
2235 const struct intel_crtc_state *pipe_config)
2236 {
2237 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2238 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2239 enum port port = encoder->port;
2240 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
2241 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2242
2243 intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
2244 pipe_config->lane_count,
2245 intel_crtc_has_type(pipe_config,
2246 INTEL_OUTPUT_DP_MST));
2247
2248 /*
2249 * There are four kinds of DP registers:
2250 *
2251 * IBX PCH
2252 * SNB CPU
2253 * IVB CPU
2254 * CPT PCH
2255 *
2256 * IBX PCH and CPU are the same for almost everything,
2257 * except that the CPU DP PLL is configured in this
2258 * register
2259 *
2260 * CPT PCH is quite different, having many bits moved
2261 * to the TRANS_DP_CTL register instead. That
2262 * configuration happens (oddly) in ironlake_pch_enable
2263 */
2264
2265 /* Preserve the BIOS-computed detected bit. This is
2266 * supposed to be read-only.
2267 */
2268 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
2269
2270 /* Handle DP bits in common between all three register formats */
2271 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
2272 intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);
2273
2274 /* Split out the IBX/CPU vs CPT settings */
2275
2276 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
2277 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2278 intel_dp->DP |= DP_SYNC_HS_HIGH;
2279 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2280 intel_dp->DP |= DP_SYNC_VS_HIGH;
2281 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
2282
2283 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2284 intel_dp->DP |= DP_ENHANCED_FRAMING;
2285
2286 intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
2287 } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
2288 u32 trans_dp;
2289
2290 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
2291
2292 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2293 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2294 trans_dp |= TRANS_DP_ENH_FRAMING;
2295 else
2296 trans_dp &= ~TRANS_DP_ENH_FRAMING;
2297 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
2298 } else {
2299 if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
2300 intel_dp->DP |= DP_COLOR_RANGE_16_235;
2301
2302 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2303 intel_dp->DP |= DP_SYNC_HS_HIGH;
2304 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2305 intel_dp->DP |= DP_SYNC_VS_HIGH;
2306 intel_dp->DP |= DP_LINK_TRAIN_OFF;
2307
2308 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2309 intel_dp->DP |= DP_ENHANCED_FRAMING;
2310
2311 if (IS_CHERRYVIEW(dev_priv))
2312 intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
2313 else
2314 intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
2315 }
2316 }
2317
2318 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
2319 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
2320
2321 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
2322 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
2323
2324 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
2325 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
2326
2327 static void intel_pps_verify_state(struct intel_dp *intel_dp);
2328
2329 static void wait_panel_status(struct intel_dp *intel_dp,
2330 u32 mask,
2331 u32 value)
2332 {
2333 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2334 i915_reg_t pp_stat_reg, pp_ctrl_reg;
2335
2336 lockdep_assert_held(&dev_priv->pps_mutex);
2337
2338 intel_pps_verify_state(intel_dp);
2339
2340 pp_stat_reg = _pp_stat_reg(intel_dp);
2341 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2342
2343 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
2344 mask, value,
2345 I915_READ(pp_stat_reg),
2346 I915_READ(pp_ctrl_reg));
2347
2348 if (intel_wait_for_register(dev_priv,
2349 pp_stat_reg, mask, value,
2350 5000))
2351 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
2352 I915_READ(pp_stat_reg),
2353 I915_READ(pp_ctrl_reg));
2354
2355 DRM_DEBUG_KMS("Wait complete\n");
2356 }
2357
2358 static void wait_panel_on(struct intel_dp *intel_dp)
2359 {
2360 DRM_DEBUG_KMS("Wait for panel power on\n");
2361 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
2362 }
2363
2364 static void wait_panel_off(struct intel_dp *intel_dp)
2365 {
2366 DRM_DEBUG_KMS("Wait for panel power off time\n");
2367 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
2368 }
2369
2370 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
2371 {
2372 ktime_t panel_power_on_time;
2373 s64 panel_power_off_duration;
2374
2375 DRM_DEBUG_KMS("Wait for panel power cycle\n");
2376
2377 /* Take the difference of the current time and the panel power off
2378 * time and then make the panel wait for t11_t12 if needed. */
2379 panel_power_on_time = ktime_get_boottime();
2380 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
2381
2382 /* If we disabled the VDD override bit last, the hardware won't
2383 * enforce the power cycle delay, so do the wait manually. */
2384 if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
2385 wait_remaining_ms_from_jiffies(jiffies,
2386 intel_dp->panel_power_cycle_delay - panel_power_off_duration);
2387
2388 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
2389 }
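/*
 * Worked example (illustrative): with a panel_power_cycle_delay
 * (T11+T12) of 500 ms and a panel that was powered off 200 ms ago,
 * the code above sleeps for the remaining 300 ms before polling the
 * power sequencer state.
 */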
2390
2391 static void wait_backlight_on(struct intel_dp *intel_dp)
2392 {
2393 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
2394 intel_dp->backlight_on_delay);
2395 }
2396
2397 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
2398 {
2399 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
2400 intel_dp->backlight_off_delay);
2401 }
2402
2403 /* Read the current pp_control value, unlocking the register if it
2404 * is locked
2405 */
2406
2407 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
2408 {
2409 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2410 u32 control;
2411
2412 lockdep_assert_held(&dev_priv->pps_mutex);
2413
2414 control = I915_READ(_pp_ctrl_reg(intel_dp));
2415 if (WARN_ON(!HAS_DDI(dev_priv) &&
2416 (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
2417 control &= ~PANEL_UNLOCK_MASK;
2418 control |= PANEL_UNLOCK_REGS;
2419 }
2420 return control;
2421 }
2422
2423 /*
2424 * Must be paired with edp_panel_vdd_off().
2425 * Must hold pps_mutex around the whole on/off sequence.
2426 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2427 */
2428 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
2429 {
2430 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2431 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2432 u32 pp;
2433 i915_reg_t pp_stat_reg, pp_ctrl_reg;
2434 bool need_to_disable = !intel_dp->want_panel_vdd;
2435
2436 lockdep_assert_held(&dev_priv->pps_mutex);
2437
2438 if (!intel_dp_is_edp(intel_dp))
2439 return false;
2440
2441 cancel_delayed_work(&intel_dp->panel_vdd_work);
2442 intel_dp->want_panel_vdd = true;
2443
2444 if (edp_have_panel_vdd(intel_dp))
2445 return need_to_disable;
2446
2447 intel_display_power_get(dev_priv,
2448 intel_aux_power_domain(intel_dig_port));
2449
2450 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
2451 port_name(intel_dig_port->base.port));
2452
2453 if (!edp_have_panel_power(intel_dp))
2454 wait_panel_power_cycle(intel_dp);
2455
2456 pp = ironlake_get_pp_control(intel_dp);
2457 pp |= EDP_FORCE_VDD;
2458
2459 pp_stat_reg = _pp_stat_reg(intel_dp);
2460 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2461
2462 I915_WRITE(pp_ctrl_reg, pp);
2463 POSTING_READ(pp_ctrl_reg);
2464 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2465 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2466 /*
2467 * If the panel wasn't on, delay before accessing aux channel
2468 */
2469 if (!edp_have_panel_power(intel_dp)) {
2470 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
2471 port_name(intel_dig_port->base.port));
2472 msleep(intel_dp->panel_power_up_delay);
2473 }
2474
2475 return need_to_disable;
2476 }
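/*
 * Typical pairing (sketch; see intel_enable_dp() further below, which
 * holds pps_mutex for the whole sequence via with_pps_lock()):
 *
 *	with_pps_lock(intel_dp, wakeref) {
 *		edp_panel_vdd_on(intel_dp);
 *		edp_panel_on(intel_dp);
 *		edp_panel_vdd_off(intel_dp, true);
 *	}
 */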
2477
2478 /*
2479 * Must be paired with intel_edp_panel_vdd_off() or
2480 * intel_edp_panel_off().
2481 * Nested calls to these functions are not allowed since
2482 * we drop the lock. Caller must use some higher level
2483 * locking to prevent nested calls from other threads.
2484 */
2485 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
2486 {
2487 intel_wakeref_t wakeref;
2488 bool vdd;
2489
2490 if (!intel_dp_is_edp(intel_dp))
2491 return;
2492
2493 vdd = false;
2494 with_pps_lock(intel_dp, wakeref)
2495 vdd = edp_panel_vdd_on(intel_dp);
2496 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
2497 port_name(dp_to_dig_port(intel_dp)->base.port));
2498 }
2499
2500 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
2501 {
2502 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2503 struct intel_digital_port *intel_dig_port =
2504 dp_to_dig_port(intel_dp);
2505 u32 pp;
2506 i915_reg_t pp_stat_reg, pp_ctrl_reg;
2507
2508 lockdep_assert_held(&dev_priv->pps_mutex);
2509
2510 WARN_ON(intel_dp->want_panel_vdd);
2511
2512 if (!edp_have_panel_vdd(intel_dp))
2513 return;
2514
2515 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
2516 port_name(intel_dig_port->base.port));
2517
2518 pp = ironlake_get_pp_control(intel_dp);
2519 pp &= ~EDP_FORCE_VDD;
2520
2521 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2522 pp_stat_reg = _pp_stat_reg(intel_dp);
2523
2524 I915_WRITE(pp_ctrl_reg, pp);
2525 POSTING_READ(pp_ctrl_reg);
2526
2527 /* Make sure sequencer is idle before allowing subsequent activity */
2528 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2529 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2530
2531 if ((pp & PANEL_POWER_ON) == 0)
2532 intel_dp->panel_power_off_time = ktime_get_boottime();
2533
2534 intel_display_power_put_unchecked(dev_priv,
2535 intel_aux_power_domain(intel_dig_port));
2536 }
2537
2538 static void edp_panel_vdd_work(struct work_struct *__work)
2539 {
2540 struct intel_dp *intel_dp =
2541 container_of(to_delayed_work(__work),
2542 struct intel_dp, panel_vdd_work);
2543 intel_wakeref_t wakeref;
2544
2545 with_pps_lock(intel_dp, wakeref) {
2546 if (!intel_dp->want_panel_vdd)
2547 edp_panel_vdd_off_sync(intel_dp);
2548 }
2549 }
2550
2551 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2552 {
2553 unsigned long delay;
2554
2555 /*
2556 * Queue the timer to fire a long time from now (relative to the power
2557 * down delay) to keep the panel power up across a sequence of
2558 * operations.
2559 */
2560 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2561 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
2562 }
2563
2564 /*
2565 * Must be paired with edp_panel_vdd_on().
2566 * Must hold pps_mutex around the whole on/off sequence.
2567 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2568 */
2569 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2570 {
2571 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2572
2573 lockdep_assert_held(&dev_priv->pps_mutex);
2574
2575 if (!intel_dp_is_edp(intel_dp))
2576 return;
2577
2578 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2579 port_name(dp_to_dig_port(intel_dp)->base.port));
2580
2581 intel_dp->want_panel_vdd = false;
2582
2583 if (sync)
2584 edp_panel_vdd_off_sync(intel_dp);
2585 else
2586 edp_panel_vdd_schedule_off(intel_dp);
2587 }
2588
2589 static void edp_panel_on(struct intel_dp *intel_dp)
2590 {
2591 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2592 u32 pp;
2593 i915_reg_t pp_ctrl_reg;
2594
2595 lockdep_assert_held(&dev_priv->pps_mutex);
2596
2597 if (!intel_dp_is_edp(intel_dp))
2598 return;
2599
2600 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2601 port_name(dp_to_dig_port(intel_dp)->base.port));
2602
2603 if (WARN(edp_have_panel_power(intel_dp),
2604 "eDP port %c panel power already on\n",
2605 port_name(dp_to_dig_port(intel_dp)->base.port)))
2606 return;
2607
2608 wait_panel_power_cycle(intel_dp);
2609
2610 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2611 pp = ironlake_get_pp_control(intel_dp);
2612 if (IS_GEN(dev_priv, 5)) {
2613 /* ILK workaround: disable reset around power sequence */
2614 pp &= ~PANEL_POWER_RESET;
2615 I915_WRITE(pp_ctrl_reg, pp);
2616 POSTING_READ(pp_ctrl_reg);
2617 }
2618
2619 pp |= PANEL_POWER_ON;
2620 if (!IS_GEN(dev_priv, 5))
2621 pp |= PANEL_POWER_RESET;
2622
2623 I915_WRITE(pp_ctrl_reg, pp);
2624 POSTING_READ(pp_ctrl_reg);
2625
2626 wait_panel_on(intel_dp);
2627 intel_dp->last_power_on = jiffies;
2628
2629 if (IS_GEN(dev_priv, 5)) {
2630 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2631 I915_WRITE(pp_ctrl_reg, pp);
2632 POSTING_READ(pp_ctrl_reg);
2633 }
2634 }
2635
2636 void intel_edp_panel_on(struct intel_dp *intel_dp)
2637 {
2638 intel_wakeref_t wakeref;
2639
2640 if (!intel_dp_is_edp(intel_dp))
2641 return;
2642
2643 with_pps_lock(intel_dp, wakeref)
2644 edp_panel_on(intel_dp);
2645 }
2646
2647
2648 static void edp_panel_off(struct intel_dp *intel_dp)
2649 {
2650 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2651 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2652 u32 pp;
2653 i915_reg_t pp_ctrl_reg;
2654
2655 lockdep_assert_held(&dev_priv->pps_mutex);
2656
2657 if (!intel_dp_is_edp(intel_dp))
2658 return;
2659
2660 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2661 port_name(dig_port->base.port));
2662
2663 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2664 port_name(dig_port->base.port));
2665
2666 pp = ironlake_get_pp_control(intel_dp);
2667 /* We need to switch off panel power _and_ force vdd, for otherwise some
2668 * panels get very unhappy and cease to work. */
2669 pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2670 EDP_BLC_ENABLE);
2671
2672 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2673
2674 intel_dp->want_panel_vdd = false;
2675
2676 I915_WRITE(pp_ctrl_reg, pp);
2677 POSTING_READ(pp_ctrl_reg);
2678
2679 wait_panel_off(intel_dp);
2680 intel_dp->panel_power_off_time = ktime_get_boottime();
2681
2682 /* We got a reference when we enabled the VDD. */
2683 intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
2684 }
2685
2686 void intel_edp_panel_off(struct intel_dp *intel_dp)
2687 {
2688 intel_wakeref_t wakeref;
2689
2690 if (!intel_dp_is_edp(intel_dp))
2691 return;
2692
2693 with_pps_lock(intel_dp, wakeref)
2694 edp_panel_off(intel_dp);
2695 }
2696
2697 /* Enable backlight in the panel power control. */
2698 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2699 {
2700 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2701 intel_wakeref_t wakeref;
2702
2703 /*
2704 * If we enable the backlight right away following a panel power
2705 * on, we may see slight flicker as the panel syncs with the eDP
2706 * link. So delay a bit to make sure the image is solid before
2707 * allowing it to appear.
2708 */
2709 wait_backlight_on(intel_dp);
2710
2711 with_pps_lock(intel_dp, wakeref) {
2712 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2713 u32 pp;
2714
2715 pp = ironlake_get_pp_control(intel_dp);
2716 pp |= EDP_BLC_ENABLE;
2717
2718 I915_WRITE(pp_ctrl_reg, pp);
2719 POSTING_READ(pp_ctrl_reg);
2720 }
2721 }
2722
2723 /* Enable backlight PWM and backlight PP control. */
2724 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
2725 const struct drm_connector_state *conn_state)
2726 {
2727 struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);
2728
2729 if (!intel_dp_is_edp(intel_dp))
2730 return;
2731
2732 DRM_DEBUG_KMS("\n");
2733
2734 intel_panel_enable_backlight(crtc_state, conn_state);
2735 _intel_edp_backlight_on(intel_dp);
2736 }
2737
2738 /* Disable backlight in the panel power control. */
2739 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2740 {
2741 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2742 intel_wakeref_t wakeref;
2743
2744 if (!intel_dp_is_edp(intel_dp))
2745 return;
2746
2747 with_pps_lock(intel_dp, wakeref) {
2748 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2749 u32 pp;
2750
2751 pp = ironlake_get_pp_control(intel_dp);
2752 pp &= ~EDP_BLC_ENABLE;
2753
2754 I915_WRITE(pp_ctrl_reg, pp);
2755 POSTING_READ(pp_ctrl_reg);
2756 }
2757
2758 intel_dp->last_backlight_off = jiffies;
2759 edp_wait_backlight_off(intel_dp);
2760 }
2761
2762 /* Disable backlight PP control and backlight PWM. */
2763 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
2764 {
2765 struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);
2766
2767 if (!intel_dp_is_edp(intel_dp))
2768 return;
2769
2770 DRM_DEBUG_KMS("\n");
2771
2772 _intel_edp_backlight_off(intel_dp);
2773 intel_panel_disable_backlight(old_conn_state);
2774 }
2775
2776 /*
2777 * Hook for controlling the panel power control backlight through the bl_power
2778 * sysfs attribute. Take care to handle multiple calls.
2779 */
2780 static void intel_edp_backlight_power(struct intel_connector *connector,
2781 bool enable)
2782 {
2783 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2784 intel_wakeref_t wakeref;
2785 bool is_enabled;
2786
2787 is_enabled = false;
2788 with_pps_lock(intel_dp, wakeref)
2789 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2790 if (is_enabled == enable)
2791 return;
2792
2793 DRM_DEBUG_KMS("panel power control backlight %s\n",
2794 enable ? "enable" : "disable");
2795
2796 if (enable)
2797 _intel_edp_backlight_on(intel_dp);
2798 else
2799 _intel_edp_backlight_off(intel_dp);
2800 }
2801
2802 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2803 {
2804 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2805 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2806 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2807
2808 I915_STATE_WARN(cur_state != state,
2809 "DP port %c state assertion failure (expected %s, current %s)\n",
2810 port_name(dig_port->base.port),
2811 onoff(state), onoff(cur_state));
2812 }
2813 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
2814
2815 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2816 {
2817 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2818
2819 I915_STATE_WARN(cur_state != state,
2820 "eDP PLL state assertion failure (expected %s, current %s)\n",
2821 onoff(state), onoff(cur_state));
2822 }
2823 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2824 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2825
2826 static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
2827 const struct intel_crtc_state *pipe_config)
2828 {
2829 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
2830 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2831
2832 assert_pipe_disabled(dev_priv, crtc->pipe);
2833 assert_dp_port_disabled(intel_dp);
2834 assert_edp_pll_disabled(dev_priv);
2835
2836 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2837 pipe_config->port_clock);
2838
2839 intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2840
2841 if (pipe_config->port_clock == 162000)
2842 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2843 else
2844 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2845
2846 I915_WRITE(DP_A, intel_dp->DP);
2847 POSTING_READ(DP_A);
2848 udelay(500);
2849
2850 /*
2851 * [DevILK] Workaround required when enabling the DP PLL
2852 * while a pipe is enabled going to FDI:
2853 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
2854 * 2. Program DP PLL enable
2855 */
2856 if (IS_GEN(dev_priv, 5))
2857 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
2858
2859 intel_dp->DP |= DP_PLL_ENABLE;
2860
2861 I915_WRITE(DP_A, intel_dp->DP);
2862 POSTING_READ(DP_A);
2863 udelay(200);
2864 }
2865
2866 static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
2867 const struct intel_crtc_state *old_crtc_state)
2868 {
2869 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
2870 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2871
2872 assert_pipe_disabled(dev_priv, crtc->pipe);
2873 assert_dp_port_disabled(intel_dp);
2874 assert_edp_pll_enabled(dev_priv);
2875
2876 DRM_DEBUG_KMS("disabling eDP PLL\n");
2877
2878 intel_dp->DP &= ~DP_PLL_ENABLE;
2879
2880 I915_WRITE(DP_A, intel_dp->DP);
2881 POSTING_READ(DP_A);
2882 udelay(200);
2883 }
2884
2885 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
2886 {
2887 /*
2888 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
2889 * be capable of signalling downstream hpd with a long pulse.
2890 * Whether or not that means D3 is safe to use is not clear,
2891 * but let's assume so until proven otherwise.
2892 *
2893 * FIXME should really check all downstream ports...
2894 */
2895 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
2896 intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
2897 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
2898 }
2899
2900 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
2901 const struct intel_crtc_state *crtc_state,
2902 bool enable)
2903 {
2904 int ret;
2905
2906 if (!crtc_state->dsc_params.compression_enable)
2907 return;
2908
2909 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
2910 enable ? DP_DECOMPRESSION_EN : 0);
2911 if (ret < 0)
2912 DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
2913 enable ? "enable" : "disable");
2914 }
2915
2916 /* If the sink supports it, try to set the power state appropriately */
2917 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2918 {
2919 int ret, i;
2920
2921 /* Should have a valid DPCD by this point */
2922 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2923 return;
2924
2925 if (mode != DRM_MODE_DPMS_ON) {
2926 if (downstream_hpd_needs_d0(intel_dp))
2927 return;
2928
2929 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2930 DP_SET_POWER_D3);
2931 } else {
2932 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
2933
2934 /*
2935 * When turning on, we retry a few times with a 1 ms sleep in between
2936 * to give the sink time to wake up.
2937 */
2938 for (i = 0; i < 3; i++) {
2939 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2940 DP_SET_POWER_D0);
2941 if (ret == 1)
2942 break;
2943 msleep(1);
2944 }
2945
2946 if (ret == 1 && lspcon->active)
2947 lspcon_wait_pcon_mode(lspcon);
2948 }
2949
2950 if (ret != 1)
2951 DRM_DEBUG_KMS("failed to %s sink power state\n",
2952 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2953 }
2954
2955 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
2956 enum port port, enum pipe *pipe)
2957 {
2958 enum pipe p;
2959
2960 for_each_pipe(dev_priv, p) {
2961 u32 val = I915_READ(TRANS_DP_CTL(p));
2962
2963 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
2964 *pipe = p;
2965 return true;
2966 }
2967 }
2968
2969 DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port));
2970
2971 /* must initialize pipe to something for the asserts */
2972 *pipe = PIPE_A;
2973
2974 return false;
2975 }
2976
2977 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
2978 i915_reg_t dp_reg, enum port port,
2979 enum pipe *pipe)
2980 {
2981 bool ret;
2982 u32 val;
2983
2984 val = I915_READ(dp_reg);
2985
2986 ret = val & DP_PORT_EN;
2987
2988 /* asserts want to know the pipe even if the port is disabled */
2989 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
2990 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
2991 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
2992 ret &= cpt_dp_port_selected(dev_priv, port, pipe);
2993 else if (IS_CHERRYVIEW(dev_priv))
2994 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
2995 else
2996 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
2997
2998 return ret;
2999 }
3000
3001 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
3002 enum pipe *pipe)
3003 {
3004 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3005 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3006 intel_wakeref_t wakeref;
3007 bool ret;
3008
3009 wakeref = intel_display_power_get_if_enabled(dev_priv,
3010 encoder->power_domain);
3011 if (!wakeref)
3012 return false;
3013
3014 ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
3015 encoder->port, pipe);
3016
3017 intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
3018
3019 return ret;
3020 }
3021
3022 static void intel_dp_get_config(struct intel_encoder *encoder,
3023 struct intel_crtc_state *pipe_config)
3024 {
3025 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3026 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3027 u32 tmp, flags = 0;
3028 enum port port = encoder->port;
3029 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
3030
3031 if (encoder->type == INTEL_OUTPUT_EDP)
3032 pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
3033 else
3034 pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
3035
3036 tmp = I915_READ(intel_dp->output_reg);
3037
3038 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
3039
3040 if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
3041 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
3042
3043 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
3044 flags |= DRM_MODE_FLAG_PHSYNC;
3045 else
3046 flags |= DRM_MODE_FLAG_NHSYNC;
3047
3048 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
3049 flags |= DRM_MODE_FLAG_PVSYNC;
3050 else
3051 flags |= DRM_MODE_FLAG_NVSYNC;
3052 } else {
3053 if (tmp & DP_SYNC_HS_HIGH)
3054 flags |= DRM_MODE_FLAG_PHSYNC;
3055 else
3056 flags |= DRM_MODE_FLAG_NHSYNC;
3057
3058 if (tmp & DP_SYNC_VS_HIGH)
3059 flags |= DRM_MODE_FLAG_PVSYNC;
3060 else
3061 flags |= DRM_MODE_FLAG_NVSYNC;
3062 }
3063
3064 pipe_config->base.adjusted_mode.flags |= flags;
3065
3066 if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
3067 pipe_config->limited_color_range = true;
3068
3069 pipe_config->lane_count =
3070 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
3071
3072 intel_dp_get_m_n(crtc, pipe_config);
3073
3074 if (port == PORT_A) {
3075 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
3076 pipe_config->port_clock = 162000;
3077 else
3078 pipe_config->port_clock = 270000;
3079 }
3080
3081 pipe_config->base.adjusted_mode.crtc_clock =
3082 intel_dotclock_calculate(pipe_config->port_clock,
3083 &pipe_config->dp_m_n);
3084
3085 if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
3086 pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
3087 /*
3088 * This is a big fat ugly hack.
3089 *
3090 * Some machines in UEFI boot mode provide us a VBT that has 18
3091 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
3092 * unknown we fail to light up. Yet the same BIOS boots up with
3093 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
3094 * max, not what it tells us to use.
3095 *
3096 * Note: This will still be broken if the eDP panel is not lit
3097 * up by the BIOS, and thus we can't get the mode at module
3098 * load.
3099 */
3100 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
3101 pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
3102 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
3103 }
3104 }
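/*
 * Illustrative readout arithmetic (assuming intel_dotclock_calculate()
 * returns port_clock * link_m / link_n): link_m/link_n of
 * 148500/270000 on an HBR link recovers a crtc_clock of
 * 270000 * 148500 / 270000 = 148500 kHz.
 */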
3105
3106 static void intel_disable_dp(struct intel_encoder *encoder,
3107 const struct intel_crtc_state *old_crtc_state,
3108 const struct drm_connector_state *old_conn_state)
3109 {
3110 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3111
3112 intel_dp->link_trained = false;
3113
3114 if (old_crtc_state->has_audio)
3115 intel_audio_codec_disable(encoder,
3116 old_crtc_state, old_conn_state);
3117
3118 /* Make sure the panel is off before trying to change the mode. But also
3119 * ensure that we have vdd while we switch off the panel. */
3120 intel_edp_panel_vdd_on(intel_dp);
3121 intel_edp_backlight_off(old_conn_state);
3122 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
3123 intel_edp_panel_off(intel_dp);
3124 }
3125
3126 static void g4x_disable_dp(struct intel_encoder *encoder,
3127 const struct intel_crtc_state *old_crtc_state,
3128 const struct drm_connector_state *old_conn_state)
3129 {
3130 intel_disable_dp(encoder, old_crtc_state, old_conn_state);
3131 }
3132
3133 static void vlv_disable_dp(struct intel_encoder *encoder,
3134 const struct intel_crtc_state *old_crtc_state,
3135 const struct drm_connector_state *old_conn_state)
3136 {
3137 intel_disable_dp(encoder, old_crtc_state, old_conn_state);
3138 }
3139
3140 static void g4x_post_disable_dp(struct intel_encoder *encoder,
3141 const struct intel_crtc_state *old_crtc_state,
3142 const struct drm_connector_state *old_conn_state)
3143 {
3144 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3145 enum port port = encoder->port;
3146
3147 /*
3148 * Bspec does not list a specific disable sequence for g4x DP.
3149 * Follow the ilk+ sequence (disable pipe before the port) for
3150 * g4x DP as it does not suffer from underruns like the normal
3151 * g4x modeset sequence (disable pipe after the port).
3152 */
3153 intel_dp_link_down(encoder, old_crtc_state);
3154
3155 /* Only ilk+ has port A */
3156 if (port == PORT_A)
3157 ironlake_edp_pll_off(intel_dp, old_crtc_state);
3158 }
3159
3160 static void vlv_post_disable_dp(struct intel_encoder *encoder,
3161 const struct intel_crtc_state *old_crtc_state,
3162 const struct drm_connector_state *old_conn_state)
3163 {
3164 intel_dp_link_down(encoder, old_crtc_state);
3165 }
3166
3167 static void chv_post_disable_dp(struct intel_encoder *encoder,
3168 const struct intel_crtc_state *old_crtc_state,
3169 const struct drm_connector_state *old_conn_state)
3170 {
3171 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3172
3173 intel_dp_link_down(encoder, old_crtc_state);
3174
3175 mutex_lock(&dev_priv->sb_lock);
3176
3177 /* Assert data lane reset */
3178 chv_data_lane_soft_reset(encoder, old_crtc_state, true);
3179
3180 mutex_unlock(&dev_priv->sb_lock);
3181 }
3182
3183 static void
3184 _intel_dp_set_link_train(struct intel_dp *intel_dp,
3185 u32 *DP,
3186 u8 dp_train_pat)
3187 {
3188 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3189 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3190 enum port port = intel_dig_port->base.port;
3191 u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
3192
3193 if (dp_train_pat & train_pat_mask)
3194 DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
3195 dp_train_pat & train_pat_mask);
3196
3197 if (HAS_DDI(dev_priv)) {
3198 u32 temp = I915_READ(DP_TP_CTL(port));
3199
3200 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
3201 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
3202 else
3203 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
3204
3205 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3206 switch (dp_train_pat & train_pat_mask) {
3207 case DP_TRAINING_PATTERN_DISABLE:
3208 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
3209
3210 break;
3211 case DP_TRAINING_PATTERN_1:
3212 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
3213 break;
3214 case DP_TRAINING_PATTERN_2:
3215 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
3216 break;
3217 case DP_TRAINING_PATTERN_3:
3218 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
3219 break;
3220 case DP_TRAINING_PATTERN_4:
3221 temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
3222 break;
3223 }
3224 I915_WRITE(DP_TP_CTL(port), temp);
3225
3226 } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
3227 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
3228 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
3229
3230 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3231 case DP_TRAINING_PATTERN_DISABLE:
3232 *DP |= DP_LINK_TRAIN_OFF_CPT;
3233 break;
3234 case DP_TRAINING_PATTERN_1:
3235 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
3236 break;
3237 case DP_TRAINING_PATTERN_2:
3238 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3239 break;
3240 case DP_TRAINING_PATTERN_3:
3241 DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
3242 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3243 break;
3244 }
3245
3246 } else {
3247 *DP &= ~DP_LINK_TRAIN_MASK;
3248
3249 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3250 case DP_TRAINING_PATTERN_DISABLE:
3251 *DP |= DP_LINK_TRAIN_OFF;
3252 break;
3253 case DP_TRAINING_PATTERN_1:
3254 *DP |= DP_LINK_TRAIN_PAT_1;
3255 break;
3256 case DP_TRAINING_PATTERN_2:
3257 *DP |= DP_LINK_TRAIN_PAT_2;
3258 break;
3259 case DP_TRAINING_PATTERN_3:
3260 DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
3261 *DP |= DP_LINK_TRAIN_PAT_2;
3262 break;
3263 }
3264 }
3265 }
3266
3267 static void intel_dp_enable_port(struct intel_dp *intel_dp,
3268 const struct intel_crtc_state *old_crtc_state)
3269 {
3270 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3271
3272 /* enable with pattern 1 (as per spec) */
3273
3274 intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);
3275
3276 /*
3277 * Magic for VLV/CHV. We _must_ first set up the register
3278 * without actually enabling the port, and then do another
3279 * write to enable the port. Otherwise link training will
3280 * fail when the power sequencer is freshly used for this port.
3281 */
3282 intel_dp->DP |= DP_PORT_EN;
3283 if (old_crtc_state->has_audio)
3284 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
3285
3286 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3287 POSTING_READ(intel_dp->output_reg);
3288 }
3289
3290 static void intel_enable_dp(struct intel_encoder *encoder,
3291 const struct intel_crtc_state *pipe_config,
3292 const struct drm_connector_state *conn_state)
3293 {
3294 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3295 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3296 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
3297 u32 dp_reg = I915_READ(intel_dp->output_reg);
3298 enum pipe pipe = crtc->pipe;
3299 intel_wakeref_t wakeref;
3300
3301 if (WARN_ON(dp_reg & DP_PORT_EN))
3302 return;
3303
3304 with_pps_lock(intel_dp, wakeref) {
3305 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3306 vlv_init_panel_power_sequencer(encoder, pipe_config);
3307
3308 intel_dp_enable_port(intel_dp, pipe_config);
3309
3310 edp_panel_vdd_on(intel_dp);
3311 edp_panel_on(intel_dp);
3312 edp_panel_vdd_off(intel_dp, true);
3313 }
3314
3315 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3316 unsigned int lane_mask = 0x0;
3317
3318 if (IS_CHERRYVIEW(dev_priv))
3319 lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
3320
3321 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
3322 lane_mask);
3323 }
3324
3325 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
3326 intel_dp_start_link_train(intel_dp);
3327 intel_dp_stop_link_train(intel_dp);
3328
3329 if (pipe_config->has_audio) {
3330 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
3331 pipe_name(pipe));
3332 intel_audio_codec_enable(encoder, pipe_config, conn_state);
3333 }
3334 }
3335
3336 static void g4x_enable_dp(struct intel_encoder *encoder,
3337 const struct intel_crtc_state *pipe_config,
3338 const struct drm_connector_state *conn_state)
3339 {
3340 intel_enable_dp(encoder, pipe_config, conn_state);
3341 intel_edp_backlight_on(pipe_config, conn_state);
3342 }
3343
3344 static void vlv_enable_dp(struct intel_encoder *encoder,
3345 const struct intel_crtc_state *pipe_config,
3346 const struct drm_connector_state *conn_state)
3347 {
3348 intel_edp_backlight_on(pipe_config, conn_state);
3349 }
3350
3351 static void g4x_pre_enable_dp(struct intel_encoder *encoder,
3352 const struct intel_crtc_state *pipe_config,
3353 const struct drm_connector_state *conn_state)
3354 {
3355 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3356 enum port port = encoder->port;
3357
3358 intel_dp_prepare(encoder, pipe_config);
3359
3360 /* Only ilk+ has port A */
3361 if (port == PORT_A)
3362 ironlake_edp_pll_on(intel_dp, pipe_config);
3363 }
3364
3365 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
3366 {
3367 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3368 struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
3369 enum pipe pipe = intel_dp->pps_pipe;
3370 i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
3371
3372 WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
3373
3374 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
3375 return;
3376
3377 edp_panel_vdd_off_sync(intel_dp);
3378
3379 /*
3380 * VLV seems to get confused when multiple power sequencers
3381 * have the same port selected (even if only one has power/vdd
3382 * enabled). The failure manifests as vlv_wait_port_ready() failing.
3383 * CHV, on the other hand, doesn't seem to mind having the same port
3384 * selected in multiple power sequencers, but let's always clear the
3385 * port select when logically disconnecting a power sequencer
3386 * from a port.
3387 */
3388 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
3389 pipe_name(pipe), port_name(intel_dig_port->base.port));
3390 I915_WRITE(pp_on_reg, 0);
3391 POSTING_READ(pp_on_reg);
3392
3393 intel_dp->pps_pipe = INVALID_PIPE;
3394 }
3395
3396 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
3397 enum pipe pipe)
3398 {
3399 struct intel_encoder *encoder;
3400
3401 lockdep_assert_held(&dev_priv->pps_mutex);
3402
3403 for_each_intel_dp(&dev_priv->drm, encoder) {
3404 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3405 enum port port = encoder->port;
3406
3407 WARN(intel_dp->active_pipe == pipe,
3408 "stealing pipe %c power sequencer from active (e)DP port %c\n",
3409 pipe_name(pipe), port_name(port));
3410
3411 if (intel_dp->pps_pipe != pipe)
3412 continue;
3413
3414 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
3415 pipe_name(pipe), port_name(port));
3416
3417 /* make sure vdd is off before we steal it */
3418 vlv_detach_power_sequencer(intel_dp);
3419 }
3420 }
3421
3422 static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
3423 const struct intel_crtc_state *crtc_state)
3424 {
3425 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3426 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3427 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
3428
3429 lockdep_assert_held(&dev_priv->pps_mutex);
3430
3431 WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
3432
3433 if (intel_dp->pps_pipe != INVALID_PIPE &&
3434 intel_dp->pps_pipe != crtc->pipe) {
3435 /*
3436 * If another power sequencer was being used on this
3437 * port previously make sure to turn off vdd there while
3438 * we still have control of it.
3439 */
3440 vlv_detach_power_sequencer(intel_dp);
3441 }
3442
3443 /*
3444 * We may be stealing the power
3445 * sequencer from another port.
3446 */
3447 vlv_steal_power_sequencer(dev_priv, crtc->pipe);
3448
3449 intel_dp->active_pipe = crtc->pipe;
3450
3451 if (!intel_dp_is_edp(intel_dp))
3452 return;
3453
3454 /* now it's all ours */
3455 intel_dp->pps_pipe = crtc->pipe;
3456
3457 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
3458 pipe_name(intel_dp->pps_pipe), port_name(encoder->port));
3459
3460 /* init power sequencer on this pipe and port */
3461 intel_dp_init_panel_power_sequencer(intel_dp);
3462 intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
3463 }
3464
3465 static void vlv_pre_enable_dp(struct intel_encoder *encoder,
3466 const struct intel_crtc_state *pipe_config,
3467 const struct drm_connector_state *conn_state)
3468 {
3469 vlv_phy_pre_encoder_enable(encoder, pipe_config);
3470
3471 intel_enable_dp(encoder, pipe_config, conn_state);
3472 }
3473
3474 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
3475 const struct intel_crtc_state *pipe_config,
3476 const struct drm_connector_state *conn_state)
3477 {
3478 intel_dp_prepare(encoder, pipe_config);
3479
3480 vlv_phy_pre_pll_enable(encoder, pipe_config);
3481 }
3482
3483 static void chv_pre_enable_dp(struct intel_encoder *encoder,
3484 const struct intel_crtc_state *pipe_config,
3485 const struct drm_connector_state *conn_state)
3486 {
3487 chv_phy_pre_encoder_enable(encoder, pipe_config);
3488
3489 intel_enable_dp(encoder, pipe_config, conn_state);
3490
3491 /* Second common lane will stay alive on its own now */
3492 chv_phy_release_cl2_override(encoder);
3493 }
3494
3495 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
3496 const struct intel_crtc_state *pipe_config,
3497 const struct drm_connector_state *conn_state)
3498 {
3499 intel_dp_prepare(encoder, pipe_config);
3500
3501 chv_phy_pre_pll_enable(encoder, pipe_config);
3502 }
3503
3504 static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
3505 const struct intel_crtc_state *old_crtc_state,
3506 const struct drm_connector_state *old_conn_state)
3507 {
3508 chv_phy_post_pll_disable(encoder, old_crtc_state);
3509 }
3510
3511 /*
3512 * Fetch AUX CH registers 0x202 - 0x207 which contain
3513 * link status information
3514 */
3515 bool
3516 intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
3517 {
3518 return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
3519 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3520 }
3521
3522 /* These are source-specific values. */
3523 u8
3524 intel_dp_voltage_max(struct intel_dp *intel_dp)
3525 {
3526 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3527 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3528 enum port port = encoder->port;
3529
3530 if (HAS_DDI(dev_priv))
3531 return intel_ddi_dp_voltage_max(encoder);
3532 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3533 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3534 else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3535 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3536 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3537 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3538 else
3539 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3540 }
3541
3542 u8
3543 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
3544 {
3545 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3546 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3547 enum port port = encoder->port;
3548
3549 if (HAS_DDI(dev_priv)) {
3550 return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
3551 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3552 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3553 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3554 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3555 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3556 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3557 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3558 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3559 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3560 default:
3561 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3562 }
3563 } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
3564 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3565 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3566 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3567 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3568 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3569 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3570 default:
3571 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3572 }
3573 } else {
3574 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3575 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3576 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3577 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3578 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3579 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3580 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3581 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3582 default:
3583 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3584 }
3585 }
3586 }
3587
3588 static u32 vlv_signal_levels(struct intel_dp *intel_dp)
3589 {
3590 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3591 unsigned long demph_reg_value, preemph_reg_value,
3592 uniqtranscale_reg_value;
3593 u8 train_set = intel_dp->train_set[0];
3594
3595 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3596 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3597 preemph_reg_value = 0x0004000;
3598 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3599 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3600 demph_reg_value = 0x2B405555;
3601 uniqtranscale_reg_value = 0x552AB83A;
3602 break;
3603 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3604 demph_reg_value = 0x2B404040;
3605 uniqtranscale_reg_value = 0x5548B83A;
3606 break;
3607 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3608 demph_reg_value = 0x2B245555;
3609 uniqtranscale_reg_value = 0x5560B83A;
3610 break;
3611 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3612 demph_reg_value = 0x2B405555;
3613 uniqtranscale_reg_value = 0x5598DA3A;
3614 break;
3615 default:
3616 return 0;
3617 }
3618 break;
3619 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3620 preemph_reg_value = 0x0002000;
3621 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3622 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3623 demph_reg_value = 0x2B404040;
3624 uniqtranscale_reg_value = 0x5552B83A;
3625 break;
3626 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3627 demph_reg_value = 0x2B404848;
3628 uniqtranscale_reg_value = 0x5580B83A;
3629 break;
3630 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3631 demph_reg_value = 0x2B404040;
3632 uniqtranscale_reg_value = 0x55ADDA3A;
3633 break;
3634 default:
3635 return 0;
3636 }
3637 break;
3638 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3639 preemph_reg_value = 0x0000000;
3640 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3641 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3642 demph_reg_value = 0x2B305555;
3643 uniqtranscale_reg_value = 0x5570B83A;
3644 break;
3645 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3646 demph_reg_value = 0x2B2B4040;
3647 uniqtranscale_reg_value = 0x55ADDA3A;
3648 break;
3649 default:
3650 return 0;
3651 }
3652 break;
3653 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3654 preemph_reg_value = 0x0006000;
3655 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3656 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3657 demph_reg_value = 0x1B405555;
3658 uniqtranscale_reg_value = 0x55ADDA3A;
3659 break;
3660 default:
3661 return 0;
3662 }
3663 break;
3664 default:
3665 return 0;
3666 }
3667
3668 vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
3669 uniqtranscale_reg_value, 0);
3670
3671 return 0;
3672 }
3673
3674 static u32 chv_signal_levels(struct intel_dp *intel_dp)
3675 {
3676 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3677 u32 deemph_reg_value, margin_reg_value;
3678 bool uniq_trans_scale = false;
3679 u8 train_set = intel_dp->train_set[0];
3680
3681 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3682 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3683 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3684 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3685 deemph_reg_value = 128;
3686 margin_reg_value = 52;
3687 break;
3688 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3689 deemph_reg_value = 128;
3690 margin_reg_value = 77;
3691 break;
3692 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3693 deemph_reg_value = 128;
3694 margin_reg_value = 102;
3695 break;
3696 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3697 deemph_reg_value = 128;
3698 margin_reg_value = 154;
3699 uniq_trans_scale = true;
3700 break;
3701 default:
3702 return 0;
3703 }
3704 break;
3705 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3706 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3707 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3708 deemph_reg_value = 85;
3709 margin_reg_value = 78;
3710 break;
3711 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3712 deemph_reg_value = 85;
3713 margin_reg_value = 116;
3714 break;
3715 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3716 deemph_reg_value = 85;
3717 margin_reg_value = 154;
3718 break;
3719 default:
3720 return 0;
3721 }
3722 break;
3723 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3724 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3725 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3726 deemph_reg_value = 64;
3727 margin_reg_value = 104;
3728 break;
3729 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3730 deemph_reg_value = 64;
3731 margin_reg_value = 154;
3732 break;
3733 default:
3734 return 0;
3735 }
3736 break;
3737 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3738 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3739 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3740 deemph_reg_value = 43;
3741 margin_reg_value = 154;
3742 break;
3743 default:
3744 return 0;
3745 }
3746 break;
3747 default:
3748 return 0;
3749 }
3750
3751 chv_set_phy_signal_level(encoder, deemph_reg_value,
3752 margin_reg_value, uniq_trans_scale);
3753
3754 return 0;
3755 }
3756
3757 static u32
3758 g4x_signal_levels(u8 train_set)
3759 {
3760 u32 signal_levels = 0;
3761
3762 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3763 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3764 default:
3765 signal_levels |= DP_VOLTAGE_0_4;
3766 break;
3767 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3768 signal_levels |= DP_VOLTAGE_0_6;
3769 break;
3770 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3771 signal_levels |= DP_VOLTAGE_0_8;
3772 break;
3773 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3774 signal_levels |= DP_VOLTAGE_1_2;
3775 break;
3776 }
3777 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3778 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3779 default:
3780 signal_levels |= DP_PRE_EMPHASIS_0;
3781 break;
3782 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3783 signal_levels |= DP_PRE_EMPHASIS_3_5;
3784 break;
3785 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3786 signal_levels |= DP_PRE_EMPHASIS_6;
3787 break;
3788 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3789 signal_levels |= DP_PRE_EMPHASIS_9_5;
3790 break;
3791 }
3792 return signal_levels;
3793 }
3794
3795 /* SNB CPU eDP voltage swing and pre-emphasis control */
3796 static u32
3797 snb_cpu_edp_signal_levels(u8 train_set)
3798 {
3799 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3800 DP_TRAIN_PRE_EMPHASIS_MASK);
3801 switch (signal_levels) {
3802 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3803 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3804 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3805 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3806 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3807 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3808 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3809 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3810 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3811 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3812 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3813 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3814 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3815 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3816 default:
3817 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3818 "0x%x\n", signal_levels);
3819 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3820 }
3821 }
3822
3823 /* IVB CPU eDP voltage swing and pre-emphasis control */
3824 static u32
3825 ivb_cpu_edp_signal_levels(u8 train_set)
3826 {
3827 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3828 DP_TRAIN_PRE_EMPHASIS_MASK);
3829 switch (signal_levels) {
3830 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3831 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3832 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3833 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3834 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3835 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3836
3837 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3838 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3839 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3840 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3841
3842 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3843 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3844 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3845 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3846
3847 default:
3848 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3849 "0x%x\n", signal_levels);
3850 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3851 }
3852 }
3853
3854 void
3855 intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3856 {
3857 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3858 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3859 enum port port = intel_dig_port->base.port;
3860 u32 signal_levels, mask = 0;
3861 u8 train_set = intel_dp->train_set[0];
3862
3863 if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
3864 signal_levels = bxt_signal_levels(intel_dp);
3865 } else if (HAS_DDI(dev_priv)) {
3866 signal_levels = ddi_signal_levels(intel_dp);
3867 mask = DDI_BUF_EMP_MASK;
3868 } else if (IS_CHERRYVIEW(dev_priv)) {
3869 signal_levels = chv_signal_levels(intel_dp);
3870 } else if (IS_VALLEYVIEW(dev_priv)) {
3871 signal_levels = vlv_signal_levels(intel_dp);
3872 } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
3873 signal_levels = ivb_cpu_edp_signal_levels(train_set);
3874 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3875 } else if (IS_GEN(dev_priv, 6) && port == PORT_A) {
3876 signal_levels = snb_cpu_edp_signal_levels(train_set);
3877 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3878 } else {
3879 signal_levels = g4x_signal_levels(train_set);
3880 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3881 }
3882
3883 if (mask)
3884 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3885
3886 DRM_DEBUG_KMS("Using vswing level %d\n",
3887 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3888 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3889 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3890 DP_TRAIN_PRE_EMPHASIS_SHIFT);
3891
3892 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3893
3894 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3895 POSTING_READ(intel_dp->output_reg);
3896 }
3897
3898 void
3899 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3900 u8 dp_train_pat)
3901 {
3902 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3903 struct drm_i915_private *dev_priv =
3904 to_i915(intel_dig_port->base.base.dev);
3905
3906 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3907
3908 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3909 POSTING_READ(intel_dp->output_reg);
3910 }
3911
3912 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3913 {
3914 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3915 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3916 enum port port = intel_dig_port->base.port;
3917 u32 val;
3918
3919 if (!HAS_DDI(dev_priv))
3920 return;
3921
3922 val = I915_READ(DP_TP_CTL(port));
3923 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3924 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3925 I915_WRITE(DP_TP_CTL(port), val);
3926
3927 /*
3928 * On PORT_A we can have only eDP in SST mode. There the only reason
3929 * we need to set idle transmission mode is to work around a HW issue
3930 * where we enable the pipe while not in idle link-training mode.
3931 * In this case there is a requirement to wait for a minimum number of
3932 * idle patterns to be sent.
3933 */
3934 if (port == PORT_A)
3935 return;
3936
3937 if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port),
3938 DP_TP_STATUS_IDLE_DONE,
3939 DP_TP_STATUS_IDLE_DONE,
3940 1))
3941 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3942 }
3943
3944 static void
3945 intel_dp_link_down(struct intel_encoder *encoder,
3946 const struct intel_crtc_state *old_crtc_state)
3947 {
3948 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3949 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3950 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
3951 enum port port = encoder->port;
3952 u32 DP = intel_dp->DP;
3953
3954 if (WARN_ON(HAS_DDI(dev_priv)))
3955 return;
3956
3957 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3958 return;
3959
3960 DRM_DEBUG_KMS("\n");
3961
3962 if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
3963 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
3964 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3965 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3966 } else {
3967 DP &= ~DP_LINK_TRAIN_MASK;
3968 DP |= DP_LINK_TRAIN_PAT_IDLE;
3969 }
3970 I915_WRITE(intel_dp->output_reg, DP);
3971 POSTING_READ(intel_dp->output_reg);
3972
3973 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3974 I915_WRITE(intel_dp->output_reg, DP);
3975 POSTING_READ(intel_dp->output_reg);
3976
3977 /*
3978 * HW workaround for IBX, we need to move the port
3979 * to transcoder A after disabling it to allow the
3980 * matching HDMI port to be enabled on transcoder A.
3981 */
3982 if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
3983 /*
3984 * We get CPU/PCH FIFO underruns on the other pipe when
3985 * doing the workaround. Sweep them under the rug.
3986 */
3987 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3988 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3989
3990 /* always enable with pattern 1 (as per spec) */
3991 DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
3992 DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
3993 DP_LINK_TRAIN_PAT_1;
3994 I915_WRITE(intel_dp->output_reg, DP);
3995 POSTING_READ(intel_dp->output_reg);
3996
3997 DP &= ~DP_PORT_EN;
3998 I915_WRITE(intel_dp->output_reg, DP);
3999 POSTING_READ(intel_dp->output_reg);
4000
4001 intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
4002 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
4003 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
4004 }
4005
4006 msleep(intel_dp->panel_power_down_delay);
4007
4008 intel_dp->DP = DP;
4009
4010 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
4011 intel_wakeref_t wakeref;
4012
4013 with_pps_lock(intel_dp, wakeref)
4014 intel_dp->active_pipe = INVALID_PIPE;
4015 }
4016 }
4017
4018 static void
4019 intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
4020 {
4021 u8 dpcd_ext[6];
4022
4023 /*
4024 * Prior to DP 1.3 the bit represented by
4025 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
4026 * If it is set, DP_DPCD_REV at 0000h could be at a value less than
4027 * the true capability of the panel. The only way to check is to
4028 * then compare 0000h and 2200h.
4029 */
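/*
 * Illustrative (hypothetical) example: a sink whose true DPCD rev is
 * 1.4, but which must stay compatible with older sources, may report
 * 0x12 at 0000h while the extended field at 2200h (DP_DP13_DPCD_REV)
 * reports the real 0x14.
 */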
4030 if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
4031 DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
4032 return;
4033
4034 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
4035 &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
4036 DRM_ERROR("Failed to read DPCD extended receiver capabilities\n");
4037 return;
4038 }
4039
4040 if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
4041 DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n");
4042 return;
4043 }
4044
4045 if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
4046 return;
4047
4048 DRM_DEBUG_KMS("Base DPCD: %*ph\n",
4049 (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
4050
4051 memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
4052 }
4053
4054 bool
4055 intel_dp_read_dpcd(struct intel_dp *intel_dp)
4056 {
4057 if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
4058 sizeof(intel_dp->dpcd)) < 0)
4059 return false; /* aux transfer failed */
4060
4061 intel_dp_extended_receiver_capabilities(intel_dp);
4062
4063 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
4064
4065 return intel_dp->dpcd[DP_DPCD_REV] != 0;
4066 }
4067
4068 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
4069 {
4070 /*
4071 * Clear the cached register set to avoid using stale values
4072 * for the sinks that do not support DSC.
4073 */
4074 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
4075
4076 /* Clear fec_capable to avoid using stale values */
4077 intel_dp->fec_capable = 0;
4078
4079 /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
4080 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
4081 intel_dp->edp_dpcd[0] >= DP_EDP_14) {
4082 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
4083 intel_dp->dsc_dpcd,
4084 sizeof(intel_dp->dsc_dpcd)) < 0)
4085 DRM_ERROR("Failed to read DPCD register 0x%x\n",
4086 DP_DSC_SUPPORT);
4087
4088 DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
4089 (int)sizeof(intel_dp->dsc_dpcd),
4090 intel_dp->dsc_dpcd);
4091
4092 /* FEC is supported only on DP 1.4 */
4093 if (!intel_dp_is_edp(intel_dp) &&
4094 drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
4095 &intel_dp->fec_capable) < 0)
4096 DRM_ERROR("Failed to read FEC DPCD register\n");
4097
4098 DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
4099 }
4100 }
4101
4102 static bool
4103 intel_edp_init_dpcd(struct intel_dp *intel_dp)
4104 {
4105 struct drm_i915_private *dev_priv =
4106 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
4107
4108 /* this function is meant to be called only once */
4109 WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);
4110
4111 if (!intel_dp_read_dpcd(intel_dp))
4112 return false;
4113
4114 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
4115 drm_dp_is_branch(intel_dp->dpcd));
4116
4117 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
4118 dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
4119 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
4120
4121 /*
4122 * Read the eDP display control registers.
4123 *
4124 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
4125 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
4126 * set, but require eDP 1.4+ detection (e.g. for supported link rates
4127 * method). The display control registers should read zero if they're
4128 * not supported anyway.
4129 */
4130 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
4131 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
4132 sizeof(intel_dp->edp_dpcd))
4133 DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
4134 intel_dp->edp_dpcd);
4135
4136 /*
4137 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
4138 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
4139 */
4140 intel_psr_init_dpcd(intel_dp);
4141
4142 /* Read the eDP 1.4+ supported link rates. */
4143 if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
4144 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
4145 int i;
4146
4147 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
4148 sink_rates, sizeof(sink_rates));
4149
4150 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
4151 int val = le16_to_cpu(sink_rates[i]);
4152
4153 if (val == 0)
4154 break;
4155
4156 /* Value read multiplied by 200kHz gives the per-lane
4157 * link rate in kHz. The source rates are, however,
4158 * stored in terms of LS_Clk kHz. The full conversion
4159 * back to symbols is
4160 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
4161 */
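/*
 * Worked example: HBR2 (5.4 Gbit/s per lane) is reported as
 * val = 27000 (27000 * 200 kHz = 5.4 GHz), and
 * 27000 * 200 / 10 = 540000, the LS_Clk value in kHz that
 * the driver stores for HBR2.
 */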
4162 intel_dp->sink_rates[i] = (val * 200) / 10;
4163 }
4164 intel_dp->num_sink_rates = i;
4165 }
4166
4167 /*
4168 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
4169 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
4170 */
4171 if (intel_dp->num_sink_rates)
4172 intel_dp->use_rate_select = true;
4173 else
4174 intel_dp_set_sink_rates(intel_dp);
4175
4176 intel_dp_set_common_rates(intel_dp);
4177
4178 /* Read the eDP DSC DPCD registers */
4179 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4180 intel_dp_get_dsc_sink_cap(intel_dp);
4181
4182 return true;
4183 }
4184
4185
4186 static bool
4187 intel_dp_get_dpcd(struct intel_dp *intel_dp)
4188 {
4189 if (!intel_dp_read_dpcd(intel_dp))
4190 return false;
4191
4192 /* Don't clobber cached eDP rates. */
4193 if (!intel_dp_is_edp(intel_dp)) {
4194 intel_dp_set_sink_rates(intel_dp);
4195 intel_dp_set_common_rates(intel_dp);
4196 }
4197
4198 /*
4199 * Some eDP panels do not set a valid value for sink count, which is why
4200 * we don't read it here or in intel_edp_init_dpcd().
4201 */
4202 if (!intel_dp_is_edp(intel_dp)) {
4203 u8 count;
4204 ssize_t r;
4205
4206 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
4207 if (r < 1)
4208 return false;
4209
4210 /*
4211 * Sink count can change between short-pulse HPD events, so
4212 * a member variable in intel_dp tracks any changes
4213 * between short-pulse interrupts.
4214 */
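/*
 * DP_GET_SINK_COUNT() folds bit 7 of SINK_COUNT in as the most
 * significant count bit (DP 1.4 grew the 6-bit field at bits 5:0
 * by one bit), so counts up to 127 are representable.
 */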
4215 intel_dp->sink_count = DP_GET_SINK_COUNT(count);
4216
4217 /*
4218 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
4219 * a dongle is present but no display. Unless we need to know
4220 * whether a dongle is present, we don't need to update the
4221 * downstream port information. So an early return here saves
4222 * time by skipping operations that are not required.
4223 */
4224 if (!intel_dp->sink_count)
4225 return false;
4226 }
4227
4228 if (!drm_dp_is_branch(intel_dp->dpcd))
4229 return true; /* native DP sink */
4230
4231 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
4232 return true; /* no per-port downstream info */
4233
4234 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
4235 intel_dp->downstream_ports,
4236 DP_MAX_DOWNSTREAM_PORTS) < 0)
4237 return false; /* downstream port status fetch failed */
4238
4239 return true;
4240 }
4241
4242 static bool
4243 intel_dp_sink_can_mst(struct intel_dp *intel_dp)
4244 {
4245 u8 mstm_cap;
4246
4247 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4248 return false;
4249
4250 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
4251 return false;
4252
4253 return mstm_cap & DP_MST_CAP;
4254 }
4255
4256 static bool
4257 intel_dp_can_mst(struct intel_dp *intel_dp)
4258 {
4259 return i915_modparams.enable_dp_mst &&
4260 intel_dp->can_mst &&
4261 intel_dp_sink_can_mst(intel_dp);
4262 }
4263
4264 static void
4265 intel_dp_configure_mst(struct intel_dp *intel_dp)
4266 {
4267 struct intel_encoder *encoder =
4268 &dp_to_dig_port(intel_dp)->base;
4269 bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
4270
4271 DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n",
4272 port_name(encoder->port), yesno(intel_dp->can_mst),
4273 yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst));
4274
4275 if (!intel_dp->can_mst)
4276 return;
4277
4278 intel_dp->is_mst = sink_can_mst &&
4279 i915_modparams.enable_dp_mst;
4280
4281 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4282 intel_dp->is_mst);
4283 }
4284
4285 static bool
4286 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4287 {
4288 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
4289 sink_irq_vector, DP_DPRX_ESI_LEN) ==
4290 DP_DPRX_ESI_LEN;
4291 }
4292
4293 u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
4294 int mode_clock, int mode_hdisplay)
4295 {
4296 u16 bits_per_pixel, max_bpp_small_joiner_ram;
4297 int i;
4298
4299 /*
4300 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
4301 * (LinkSymbolClock)* 8 * ((100-FECOverhead)/100)*(TimeSlotsPerMTP)
4302 * FECOverhead = 2.4%, for SST -> TimeSlotsPerMTP is 1,
4303 * for MST -> TimeSlotsPerMTP has to be calculated
4304 */
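/*
 * Worked example, assuming 4 lanes at HBR2 (link_clock = 540000):
 * 540000 * 4 * 8 = 17,280,000 Kbits/sec of raw link bandwidth, which
 * the 2.4% FEC overhead reduces to roughly 16,865,000 Kbits/sec
 * available for pixel data.
 */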
4305 bits_per_pixel = (link_clock * lane_count * 8 *
4306 DP_DSC_FEC_OVERHEAD_FACTOR) /
4307 mode_clock;
4308
4309 /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
4310 max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
4311 mode_hdisplay;
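/* E.g. for a 3840 pixel wide mode: 61440 / 3840 = 16 bpp max. */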
4312
4313 /*
4314 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
4315 * check, output bpp from small joiner RAM check)
4316 */
4317 bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
4318
4319 /* Error out if the max bpp is less than smallest allowed valid bpp */
4320 if (bits_per_pixel < valid_dsc_bpp[0]) {
4321 DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
4322 return 0;
4323 }
4324
4325 /* Find the nearest match in the array of known BPPs from VESA */
4326 for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
4327 if (bits_per_pixel < valid_dsc_bpp[i + 1])
4328 break;
4329 }
4330 bits_per_pixel = valid_dsc_bpp[i];
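/*
 * E.g. assuming the valid_dsc_bpp table earlier in this file is
 * {6, 8, 10, 12, 15}, a computed value of 14 snaps down to 12.
 */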
4331
4332 /*
4333 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
4334 * fractional part is 0
4335 */
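/* E.g. 12 bpp is returned as 12 << 4 = 192, i.e. 12.0 in U6.4. */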
4336 return bits_per_pixel << 4;
4337 }
4338
4339 u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
4340 int mode_clock,
4341 int mode_hdisplay)
4342 {
4343 u8 min_slice_count, i;
4344 int max_slice_width;
4345
4346 if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
4347 min_slice_count = DIV_ROUND_UP(mode_clock,
4348 DP_DSC_MAX_ENC_THROUGHPUT_0);
4349 else
4350 min_slice_count = DIV_ROUND_UP(mode_clock,
4351 DP_DSC_MAX_ENC_THROUGHPUT_1);
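/*
 * E.g. a 540000 KPixels/s mode is below DP_DSC_PEAK_PIXEL_RATE,
 * so min_slice_count = DIV_ROUND_UP(540000, 340000) = 2 here.
 */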
4352
4353 max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
4354 if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
4355 DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
4356 max_slice_width);
4357 return 0;
4358 }
4359 /* Also take into account max slice width */
4360 min_slice_count = min_t(u8, min_slice_count,
4361 DIV_ROUND_UP(mode_hdisplay,
4362 max_slice_width));
4363
4364 /* Find the closest match to the valid slice count values */
4365 for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
4366 if (valid_dsc_slicecount[i] >
4367 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
4368 false))
4369 break;
4370 if (min_slice_count <= valid_dsc_slicecount[i])
4371 return valid_dsc_slicecount[i];
4372 }
4373
4374 DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
4375 return 0;
4376 }
4377
4378 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4379 {
4380 int status = 0;
4381 int test_link_rate;
4382 u8 test_lane_count, test_link_bw;
4383 /* (DP CTS 1.2)
4384 * 4.3.1.11
4385 */
4386 /* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
4387 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
4388 &test_lane_count);
4389
4390 if (status <= 0) {
4391 DRM_DEBUG_KMS("Lane count read failed\n");
4392 return DP_TEST_NAK;
4393 }
4394 test_lane_count &= DP_MAX_LANE_COUNT_MASK;
4395
4396 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
4397 &test_link_bw);
4398 if (status <= 0) {
4399 DRM_DEBUG_KMS("Link Rate read failed\n");
4400 return DP_TEST_NAK;
4401 }
4402 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
4403
4404 /* Validate the requested link rate and lane count */
4405 if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
4406 test_lane_count))
4407 return DP_TEST_NAK;
4408
4409 intel_dp->compliance.test_lane_count = test_lane_count;
4410 intel_dp->compliance.test_link_rate = test_link_rate;
4411
4412 return DP_TEST_ACK;
4413 }
4414
4415 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4416 {
4417 u8 test_pattern;
4418 u8 test_misc;
4419 __be16 h_width, v_height;
4420 int status = 0;
4421
4422 /* Read the TEST_PATTERN (DP CTS 3.1.5) */
4423 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
4424 &test_pattern);
4425 if (status <= 0) {
4426 DRM_DEBUG_KMS("Test pattern read failed\n");
4427 return DP_TEST_NAK;
4428 }
4429 if (test_pattern != DP_COLOR_RAMP)
4430 return DP_TEST_NAK;
4431
4432 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
4433 &h_width, 2);
4434 if (status <= 0) {
4435 DRM_DEBUG_KMS("H Width read failed\n");
4436 return DP_TEST_NAK;
4437 }
4438
4439 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
4440 &v_height, 2);
4441 if (status <= 0) {
4442 DRM_DEBUG_KMS("V Height read failed\n");
4443 return DP_TEST_NAK;
4444 }
4445
4446 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
4447 &test_misc);
4448 if (status <= 0) {
4449 DRM_DEBUG_KMS("TEST MISC read failed\n");
4450 return DP_TEST_NAK;
4451 }
4452 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
4453 return DP_TEST_NAK;
4454 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
4455 return DP_TEST_NAK;
4456 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
4457 case DP_TEST_BIT_DEPTH_6:
4458 intel_dp->compliance.test_data.bpc = 6;
4459 break;
4460 case DP_TEST_BIT_DEPTH_8:
4461 intel_dp->compliance.test_data.bpc = 8;
4462 break;
4463 default:
4464 return DP_TEST_NAK;
4465 }
4466
4467 intel_dp->compliance.test_data.video_pattern = test_pattern;
4468 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
4469 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
4470 /* Set test active flag here so userspace doesn't interrupt things */
4471 intel_dp->compliance.test_active = 1;
4472
4473 return DP_TEST_ACK;
4474 }
4475
4476 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
4477 {
4478 u8 test_result = DP_TEST_ACK;
4479 struct intel_connector *intel_connector = intel_dp->attached_connector;
4480 struct drm_connector *connector = &intel_connector->base;
4481
4482 if (intel_connector->detect_edid == NULL ||
4483 connector->edid_corrupt ||
4484 intel_dp->aux.i2c_defer_count > 6) {
4485 /* Check EDID read for NACKs, DEFERs and corruption
4486 * (DP CTS 1.2 Core r1.1)
4487 * 4.2.2.4 : Failed EDID read, I2C_NAK
4488 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4489 * 4.2.2.6 : EDID corruption detected
4490 * Use failsafe mode for all cases
4491 */
4492 if (intel_dp->aux.i2c_nack_count > 0 ||
4493 intel_dp->aux.i2c_defer_count > 0)
4494 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4495 intel_dp->aux.i2c_nack_count,
4496 intel_dp->aux.i2c_defer_count);
4497 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
4498 } else {
4499 struct edid *block = intel_connector->detect_edid;
4500
4501 /* We have to write the checksum
4502 * of the last block read
4503 */
4504 block += intel_connector->detect_edid->extensions;
4505
4506 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
4507 block->checksum) <= 0)
4508 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4509
4510 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4511 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
4512 }
4513
4514 /* Set test active flag here so userspace doesn't interrupt things */
4515 intel_dp->compliance.test_active = 1;
4516
4517 return test_result;
4518 }
4519
4520 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4521 {
4522 u8 test_result = DP_TEST_NAK;
4523 return test_result;
4524 }
4525
4526 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4527 {
4528 u8 response = DP_TEST_NAK;
4529 u8 request = 0;
4530 int status;
4531
4532 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
4533 if (status <= 0) {
4534 DRM_DEBUG_KMS("Could not read test request from sink\n");
4535 goto update_status;
4536 }
4537
4538 switch (request) {
4539 case DP_TEST_LINK_TRAINING:
4540 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4541 response = intel_dp_autotest_link_training(intel_dp);
4542 break;
4543 case DP_TEST_LINK_VIDEO_PATTERN:
4544 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4545 response = intel_dp_autotest_video_pattern(intel_dp);
4546 break;
4547 case DP_TEST_LINK_EDID_READ:
4548 DRM_DEBUG_KMS("EDID test requested\n");
4549 response = intel_dp_autotest_edid(intel_dp);
4550 break;
4551 case DP_TEST_LINK_PHY_TEST_PATTERN:
4552 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4553 response = intel_dp_autotest_phy_pattern(intel_dp);
4554 break;
4555 default:
4556 DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
4557 break;
4558 }
4559
4560 if (response & DP_TEST_ACK)
4561 intel_dp->compliance.test_type = request;
4562
4563 update_status:
4564 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
4565 if (status <= 0)
4566 DRM_DEBUG_KMS("Could not write test response to sink\n");
4567 }
4568
4569 static int
4570 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4571 {
4572 bool bret;
4573
4574 if (intel_dp->is_mst) {
4575 u8 esi[DP_DPRX_ESI_LEN] = { 0 };
4576 int ret = 0;
4577 int retry;
4578 bool handled;
4579
4580 WARN_ON_ONCE(intel_dp->active_mst_links < 0);
4581 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4582 go_again:
4583 if (bret) {
4584
4585 /* Check link status; esi[10] = DPCD 0x200c (DP_LANE0_1_STATUS_ESI) */
4586 if (intel_dp->active_mst_links > 0 &&
4587 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4588 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4589 intel_dp_start_link_train(intel_dp);
4590 intel_dp_stop_link_train(intel_dp);
4591 }
4592
4593 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4594 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4595
4596 if (handled) {
4597 for (retry = 0; retry < 3; retry++) {
4598 int wret;
4599 wret = drm_dp_dpcd_write(&intel_dp->aux,
4600 DP_SINK_COUNT_ESI + 1,
4601 &esi[1], 3);
4602 if (wret == 3) {
4603 break;
4604 }
4605 }
4606
4607 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4608 if (bret) {
4609 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4610 goto go_again;
4611 }
4612 } else
4613 ret = 0;
4614
4615 return ret;
4616 } else {
4617 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4618 intel_dp->is_mst = false;
4619 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4620 intel_dp->is_mst);
4621 }
4622 }
4623 return -EINVAL;
4624 }
4625
4626 static bool
4627 intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
4628 {
4629 u8 link_status[DP_LINK_STATUS_SIZE];
4630
4631 if (!intel_dp->link_trained)
4632 return false;
4633
4634 /*
4635 * While PSR source HW is enabled it controls the main link on its
4636 * own, enabling and disabling it to send frames, so a retrain
4637 * attempt would fail: the link might not be on, or training
4638 * patterns could get mixed with frame data, causing the retrain to fail.
4639 * Also, when exiting PSR the HW retrains the link anyway, fixing
4640 * any link status error.
4641 */
4642 if (intel_psr_enabled(intel_dp))
4643 return false;
4644
4645 if (!intel_dp_get_link_status(intel_dp, link_status))
4646 return false;
4647
4648 /*
4649 * Validate the cached values of intel_dp->link_rate and
4650 * intel_dp->lane_count before attempting to retrain.
4651 */
4652 if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
4653 intel_dp->lane_count))
4654 return false;
4655
4656 /* Retrain if Channel EQ or CR not ok */
4657 return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
4658 }
4659
4660 int intel_dp_retrain_link(struct intel_encoder *encoder,
4661 struct drm_modeset_acquire_ctx *ctx)
4662 {
4663 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4664 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4665 struct intel_connector *connector = intel_dp->attached_connector;
4666 struct drm_connector_state *conn_state;
4667 struct intel_crtc_state *crtc_state;
4668 struct intel_crtc *crtc;
4669 int ret;
4670
4671 /* FIXME handle the MST connectors as well */
4672
4673 if (!connector || connector->base.status != connector_status_connected)
4674 return 0;
4675
4676 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
4677 ctx);
4678 if (ret)
4679 return ret;
4680
4681 conn_state = connector->base.state;
4682
4683 crtc = to_intel_crtc(conn_state->crtc);
4684 if (!crtc)
4685 return 0;
4686
4687 ret = drm_modeset_lock(&crtc->base.mutex, ctx);
4688 if (ret)
4689 return ret;
4690
4691 crtc_state = to_intel_crtc_state(crtc->base.state);
4692
4693 WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));
4694
4695 if (!crtc_state->base.active)
4696 return 0;
4697
4698 if (conn_state->commit &&
4699 !try_wait_for_completion(&conn_state->commit->hw_done))
4700 return 0;
4701
4702 if (!intel_dp_needs_link_retrain(intel_dp))
4703 return 0;
4704
4705 /* Suppress underruns caused by re-training */
4706 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
4707 if (crtc_state->has_pch_encoder)
4708 intel_set_pch_fifo_underrun_reporting(dev_priv,
4709 intel_crtc_pch_transcoder(crtc), false);
4710
4711 intel_dp_start_link_train(intel_dp);
4712 intel_dp_stop_link_train(intel_dp);
4713
4714 /* Keep underrun reporting disabled until things are stable */
4715 intel_wait_for_vblank(dev_priv, crtc->pipe);
4716
4717 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
4718 if (crtc_state->has_pch_encoder)
4719 intel_set_pch_fifo_underrun_reporting(dev_priv,
4720 intel_crtc_pch_transcoder(crtc), true);
4721
4722 return 0;
4723 }
4724
4725 /*
4726 * If the display is now connected, check the link status;
4727 * there have been known issues of link loss triggering a
4728 * long pulse.
4729 *
4730 * Some sinks (e.g. the ASUS PB287Q) seem to perform some
4731 * weird HPD ping pong during modesets. So we can apparently
4732 * end up with HPD going low during a modeset, and then
4733 * going back up soon after. And once that happens we must
4734 * retrain the link to get a picture. That covers the case where
4735 * no userspace component reacted to the intermittent HPD dip.
4736 */
4737 static bool intel_dp_hotplug(struct intel_encoder *encoder,
4738 struct intel_connector *connector)
4739 {
4740 struct drm_modeset_acquire_ctx ctx;
4741 bool changed;
4742 int ret;
4743
4744 changed = intel_encoder_hotplug(encoder, connector);
4745
4746 drm_modeset_acquire_init(&ctx, 0);
4747
4748 for (;;) {
4749 ret = intel_dp_retrain_link(encoder, &ctx);
4750
4751 if (ret == -EDEADLK) {
4752 drm_modeset_backoff(&ctx);
4753 continue;
4754 }
4755
4756 break;
4757 }
4758
4759 drm_modeset_drop_locks(&ctx);
4760 drm_modeset_acquire_fini(&ctx);
4761 WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
4762
4763 return changed;
4764 }
4765
4766 static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
4767 {
4768 u8 val;
4769
4770 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
4771 return;
4772
4773 if (drm_dp_dpcd_readb(&intel_dp->aux,
4774 DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
4775 return;
4776
4777 drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
4778
4779 if (val & DP_AUTOMATED_TEST_REQUEST)
4780 intel_dp_handle_test_request(intel_dp);
4781
4782 if (val & DP_CP_IRQ)
4783 intel_hdcp_check_link(intel_dp->attached_connector);
4784
4785 if (val & DP_SINK_SPECIFIC_IRQ)
4786 DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
4787 }
4788
4789 /*
4790 * According to DP spec
4791 * 5.1.2:
4792 * 1. Read DPCD
4793 * 2. Configure link according to Receiver Capabilities
4794 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4795 * 4. Check link status on receipt of hot-plug interrupt
4796 *
4797 * intel_dp_short_pulse - handles short pulse interrupts
4798 * when full detection is not required.
4799 * Returns %true if the short pulse is handled and full detection
4800 * is NOT required, %false otherwise.
4801 */
4802 static bool
4803 intel_dp_short_pulse(struct intel_dp *intel_dp)
4804 {
4805 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4806 u8 old_sink_count = intel_dp->sink_count;
4807 bool ret;
4808
4809 /*
4810 * Clearing compliance test variables to allow capturing
4811 * of values for next automated test request.
4812 */
4813 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
4814
4815 /*
4816 * Now read the DPCD to see if it's actually running.
4817 * If the current value of sink count doesn't match
4818 * the value that was stored earlier, or the DPCD read failed,
4819 * we need to do full detection.
4820 */
4821 ret = intel_dp_get_dpcd(intel_dp);
4822
4823 if ((old_sink_count != intel_dp->sink_count) || !ret) {
4824 /* No need to proceed if we are going to do full detect */
4825 return false;
4826 }
4827
4828 intel_dp_check_service_irq(intel_dp);
4829
4830 /* Handle CEC interrupts, if any */
4831 drm_dp_cec_irq(&intel_dp->aux);
4832
4833 /* defer to the hotplug work for link retraining if needed */
4834 if (intel_dp_needs_link_retrain(intel_dp))
4835 return false;
4836
4837 intel_psr_short_pulse(intel_dp);
4838
4839 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
4840 DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
4841 /* Send a Hotplug Uevent to userspace to start modeset */
4842 drm_kms_helper_hotplug_event(&dev_priv->drm);
4843 }
4844
4845 return true;
4846 }
4847
4848 /* XXX this is probably wrong for multiple downstream ports */
4849 static enum drm_connector_status
4850 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4851 {
4852 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
4853 u8 *dpcd = intel_dp->dpcd;
4854 u8 type;
4855
4856 if (lspcon->active)
4857 lspcon_resume(lspcon);
4858
4859 if (!intel_dp_get_dpcd(intel_dp))
4860 return connector_status_disconnected;
4861
4862 if (intel_dp_is_edp(intel_dp))
4863 return connector_status_connected;
4864
4865 /* if there's no downstream port, we're done */
4866 if (!drm_dp_is_branch(dpcd))
4867 return connector_status_connected;
4868
4869 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4870 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4871 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4872
4873 return intel_dp->sink_count ?
4874 connector_status_connected : connector_status_disconnected;
4875 }
4876
4877 if (intel_dp_can_mst(intel_dp))
4878 return connector_status_connected;
4879
4880 /* If no HPD, poke DDC gently */
4881 if (drm_probe_ddc(&intel_dp->aux.ddc))
4882 return connector_status_connected;
4883
4884 /* Well we tried, say unknown for unreliable port types */
4885 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4886 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4887 if (type == DP_DS_PORT_TYPE_VGA ||
4888 type == DP_DS_PORT_TYPE_NON_EDID)
4889 return connector_status_unknown;
4890 } else {
4891 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4892 DP_DWN_STRM_PORT_TYPE_MASK;
4893 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4894 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4895 return connector_status_unknown;
4896 }
4897
4898 /* Anything else is out of spec, warn and ignore */
4899 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4900 return connector_status_disconnected;
4901 }
4902
4903 static enum drm_connector_status
4904 edp_detect(struct intel_dp *intel_dp)
4905 {
4906 return connector_status_connected;
4907 }
4908
4909 static bool ibx_digital_port_connected(struct intel_encoder *encoder)
4910 {
4911 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4912 u32 bit;
4913
4914 switch (encoder->hpd_pin) {
4915 case HPD_PORT_B:
4916 bit = SDE_PORTB_HOTPLUG;
4917 break;
4918 case HPD_PORT_C:
4919 bit = SDE_PORTC_HOTPLUG;
4920 break;
4921 case HPD_PORT_D:
4922 bit = SDE_PORTD_HOTPLUG;
4923 break;
4924 default:
4925 MISSING_CASE(encoder->hpd_pin);
4926 return false;
4927 }
4928
4929 return I915_READ(SDEISR) & bit;
4930 }
4931
4932 static bool cpt_digital_port_connected(struct intel_encoder *encoder)
4933 {
4934 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4935 u32 bit;
4936
4937 switch (encoder->hpd_pin) {
4938 case HPD_PORT_B:
4939 bit = SDE_PORTB_HOTPLUG_CPT;
4940 break;
4941 case HPD_PORT_C:
4942 bit = SDE_PORTC_HOTPLUG_CPT;
4943 break;
4944 case HPD_PORT_D:
4945 bit = SDE_PORTD_HOTPLUG_CPT;
4946 break;
4947 default:
4948 MISSING_CASE(encoder->hpd_pin);
4949 return false;
4950 }
4951
4952 return I915_READ(SDEISR) & bit;
4953 }
4954
4955 static bool spt_digital_port_connected(struct intel_encoder *encoder)
4956 {
4957 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4958 u32 bit;
4959
4960 switch (encoder->hpd_pin) {
4961 case HPD_PORT_A:
4962 bit = SDE_PORTA_HOTPLUG_SPT;
4963 break;
4964 case HPD_PORT_E:
4965 bit = SDE_PORTE_HOTPLUG_SPT;
4966 break;
4967 default:
4968 return cpt_digital_port_connected(encoder);
4969 }
4970
4971 return I915_READ(SDEISR) & bit;
4972 }
4973
4974 static bool g4x_digital_port_connected(struct intel_encoder *encoder)
4975 {
4976 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4977 u32 bit;
4978
4979 switch (encoder->hpd_pin) {
4980 case HPD_PORT_B:
4981 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4982 break;
4983 case HPD_PORT_C:
4984 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4985 break;
4986 case HPD_PORT_D:
4987 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4988 break;
4989 default:
4990 MISSING_CASE(encoder->hpd_pin);
4991 return false;
4992 }
4993
4994 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4995 }
4996
4997 static bool gm45_digital_port_connected(struct intel_encoder *encoder)
4998 {
4999 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5000 u32 bit;
5001
5002 switch (encoder->hpd_pin) {
5003 case HPD_PORT_B:
5004 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
5005 break;
5006 case HPD_PORT_C:
5007 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
5008 break;
5009 case HPD_PORT_D:
5010 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
5011 break;
5012 default:
5013 MISSING_CASE(encoder->hpd_pin);
5014 return false;
5015 }
5016
5017 return I915_READ(PORT_HOTPLUG_STAT) & bit;
5018 }
5019
5020 static bool ilk_digital_port_connected(struct intel_encoder *encoder)
5021 {
5022 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5023
5024 if (encoder->hpd_pin == HPD_PORT_A)
5025 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
5026 else
5027 return ibx_digital_port_connected(encoder);
5028 }
5029
5030 static bool snb_digital_port_connected(struct intel_encoder *encoder)
5031 {
5032 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5033
5034 if (encoder->hpd_pin == HPD_PORT_A)
5035 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
5036 else
5037 return cpt_digital_port_connected(encoder);
5038 }
5039
5040 static bool ivb_digital_port_connected(struct intel_encoder *encoder)
5041 {
5042 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5043
5044 if (encoder->hpd_pin == HPD_PORT_A)
5045 return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
5046 else
5047 return cpt_digital_port_connected(encoder);
5048 }
5049
5050 static bool bdw_digital_port_connected(struct intel_encoder *encoder)
5051 {
5052 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5053
5054 if (encoder->hpd_pin == HPD_PORT_A)
5055 return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
5056 else
5057 return cpt_digital_port_connected(encoder);
5058 }
5059
5060 static bool bxt_digital_port_connected(struct intel_encoder *encoder)
5061 {
5062 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5063 u32 bit;
5064
5065 switch (encoder->hpd_pin) {
5066 case HPD_PORT_A:
5067 bit = BXT_DE_PORT_HP_DDIA;
5068 break;
5069 case HPD_PORT_B:
5070 bit = BXT_DE_PORT_HP_DDIB;
5071 break;
5072 case HPD_PORT_C:
5073 bit = BXT_DE_PORT_HP_DDIC;
5074 break;
5075 default:
5076 MISSING_CASE(encoder->hpd_pin);
5077 return false;
5078 }
5079
5080 return I915_READ(GEN8_DE_PORT_ISR) & bit;
5081 }
5082
5083 static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
5084 struct intel_digital_port *intel_dig_port)
5085 {
5086 enum port port = intel_dig_port->base.port;
5087
5088 return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
5089 }
5090
5091 static const char *tc_type_name(enum tc_port_type type)
5092 {
5093 static const char * const names[] = {
5094 [TC_PORT_UNKNOWN] = "unknown",
5095 [TC_PORT_LEGACY] = "legacy",
5096 [TC_PORT_TYPEC] = "typec",
5097 [TC_PORT_TBT] = "tbt",
5098 };
5099
5100 if (WARN_ON(type >= ARRAY_SIZE(names)))
5101 type = TC_PORT_UNKNOWN;
5102
5103 return names[type];
5104 }
5105
5106 static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
5107 struct intel_digital_port *intel_dig_port,
5108 bool is_legacy, bool is_typec, bool is_tbt)
5109 {
5110 enum port port = intel_dig_port->base.port;
5111 enum tc_port_type old_type = intel_dig_port->tc_type;
5112
5113 WARN_ON(is_legacy + is_typec + is_tbt != 1);
5114
5115 if (is_legacy)
5116 intel_dig_port->tc_type = TC_PORT_LEGACY;
5117 else if (is_typec)
5118 intel_dig_port->tc_type = TC_PORT_TYPEC;
5119 else if (is_tbt)
5120 intel_dig_port->tc_type = TC_PORT_TBT;
5121 else
5122 return;
5123
5124 /* Types are not supposed to be changed at runtime. */
5125 WARN_ON(old_type != TC_PORT_UNKNOWN &&
5126 old_type != intel_dig_port->tc_type);
5127
5128 if (old_type != intel_dig_port->tc_type)
5129 DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port),
5130 tc_type_name(intel_dig_port->tc_type));
5131 }
5132
5133 /*
5134 * This function implements the first part of the Connect Flow described by our
5135 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
5136 * lanes, EDID, etc) is done as needed in the typical places.
5137 *
5138 * Unlike the other ports, type-C ports are not available to use as soon as we
5139 * get a hotplug. The type-C PHYs can be shared between multiple controllers:
5140 * display, USB, etc. As a result, handshaking through FIA is required around
5141 * connect and disconnect to cleanly transfer ownership with the controller and
5142 * set the type-C power state.
5143 *
5144 * We could opt to only do the connect flow when we actually try to use the AUX
5145 * channels or do a modeset, then immediately run the disconnect flow after
5146 * usage, but there are some implications on this for a dynamic environment:
5147 * things may go away or change behind our backs. So for now our driver is
5148 * always trying to acquire ownership of the controller as soon as it gets an
5149 * interrupt (or polls state and sees a port is connected) and only gives it
5150 * back when it sees a disconnect. Implementation of a more fine-grained model
5151 * will require a lot of coordination with user space and thorough testing for
5152 * the extra possible cases.
5153 */
5154 static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
5155 struct intel_digital_port *dig_port)
5156 {
5157 enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
5158 u32 val;
5159
5160 if (dig_port->tc_type != TC_PORT_LEGACY &&
5161 dig_port->tc_type != TC_PORT_TYPEC)
5162 return true;
5163
5164 val = I915_READ(PORT_TX_DFLEXDPPMS);
5165 if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) {
5166 DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port);
5167 WARN_ON(dig_port->tc_legacy_port);
5168 return false;
5169 }
5170
5171 /*
5172 * This function may be called many times in a row without an HPD event
5173 * in between, so try to avoid the write when we can.
5174 */
5175 val = I915_READ(PORT_TX_DFLEXDPCSSS);
5176 if (!(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port))) {
5177 val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
5178 I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
5179 }
5180
5181 /*
5182 * Now we have to re-check the live state, in case the port recently
5183 * became disconnected. Not necessary for legacy mode.
5184 */
5185 if (dig_port->tc_type == TC_PORT_TYPEC &&
5186 !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) {
5187 DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port);
5188 icl_tc_phy_disconnect(dev_priv, dig_port);
5189 return false;
5190 }
5191
5192 return true;
5193 }
5194
5195 /*
5196 * See the comment at the connect function. This implements the Disconnect
5197 * Flow.
5198 */
5199 void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
5200 struct intel_digital_port *dig_port)
5201 {
5202 enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
5203
5204 if (dig_port->tc_type == TC_PORT_UNKNOWN)
5205 return;
5206
5207 /*
5208 * The TBT disconnect flow is just reading the live status, which
5209 * was already done by the caller.
5210 */
5211 if (dig_port->tc_type == TC_PORT_TYPEC ||
5212 dig_port->tc_type == TC_PORT_LEGACY) {
5213 u32 val;
5214
5215 val = I915_READ(PORT_TX_DFLEXDPCSSS);
5216 val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
5217 I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
5218 }
5219
5220 DRM_DEBUG_KMS("Port %c TC type %s disconnected\n",
5221 port_name(dig_port->base.port),
5222 tc_type_name(dig_port->tc_type));
5223
5224 dig_port->tc_type = TC_PORT_UNKNOWN;
5225 }
5226
5227 /*
5228 * The type-C ports are different because even when they are connected, they may
5229 * not be available/usable by the graphics driver: see the comment on
5230 * icl_tc_phy_connect(). So in our driver instead of adding the additional
5231 * concept of "usable" and making everything check for "connected and usable", we
5232 * define a port as "connected" when it is not only connected, but also when it
5233 * is usable by the rest of the driver. That maintains the old assumption that
5234 * connected ports are usable, and avoids exposing to the users objects they
5235 * can't really use.
5236 */
5237 static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
5238 struct intel_digital_port *intel_dig_port)
5239 {
5240 enum port port = intel_dig_port->base.port;
5241 enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
5242 bool is_legacy, is_typec, is_tbt;
5243 u32 dpsp;
5244
5245 /*
5246 * WARN if we got a legacy port HPD, but VBT didn't mark the port as
5247 * legacy. Treat the port as legacy from now on.
5248 */
5249 if (WARN_ON(!intel_dig_port->tc_legacy_port &&
5250 I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)))
5251 intel_dig_port->tc_legacy_port = true;
5252 is_legacy = intel_dig_port->tc_legacy_port;
5253
5254 /*
5255 * The spec says we shouldn't be using the ISR bits for detecting
5256 * between TC and TBT. We should use DFLEXDPSP.
5257 */
5258 dpsp = I915_READ(PORT_TX_DFLEXDPSP);
5259 is_typec = dpsp & TC_LIVE_STATE_TC(tc_port);
5260 is_tbt = dpsp & TC_LIVE_STATE_TBT(tc_port);
5261
5262 if (!is_legacy && !is_typec && !is_tbt) {
5263 icl_tc_phy_disconnect(dev_priv, intel_dig_port);
5264
5265 return false;
5266 }
5267
5268 icl_update_tc_port_type(dev_priv, intel_dig_port, is_legacy, is_typec,
5269 is_tbt);
5270
5271 if (!icl_tc_phy_connect(dev_priv, intel_dig_port))
5272 return false;
5273
5274 return true;
5275 }
5276
5277 static bool icl_digital_port_connected(struct intel_encoder *encoder)
5278 {
5279 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5280 struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
5281
5282 if (intel_port_is_combophy(dev_priv, encoder->port))
5283 return icl_combo_port_connected(dev_priv, dig_port);
5284 else if (intel_port_is_tc(dev_priv, encoder->port))
5285 return icl_tc_port_connected(dev_priv, dig_port);
5286 else
5287 MISSING_CASE(encoder->hpd_pin);
5288
5289 return false;
5290 }
5291
5292 /*
5293 * intel_digital_port_connected - is the specified port connected?
5294 * @encoder: intel_encoder
5295 *
5296 * In cases where there's a connector physically connected but it can't be used
5297 * by our hardware we also return false, since the rest of the driver should
5298 * pretty much treat the port as disconnected. This is relevant for type-C
5299 * (starting on ICL) where there's ownership involved.
5300 *
5301 * Return %true if port is connected, %false otherwise.
5302 */
5303 bool intel_digital_port_connected(struct intel_encoder *encoder)
5304 {
5305 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5306
5307 if (HAS_GMCH(dev_priv)) {
5308 if (IS_GM45(dev_priv))
5309 return gm45_digital_port_connected(encoder);
5310 else
5311 return g4x_digital_port_connected(encoder);
5312 }
5313
5314 if (INTEL_GEN(dev_priv) >= 11)
5315 return icl_digital_port_connected(encoder);
5316 else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv))
5317 return spt_digital_port_connected(encoder);
5318 else if (IS_GEN9_LP(dev_priv))
5319 return bxt_digital_port_connected(encoder);
5320 else if (IS_GEN(dev_priv, 8))
5321 return bdw_digital_port_connected(encoder);
5322 else if (IS_GEN(dev_priv, 7))
5323 return ivb_digital_port_connected(encoder);
5324 else if (IS_GEN(dev_priv, 6))
5325 return snb_digital_port_connected(encoder);
5326 else if (IS_GEN(dev_priv, 5))
5327 return ilk_digital_port_connected(encoder);
5328
5329 MISSING_CASE(INTEL_GEN(dev_priv));
5330 return false;
5331 }
5332
5333 static struct edid *
5334 intel_dp_get_edid(struct intel_dp *intel_dp)
5335 {
5336 struct intel_connector *intel_connector = intel_dp->attached_connector;
5337
5338 /* use cached edid if we have one */
5339 if (intel_connector->edid) {
5340 /* invalid edid */
5341 if (IS_ERR(intel_connector->edid))
5342 return NULL;
5343
5344 return drm_edid_duplicate(intel_connector->edid);
5345 } else
5346 return drm_get_edid(&intel_connector->base,
5347 &intel_dp->aux.ddc);
5348 }
5349
5350 static void
5351 intel_dp_set_edid(struct intel_dp *intel_dp)
5352 {
5353 struct intel_connector *intel_connector = intel_dp->attached_connector;
5354 struct edid *edid;
5355
5356 intel_dp_unset_edid(intel_dp);
5357 edid = intel_dp_get_edid(intel_dp);
5358 intel_connector->detect_edid = edid;
5359
5360 intel_dp->has_audio = drm_detect_monitor_audio(edid);
5361 drm_dp_cec_set_edid(&intel_dp->aux, edid);
5362 }
5363
5364 static void
5365 intel_dp_unset_edid(struct intel_dp *intel_dp)
5366 {
5367 struct intel_connector *intel_connector = intel_dp->attached_connector;
5368
5369 drm_dp_cec_unset_edid(&intel_dp->aux);
5370 kfree(intel_connector->detect_edid);
5371 intel_connector->detect_edid = NULL;
5372
5373 intel_dp->has_audio = false;
5374 }
5375
5376 static int
5377 intel_dp_detect(struct drm_connector *connector,
5378 struct drm_modeset_acquire_ctx *ctx,
5379 bool force)
5380 {
5381 struct drm_i915_private *dev_priv = to_i915(connector->dev);
5382 struct intel_dp *intel_dp = intel_attached_dp(connector);
5383 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5384 struct intel_encoder *encoder = &dig_port->base;
5385 enum drm_connector_status status;
5386 enum intel_display_power_domain aux_domain =
5387 intel_aux_power_domain(dig_port);
5388 intel_wakeref_t wakeref;
5389
5390 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5391 connector->base.id, connector->name);
5392 WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
5393
5394 wakeref = intel_display_power_get(dev_priv, aux_domain);
5395
5396 /* Can't disconnect eDP */
5397 if (intel_dp_is_edp(intel_dp))
5398 status = edp_detect(intel_dp);
5399 else if (intel_digital_port_connected(encoder))
5400 status = intel_dp_detect_dpcd(intel_dp);
5401 else
5402 status = connector_status_disconnected;
5403
5404 if (status == connector_status_disconnected) {
5405 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
5406 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
5407
5408 if (intel_dp->is_mst) {
5409 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
5410 intel_dp->is_mst,
5411 intel_dp->mst_mgr.mst_state);
5412 intel_dp->is_mst = false;
5413 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
5414 intel_dp->is_mst);
5415 }
5416
5417 goto out;
5418 }
5419
5420 if (intel_dp->reset_link_params) {
5421 /* Initial max link lane count */
5422 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
5423
5424 /* Initial max link rate */
5425 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
5426
5427 intel_dp->reset_link_params = false;
5428 }
5429
5430 intel_dp_print_rates(intel_dp);
5431
5432 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
5433 if (INTEL_GEN(dev_priv) >= 11)
5434 intel_dp_get_dsc_sink_cap(intel_dp);
5435
5436 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
5437 drm_dp_is_branch(intel_dp->dpcd));
5438
5439 intel_dp_configure_mst(intel_dp);
5440
5441 if (intel_dp->is_mst) {
5442 /*
5443 * If we are in MST mode then this connector
5444 * won't report as connected, and won't have
5445 * an EDID of its own
5446 */
5447 status = connector_status_disconnected;
5448 goto out;
5449 }
5450
5451 /*
5452 * Some external monitors do not signal loss of link synchronization
5453 * with an IRQ_HPD, so force a link status check.
5454 */
5455 if (!intel_dp_is_edp(intel_dp)) {
5456 int ret;
5457
5458 ret = intel_dp_retrain_link(encoder, ctx);
5459 if (ret) {
5460 intel_display_power_put(dev_priv, aux_domain, wakeref);
5461 return ret;
5462 }
5463 }
5464
5465 /*
5466 * Clear the NACK and defer counts so that we get their exact values
5467 * for the EDID read, as required by compliance tests
5468 * 4.2.2.4 and 4.2.2.5.
5469 */
5470 intel_dp->aux.i2c_nack_count = 0;
5471 intel_dp->aux.i2c_defer_count = 0;
5472
5473 intel_dp_set_edid(intel_dp);
5474 if (intel_dp_is_edp(intel_dp) ||
5475 to_intel_connector(connector)->detect_edid)
5476 status = connector_status_connected;
5477
5478 intel_dp_check_service_irq(intel_dp);
5479
5480 out:
5481 if (status != connector_status_connected && !intel_dp->is_mst)
5482 intel_dp_unset_edid(intel_dp);
5483
5484 intel_display_power_put(dev_priv, aux_domain, wakeref);
5485 return status;
5486 }
5487
5488 static void
5489 intel_dp_force(struct drm_connector *connector)
5490 {
5491 struct intel_dp *intel_dp = intel_attached_dp(connector);
5492 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5493 struct intel_encoder *intel_encoder = &dig_port->base;
5494 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
5495 enum intel_display_power_domain aux_domain =
5496 intel_aux_power_domain(dig_port);
5497 intel_wakeref_t wakeref;
5498
5499 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5500 connector->base.id, connector->name);
5501 intel_dp_unset_edid(intel_dp);
5502
5503 if (connector->status != connector_status_connected)
5504 return;
5505
5506 wakeref = intel_display_power_get(dev_priv, aux_domain);
5507
5508 intel_dp_set_edid(intel_dp);
5509
5510 intel_display_power_put(dev_priv, aux_domain, wakeref);
5511 }
5512
5513 static int intel_dp_get_modes(struct drm_connector *connector)
5514 {
5515 struct intel_connector *intel_connector = to_intel_connector(connector);
5516 struct edid *edid;
5517
5518 edid = intel_connector->detect_edid;
5519 if (edid) {
5520 int ret = intel_connector_update_modes(connector, edid);
5521 if (ret)
5522 return ret;
5523 }
5524
5525 /* if eDP has no EDID, fall back to fixed mode */
5526 if (intel_dp_is_edp(intel_attached_dp(connector)) &&
5527 intel_connector->panel.fixed_mode) {
5528 struct drm_display_mode *mode;
5529
5530 mode = drm_mode_duplicate(connector->dev,
5531 intel_connector->panel.fixed_mode);
5532 if (mode) {
5533 drm_mode_probed_add(connector, mode);
5534 return 1;
5535 }
5536 }
5537
5538 return 0;
5539 }
5540
5541 static int
5542 intel_dp_connector_register(struct drm_connector *connector)
5543 {
5544 struct intel_dp *intel_dp = intel_attached_dp(connector);
5545 struct drm_device *dev = connector->dev;
5546 int ret;
5547
5548 ret = intel_connector_register(connector);
5549 if (ret)
5550 return ret;
5551
5552 i915_debugfs_connector_add(connector);
5553
5554 DRM_DEBUG_KMS("registering %s bus for %s\n",
5555 intel_dp->aux.name, connector->kdev->kobj.name);
5556
5557 intel_dp->aux.dev = connector->kdev;
5558 ret = drm_dp_aux_register(&intel_dp->aux);
5559 if (!ret)
5560 drm_dp_cec_register_connector(&intel_dp->aux,
5561 connector->name, dev->dev);
5562 return ret;
5563 }
5564
5565 static void
5566 intel_dp_connector_unregister(struct drm_connector *connector)
5567 {
5568 struct intel_dp *intel_dp = intel_attached_dp(connector);
5569
5570 drm_dp_cec_unregister_connector(&intel_dp->aux);
5571 drm_dp_aux_unregister(&intel_dp->aux);
5572 intel_connector_unregister(connector);
5573 }
5574
5575 void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
5576 {
5577 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
5578 struct intel_dp *intel_dp = &intel_dig_port->dp;
5579
5580 intel_dp_mst_encoder_cleanup(intel_dig_port);
5581 if (intel_dp_is_edp(intel_dp)) {
5582 intel_wakeref_t wakeref;
5583
5584 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5585 /*
5586 * vdd might still be enabled due to the delayed vdd off.
5587 * Make sure vdd is actually turned off here.
5588 */
5589 with_pps_lock(intel_dp, wakeref)
5590 edp_panel_vdd_off_sync(intel_dp);
5591
5592 if (intel_dp->edp_notifier.notifier_call) {
5593 unregister_reboot_notifier(&intel_dp->edp_notifier);
5594 intel_dp->edp_notifier.notifier_call = NULL;
5595 }
5596 }
5597
5598 intel_dp_aux_fini(intel_dp);
5599 }
5600
5601 static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
5602 {
5603 intel_dp_encoder_flush_work(encoder);
5604
5605 drm_encoder_cleanup(encoder);
5606 kfree(enc_to_dig_port(encoder));
5607 }
5608
5609 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
5610 {
5611 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
5612 intel_wakeref_t wakeref;
5613
5614 if (!intel_dp_is_edp(intel_dp))
5615 return;
5616
5617 /*
5618 * vdd might still be enabled due to the delayed vdd off.
5619 * Make sure vdd is actually turned off here.
5620 */
5621 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5622 with_pps_lock(intel_dp, wakeref)
5623 edp_panel_vdd_off_sync(intel_dp);
5624 }
5625
5626 static
5627 int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
5628 u8 *an)
5629 {
5630 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base);
5631 static const struct drm_dp_aux_msg msg = {
5632 .request = DP_AUX_NATIVE_WRITE,
5633 .address = DP_AUX_HDCP_AKSV,
5634 .size = DRM_HDCP_KSV_LEN,
5635 };
5636 u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
5637 ssize_t dpcd_ret;
5638 int ret;
5639
5640 /* Output An first, that's easy */
5641 dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
5642 an, DRM_HDCP_AN_LEN);
5643 if (dpcd_ret != DRM_HDCP_AN_LEN) {
5644 DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
5645 dpcd_ret);
5646 return dpcd_ret >= 0 ? -EIO : dpcd_ret;
5647 }
5648
5649 /*
5650 * Since Aksv is Oh-So-Secret, we can't access it in software. So in
5651 * order to get it on the wire, we need to create the AUX header as if
5652 * we were writing the data, and then tickle the hardware to output the
5653 * data once the header is sent out.
5654 */
5655 intel_dp_aux_header(txbuf, &msg);
5656
5657 ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size,
5658 rxbuf, sizeof(rxbuf),
5659 DP_AUX_CH_CTL_AUX_AKSV_SELECT);
5660 if (ret < 0) {
5661 DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret);
5662 return ret;
5663 } else if (ret == 0) {
5664 DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
5665 return -EIO;
5666 }
5667
5668 reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
5669 if (reply != DP_AUX_NATIVE_REPLY_ACK) {
5670 DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
5671 reply);
5672 return -EIO;
5673 }
5674 return 0;
5675 }
5676
5677 static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
5678 u8 *bksv)
5679 {
5680 ssize_t ret;
5681 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
5682 DRM_HDCP_KSV_LEN);
5683 if (ret != DRM_HDCP_KSV_LEN) {
5684 DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret);
5685 return ret >= 0 ? -EIO : ret;
5686 }
5687 return 0;
5688 }
5689
5690 static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
5691 u8 *bstatus)
5692 {
5693 ssize_t ret;
5694 /*
5695 * For some reason the HDMI and DP HDCP specs call this register
5696 * definition by different names. In the HDMI spec, it's called BSTATUS,
5697 * but in DP it's called BINFO.
5698 */
5699 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
5700 bstatus, DRM_HDCP_BSTATUS_LEN);
5701 if (ret != DRM_HDCP_BSTATUS_LEN) {
5702 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5703 return ret >= 0 ? -EIO : ret;
5704 }
5705 return 0;
5706 }
5707
5708 static
5709 int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
5710 u8 *bcaps)
5711 {
5712 ssize_t ret;
5713
5714 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
5715 bcaps, 1);
5716 if (ret != 1) {
5717 DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret);
5718 return ret >= 0 ? -EIO : ret;
5719 }
5720
5721 return 0;
5722 }
5723
5724 static
5725 int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
5726 bool *repeater_present)
5727 {
5728 ssize_t ret;
5729 u8 bcaps;
5730
5731 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
5732 if (ret)
5733 return ret;
5734
5735 *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT;
5736 return 0;
5737 }
5738
5739 static
5740 int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
5741 u8 *ri_prime)
5742 {
5743 ssize_t ret;
5744 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
5745 ri_prime, DRM_HDCP_RI_LEN);
5746 if (ret != DRM_HDCP_RI_LEN) {
5747 DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret);
5748 return ret >= 0 ? -EIO : ret;
5749 }
5750 return 0;
5751 }
5752
5753 static
5754 int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
5755 bool *ksv_ready)
5756 {
5757 ssize_t ret;
5758 u8 bstatus;
5759 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5760 &bstatus, 1);
5761 if (ret != 1) {
5762 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5763 return ret >= 0 ? -EIO : ret;
5764 }
5765 *ksv_ready = bstatus & DP_BSTATUS_READY;
5766 return 0;
5767 }
5768
5769 static
5770 int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
5771 int num_downstream, u8 *ksv_fifo)
5772 {
5773 ssize_t ret;
5774 int i;
5775
5776 /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
5777 for (i = 0; i < num_downstream; i += 3) {
5778 size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
5779 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5780 DP_AUX_HDCP_KSV_FIFO,
5781 ksv_fifo + i * DRM_HDCP_KSV_LEN,
5782 len);
5783 if (ret != len) {
5784 DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
5785 i, ret);
5786 return ret >= 0 ? -EIO : ret;
5787 }
5788 }
5789 return 0;
5790 }
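/*
 * Worked example for the windowed read above (illustrative numbers):
 * with num_downstream = 7 the loop issues three AUX reads of
 * min(7 - 0, 3) * 5 = 15, min(7 - 3, 3) * 5 = 15 and
 * min(7 - 6, 3) * 5 = 5 bytes, filling ksv_fifo with all
 * 7 * DRM_HDCP_KSV_LEN = 35 bytes through the 15 byte window.
 */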
5791
5792 static
5793 int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
5794 int i, u32 *part)
5795 {
5796 ssize_t ret;
5797
5798 if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
5799 return -EINVAL;
5800
5801 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5802 DP_AUX_HDCP_V_PRIME(i), part,
5803 DRM_HDCP_V_PRIME_PART_LEN);
5804 if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
5805 DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
5806 return ret >= 0 ? -EIO : ret;
5807 }
5808 return 0;
5809 }
5810
5811 static
5812 int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
5813 bool enable)
5814 {
5815 /* Not used for single stream DisplayPort setups */
5816 return 0;
5817 }
5818
5819 static
5820 bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
5821 {
5822 ssize_t ret;
5823 u8 bstatus;
5824
5825 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5826 &bstatus, 1);
5827 if (ret != 1) {
5828 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5829 return false;
5830 }
5831
5832 return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ));
5833 }
5834
5835 static
5836 int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
5837 bool *hdcp_capable)
5838 {
5839 ssize_t ret;
5840 u8 bcaps;
5841
5842 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
5843 if (ret)
5844 return ret;
5845
5846 *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
5847 return 0;
5848 }
5849
5850 static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
5851 .write_an_aksv = intel_dp_hdcp_write_an_aksv,
5852 .read_bksv = intel_dp_hdcp_read_bksv,
5853 .read_bstatus = intel_dp_hdcp_read_bstatus,
5854 .repeater_present = intel_dp_hdcp_repeater_present,
5855 .read_ri_prime = intel_dp_hdcp_read_ri_prime,
5856 .read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
5857 .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
5858 .read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
5859 .toggle_signalling = intel_dp_hdcp_toggle_signalling,
5860 .check_link = intel_dp_hdcp_check_link,
5861 .hdcp_capable = intel_dp_hdcp_capable,
5862 };
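/*
 * Illustrative note (not new driver code): this shim is handed to the
 * HDCP core from intel_dp_init_connector() further below, roughly as
 *
 *	intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
 *
 * after which the core drives HDCP authentication through these hooks.
 */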
5863
5864 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
5865 {
5866 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
5867 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5868
5869 lockdep_assert_held(&dev_priv->pps_mutex);
5870
5871 if (!edp_have_panel_vdd(intel_dp))
5872 return;
5873
5874 /*
5875 * The VDD bit needs a power domain reference, so if the bit is
5876 * already enabled when we boot or resume, grab this reference and
5877 * schedule a vdd off, so we don't hold on to the reference
5878 * indefinitely.
5879 */
5880 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
5881 intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
5882
5883 edp_panel_vdd_schedule_off(intel_dp);
5884 }
5885
5886 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
5887 {
5888 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
5889 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
5890 enum pipe pipe;
5891
5892 if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
5893 encoder->port, &pipe))
5894 return pipe;
5895
5896 return INVALID_PIPE;
5897 }
5898
5899 void intel_dp_encoder_reset(struct drm_encoder *encoder)
5900 {
5901 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
5902 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
5903 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
5904 intel_wakeref_t wakeref;
5905
5906 if (!HAS_DDI(dev_priv))
5907 intel_dp->DP = I915_READ(intel_dp->output_reg);
5908
5909 if (lspcon->active)
5910 lspcon_resume(lspcon);
5911
5912 intel_dp->reset_link_params = true;
5913
5914 with_pps_lock(intel_dp, wakeref) {
5915 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5916 intel_dp->active_pipe = vlv_active_pipe(intel_dp);
5917
5918 if (intel_dp_is_edp(intel_dp)) {
5919 /*
5920 * Reinit the power sequencer, in case BIOS did
5921 * something nasty with it.
5922 */
5923 intel_dp_pps_init(intel_dp);
5924 intel_edp_panel_vdd_sanitize(intel_dp);
5925 }
5926 }
5927 }
5928
5929 static const struct drm_connector_funcs intel_dp_connector_funcs = {
5930 .force = intel_dp_force,
5931 .fill_modes = drm_helper_probe_single_connector_modes,
5932 .atomic_get_property = intel_digital_connector_atomic_get_property,
5933 .atomic_set_property = intel_digital_connector_atomic_set_property,
5934 .late_register = intel_dp_connector_register,
5935 .early_unregister = intel_dp_connector_unregister,
5936 .destroy = intel_connector_destroy,
5937 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5938 .atomic_duplicate_state = intel_digital_connector_duplicate_state,
5939 };
5940
5941 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
5942 .detect_ctx = intel_dp_detect,
5943 .get_modes = intel_dp_get_modes,
5944 .mode_valid = intel_dp_mode_valid,
5945 .atomic_check = intel_digital_connector_atomic_check,
5946 };
5947
5948 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
5949 .reset = intel_dp_encoder_reset,
5950 .destroy = intel_dp_encoder_destroy,
5951 };
5952
5953 enum irqreturn
5954 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5955 {
5956 struct intel_dp *intel_dp = &intel_dig_port->dp;
5957 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
5958 enum irqreturn ret = IRQ_NONE;
5959 intel_wakeref_t wakeref;
5960
5961 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5962 /*
5963 * vdd off can generate a long pulse on eDP which
5964 * would require vdd on to handle it, and thus we
5965 * would end up in an endless cycle of
5966 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5967 */
5968 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5969 port_name(intel_dig_port->base.port));
5970 return IRQ_HANDLED;
5971 }
5972
5973 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5974 port_name(intel_dig_port->base.port),
5975 long_hpd ? "long" : "short");
5976
5977 if (long_hpd) {
5978 intel_dp->reset_link_params = true;
5979 return IRQ_NONE;
5980 }
5981
5982 wakeref = intel_display_power_get(dev_priv,
5983 intel_aux_power_domain(intel_dig_port));
5984
5985 if (intel_dp->is_mst) {
5986 if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
5987 /*
5988 * If we were in MST mode, and the device is not
5989 * there, get out of MST mode
5990 */
5991 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
5992 intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5993 intel_dp->is_mst = false;
5994 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
5995 intel_dp->is_mst);
5996 goto put_power;
5997 }
5998 }
5999
6000 if (!intel_dp->is_mst) {
6001 bool handled;
6002
6003 handled = intel_dp_short_pulse(intel_dp);
6004
6005 if (!handled)
6006 goto put_power;
6007 }
6008
6009 ret = IRQ_HANDLED;
6010
6011 put_power:
6012 intel_display_power_put(dev_priv,
6013 intel_aux_power_domain(intel_dig_port),
6014 wakeref);
6015
6016 return ret;
6017 }
6018
6019 /* check the VBT to see whether the eDP is on another port */
6020 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
6021 {
6022 /*
6023 * eDP is not supported on g4x, so bail out early just
6024 * for a bit of extra safety in case the VBT is bonkers.
6025 */
6026 if (INTEL_GEN(dev_priv) < 5)
6027 return false;
6028
6029 if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
6030 return true;
6031
6032 return intel_bios_is_port_edp(dev_priv, port);
6033 }
6034
6035 static void
6036 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
6037 {
6038 struct drm_i915_private *dev_priv = to_i915(connector->dev);
6039 enum port port = dp_to_dig_port(intel_dp)->base.port;
6040
6041 if (!IS_G4X(dev_priv) && port != PORT_A)
6042 intel_attach_force_audio_property(connector);
6043
6044 intel_attach_broadcast_rgb_property(connector);
6045 if (HAS_GMCH(dev_priv))
6046 drm_connector_attach_max_bpc_property(connector, 6, 10);
6047 else if (INTEL_GEN(dev_priv) >= 5)
6048 drm_connector_attach_max_bpc_property(connector, 6, 12);
6049
6050 if (intel_dp_is_edp(intel_dp)) {
6051 u32 allowed_scalers;
6052
6053 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
6054 if (!HAS_GMCH(dev_priv))
6055 allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
6056
6057 drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
6058
6059 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
6060
6061 }
6062 }
6063
6064 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
6065 {
6066 intel_dp->panel_power_off_time = ktime_get_boottime();
6067 intel_dp->last_power_on = jiffies;
6068 intel_dp->last_backlight_off = jiffies;
6069 }
6070
6071 static void
6072 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
6073 {
6074 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6075 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
6076 struct pps_registers regs;
6077
6078 intel_pps_get_registers(intel_dp, &regs);
6079
6080 /* Workaround: Need to write PP_CONTROL with the unlock key as
6081 * the very first thing. */
6082 pp_ctl = ironlake_get_pp_control(intel_dp);
6083
6084 pp_on = I915_READ(regs.pp_on);
6085 pp_off = I915_READ(regs.pp_off);
6086 if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv) &&
6087 !HAS_PCH_ICP(dev_priv)) {
6088 I915_WRITE(regs.pp_ctrl, pp_ctl);
6089 pp_div = I915_READ(regs.pp_div);
6090 }
6091
6092 /* Pull timing values out of registers */
6093 seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
6094 PANEL_POWER_UP_DELAY_SHIFT;
6095
6096 seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
6097 PANEL_LIGHT_ON_DELAY_SHIFT;
6098
6099 seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
6100 PANEL_LIGHT_OFF_DELAY_SHIFT;
6101
6102 seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
6103 PANEL_POWER_DOWN_DELAY_SHIFT;
6104
6105 if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
6106 HAS_PCH_ICP(dev_priv)) {
6107 seq->t11_t12 = ((pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
6108 BXT_POWER_CYCLE_DELAY_SHIFT) * 1000;
6109 } else {
6110 seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
6111 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
6112 }
6113 }
6114
6115 static void
6116 intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
6117 {
6118 DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
6119 state_name,
6120 seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
6121 }
6122
6123 static void
6124 intel_pps_verify_state(struct intel_dp *intel_dp)
6125 {
6126 struct edp_power_seq hw;
6127 struct edp_power_seq *sw = &intel_dp->pps_delays;
6128
6129 intel_pps_readout_hw_state(intel_dp, &hw);
6130
6131 if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
6132 hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
6133 DRM_ERROR("PPS state mismatch\n");
6134 intel_pps_dump_state("sw", sw);
6135 intel_pps_dump_state("hw", &hw);
6136 }
6137 }
6138
6139 static void
6140 intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
6141 {
6142 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6143 struct edp_power_seq cur, vbt, spec,
6144 *final = &intel_dp->pps_delays;
6145
6146 lockdep_assert_held(&dev_priv->pps_mutex);
6147
6148 /* already initialized? */
6149 if (final->t11_t12 != 0)
6150 return;
6151
6152 intel_pps_readout_hw_state(intel_dp, &cur);
6153
6154 intel_pps_dump_state("cur", &cur);
6155
6156 vbt = dev_priv->vbt.edp.pps;
6157 /* On the Toshiba Satellite P50-C-18C system the VBT T12 delay
6158 * of 500ms appears to be too short. Occasionally the panel
6159 * just fails to power back on. Increasing the delay to 1300ms
6160 * seems sufficient to avoid this problem.
6161 */
6162 if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
6163 vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
6164 DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
6165 vbt.t11_t12);
6166 }
6167 /* The T11_T12 delay is special: it is specified in units of 100ms,
6168 * zero based in the hw (so we need to add 100 ms). The sw vbt
6169 * table, however, multiplies it by 1000 to make it in units of
6170 * 100usec, too. */
6171 vbt.t11_t12 += 100 * 10;
6172
6173 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
6174 * our hw here, which are all in 100usec. */
6175 spec.t1_t3 = 210 * 10;
6176 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
6177 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
6178 spec.t10 = 500 * 10;
6179 /* This one is special and actually in units of 100ms, zero
6180 * based in the hw (so we need to add 100 ms). The sw vbt
6181 * table multiplies it by 1000 to make it in units of 100usec,
6182 * too. */
6183 spec.t11_t12 = (510 + 100) * 10;
6184
6185 intel_pps_dump_state("vbt", &vbt);
6186
6187 /* Use the max of the register settings and vbt. If both are
6188 * unset, fall back to the spec limits. */
6189 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
6190 spec.field : \
6191 max(cur.field, vbt.field))
6192 assign_final(t1_t3);
6193 assign_final(t8);
6194 assign_final(t9);
6195 assign_final(t10);
6196 assign_final(t11_t12);
6197 #undef assign_final
6198
6199 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
6200 intel_dp->panel_power_up_delay = get_delay(t1_t3);
6201 intel_dp->backlight_on_delay = get_delay(t8);
6202 intel_dp->backlight_off_delay = get_delay(t9);
6203 intel_dp->panel_power_down_delay = get_delay(t10);
6204 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
6205 #undef get_delay
6206
6207 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
6208 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
6209 intel_dp->panel_power_cycle_delay);
6210
6211 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
6212 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
6213
6214 /*
6215 * We override the HW backlight delays to 1 because we do manual waits
6216 * on them. For T8, even BSpec recommends doing it. For T9, if we
6217 * don't do this, we'll end up waiting for the backlight off delay
6218 * twice: once when we do the manual sleep, and once when we disable
6219 * the panel and wait for the PP_STATUS bit to become zero.
6220 */
6221 final->t8 = 1;
6222 final->t9 = 1;
6223
6224 /*
6225 * HW has only a 100msec granularity for t11_t12 so round it up
6226 * accordingly.
6227 */
6228 final->t11_t12 = roundup(final->t11_t12, 100 * 10);
6229 }
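/*
 * Worked example for the unit juggling above (illustrative, values
 * assumed): a VBT T12 of 500ms is stored as 500 * 10 = 5000 in 100usec
 * units; adding the zero-based 100 * 10 gives t11_t12 = 6000, i.e.
 * 600ms. get_delay() then yields DIV_ROUND_UP(6000, 10) = 600ms for
 * panel_power_cycle_delay, and the final roundup() leaves 6000
 * unchanged since it is already a multiple of 100 * 10.
 */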
6230
6231 static void
6232 intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
6233 bool force_disable_vdd)
6234 {
6235 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6236 u32 pp_on, pp_off, pp_div, port_sel = 0;
6237 int div = dev_priv->rawclk_freq / 1000;
6238 struct pps_registers regs;
6239 enum port port = dp_to_dig_port(intel_dp)->base.port;
6240 const struct edp_power_seq *seq = &intel_dp->pps_delays;
6241
6242 lockdep_assert_held(&dev_priv->pps_mutex);
6243
6244 intel_pps_get_registers(intel_dp, &regs);
6245
6246 /*
6247 * On some VLV machines the BIOS can leave the VDD
6248 * enabled even on power sequencers which aren't
6249 * hooked up to any port. This would mess up the
6250 * power domain tracking the first time we pick
6251 * one of these power sequencers for use since
6252 * edp_panel_vdd_on() would notice that the VDD was
6253 * already on and therefore wouldn't grab the power
6254 * domain reference. Disable VDD first to avoid this.
6255 * This also avoids spuriously turning the VDD on as
6256 * soon as the new power sequencer gets initialized.
6257 */
6258 if (force_disable_vdd) {
6259 u32 pp = ironlake_get_pp_control(intel_dp);
6260
6261 WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
6262
6263 if (pp & EDP_FORCE_VDD)
6264 DRM_DEBUG_KMS("VDD already on, disabling first\n");
6265
6266 pp &= ~EDP_FORCE_VDD;
6267
6268 I915_WRITE(regs.pp_ctrl, pp);
6269 }
6270
6271 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
6272 (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
6273 pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
6274 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
6275 /* Compute the divisor for the pp clock, simply match the Bspec
6276 * formula. */
6277 if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
6278 HAS_PCH_ICP(dev_priv)) {
6279 pp_div = I915_READ(regs.pp_ctrl);
6280 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
6281 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
6282 << BXT_POWER_CYCLE_DELAY_SHIFT);
6283 } else {
6284 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
6285 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
6286 << PANEL_POWER_CYCLE_DELAY_SHIFT);
6287 }
6288
6289 /* Haswell doesn't have any port selection bits for the panel
6290 * power sequencer any more. */
6291 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
6292 port_sel = PANEL_PORT_SELECT_VLV(port);
6293 } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
6294 switch (port) {
6295 case PORT_A:
6296 port_sel = PANEL_PORT_SELECT_DPA;
6297 break;
6298 case PORT_C:
6299 port_sel = PANEL_PORT_SELECT_DPC;
6300 break;
6301 case PORT_D:
6302 port_sel = PANEL_PORT_SELECT_DPD;
6303 break;
6304 default:
6305 MISSING_CASE(port);
6306 break;
6307 }
6308 }
6309
6310 pp_on |= port_sel;
6311
6312 I915_WRITE(regs.pp_on, pp_on);
6313 I915_WRITE(regs.pp_off, pp_off);
6314 if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
6315 HAS_PCH_ICP(dev_priv))
6316 I915_WRITE(regs.pp_ctrl, pp_div);
6317 else
6318 I915_WRITE(regs.pp_div, pp_div);
6319
6320 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
6321 I915_READ(regs.pp_on),
6322 I915_READ(regs.pp_off),
6323 (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
6324 HAS_PCH_ICP(dev_priv)) ?
6325 (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
6326 I915_READ(regs.pp_div));
6327 }
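/*
 * Worked example for the divisor computation above (illustrative,
 * assuming a 24 MHz raw clock, i.e. rawclk_freq = 24000 kHz): div =
 * 24000 / 1000 = 24, so the reference divider field is
 * (100 * 24) / 2 - 1 = 1199, while the power cycle delay field is
 * DIV_ROUND_UP(seq->t11_t12, 1000), i.e. the delay in 100ms units.
 */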
6328
6329 static void intel_dp_pps_init(struct intel_dp *intel_dp)
6330 {
6331 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6332
6333 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
6334 vlv_initial_power_sequencer_setup(intel_dp);
6335 } else {
6336 intel_dp_init_panel_power_sequencer(intel_dp);
6337 intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
6338 }
6339 }
6340
6341 /**
6342 * intel_dp_set_drrs_state - program registers for RR switch to take effect
6343 * @dev_priv: i915 device
6344 * @crtc_state: a pointer to the active intel_crtc_state
6345 * @refresh_rate: RR to be programmed
6346 *
6347 * This function gets called when refresh rate (RR) has to be changed from
6348 * one frequency to another. Switches can be between high and low RR
6349 * supported by the panel or to any other RR based on media playback (in
6350 * this case, RR value needs to be passed from user space).
6351 *
6352 * The caller of this function needs to hold dev_priv->drrs.mutex.
6353 */
6354 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
6355 const struct intel_crtc_state *crtc_state,
6356 int refresh_rate)
6357 {
6358 struct intel_encoder *encoder;
6359 struct intel_digital_port *dig_port = NULL;
6360 struct intel_dp *intel_dp = dev_priv->drrs.dp;
6361 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
6362 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
6363
6364 if (refresh_rate <= 0) {
6365 DRM_DEBUG_KMS("Refresh rate should be a positive non-zero value.\n");
6366 return;
6367 }
6368
6369 if (intel_dp == NULL) {
6370 DRM_DEBUG_KMS("DRRS not supported.\n");
6371 return;
6372 }
6373
6374 dig_port = dp_to_dig_port(intel_dp);
6375 encoder = &dig_port->base;
6376
6377 if (!intel_crtc) {
6378 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
6379 return;
6380 }
6381
6382 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
6383 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
6384 return;
6385 }
6386
6387 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
6388 refresh_rate)
6389 index = DRRS_LOW_RR;
6390
6391 if (index == dev_priv->drrs.refresh_rate_type) {
6392 DRM_DEBUG_KMS(
6393 "DRRS requested for previously set RR...ignoring\n");
6394 return;
6395 }
6396
6397 if (!crtc_state->base.active) {
6398 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not active\n");
6399 return;
6400 }
6401
6402 if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
6403 switch (index) {
6404 case DRRS_HIGH_RR:
6405 intel_dp_set_m_n(crtc_state, M1_N1);
6406 break;
6407 case DRRS_LOW_RR:
6408 intel_dp_set_m_n(crtc_state, M2_N2);
6409 break;
6410 case DRRS_MAX_RR:
6411 default:
6412 DRM_ERROR("Unsupported refresh rate type\n");
6413 }
6414 } else if (INTEL_GEN(dev_priv) > 6) {
6415 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
6416 u32 val;
6417
6418 val = I915_READ(reg);
6419 if (index > DRRS_HIGH_RR) {
6420 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6421 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
6422 else
6423 val |= PIPECONF_EDP_RR_MODE_SWITCH;
6424 } else {
6425 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6426 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
6427 else
6428 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
6429 }
6430 I915_WRITE(reg, val);
6431 }
6432
6433 dev_priv->drrs.refresh_rate_type = index;
6434
6435 DRM_DEBUG_KMS("eDP refresh rate set to %dHz\n", refresh_rate);
6436 }
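/*
 * Illustrative caller sketch (not new driver code): per the kernel-doc
 * above, any caller must hold the DRRS mutex around the RR switch, as
 * the users later in this file do; refresh_rate is a hypothetical
 * target in Hz:
 *
 *	mutex_lock(&dev_priv->drrs.mutex);
 *	intel_dp_set_drrs_state(dev_priv, crtc_state, refresh_rate);
 *	mutex_unlock(&dev_priv->drrs.mutex);
 */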
6437
6438 /**
6439 * intel_edp_drrs_enable - init drrs struct if supported
6440 * @intel_dp: DP struct
6441 * @crtc_state: A pointer to the active crtc state.
6442 *
6443 * Initializes frontbuffer_bits and drrs.dp
6444 */
6445 void intel_edp_drrs_enable(struct intel_dp *intel_dp,
6446 const struct intel_crtc_state *crtc_state)
6447 {
6448 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6449
6450 if (!crtc_state->has_drrs) {
6451 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
6452 return;
6453 }
6454
6455 if (dev_priv->psr.enabled) {
6456 DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
6457 return;
6458 }
6459
6460 mutex_lock(&dev_priv->drrs.mutex);
6461 if (dev_priv->drrs.dp) {
6462 DRM_DEBUG_KMS("DRRS already enabled\n");
6463 goto unlock;
6464 }
6465
6466 dev_priv->drrs.busy_frontbuffer_bits = 0;
6467
6468 dev_priv->drrs.dp = intel_dp;
6469
6470 unlock:
6471 mutex_unlock(&dev_priv->drrs.mutex);
6472 }
6473
6474 /**
6475 * intel_edp_drrs_disable - Disable DRRS
6476 * @intel_dp: DP struct
6477 * @old_crtc_state: Pointer to old crtc_state.
6478 *
6479 */
6480 void intel_edp_drrs_disable(struct intel_dp *intel_dp,
6481 const struct intel_crtc_state *old_crtc_state)
6482 {
6483 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6484
6485 if (!old_crtc_state->has_drrs)
6486 return;
6487
6488 mutex_lock(&dev_priv->drrs.mutex);
6489 if (!dev_priv->drrs.dp) {
6490 mutex_unlock(&dev_priv->drrs.mutex);
6491 return;
6492 }
6493
6494 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6495 intel_dp_set_drrs_state(dev_priv, old_crtc_state,
6496 intel_dp->attached_connector->panel.fixed_mode->vrefresh);
6497
6498 dev_priv->drrs.dp = NULL;
6499 mutex_unlock(&dev_priv->drrs.mutex);
6500
6501 cancel_delayed_work_sync(&dev_priv->drrs.work);
6502 }
6503
6504 static void intel_edp_drrs_downclock_work(struct work_struct *work)
6505 {
6506 struct drm_i915_private *dev_priv =
6507 container_of(work, typeof(*dev_priv), drrs.work.work);
6508 struct intel_dp *intel_dp;
6509
6510 mutex_lock(&dev_priv->drrs.mutex);
6511
6512 intel_dp = dev_priv->drrs.dp;
6513
6514 if (!intel_dp)
6515 goto unlock;
6516
6517 /*
6518 * The delayed work can race with an invalidate, hence we need to
6519 * recheck.
6520 */
6521
6522 if (dev_priv->drrs.busy_frontbuffer_bits)
6523 goto unlock;
6524
6525 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
6526 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
6527
6528 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6529 intel_dp->attached_connector->panel.downclock_mode->vrefresh);
6530 }
6531
6532 unlock:
6533 mutex_unlock(&dev_priv->drrs.mutex);
6534 }
6535
6536 /**
6537 * intel_edp_drrs_invalidate - Disable Idleness DRRS
6538 * @dev_priv: i915 device
6539 * @frontbuffer_bits: frontbuffer plane tracking bits
6540 *
6541 * This function gets called every time rendering on the given planes starts.
6542 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
6543 *
6544 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
6545 */
6546 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
6547 unsigned int frontbuffer_bits)
6548 {
6549 struct drm_crtc *crtc;
6550 enum pipe pipe;
6551
6552 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
6553 return;
6554
6555 cancel_delayed_work(&dev_priv->drrs.work);
6556
6557 mutex_lock(&dev_priv->drrs.mutex);
6558 if (!dev_priv->drrs.dp) {
6559 mutex_unlock(&dev_priv->drrs.mutex);
6560 return;
6561 }
6562
6563 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
6564 pipe = to_intel_crtc(crtc)->pipe;
6565
6566 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
6567 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
6568
6569 /* invalidate means busy screen hence upclock */
6570 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6571 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6572 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
6573
6574 mutex_unlock(&dev_priv->drrs.mutex);
6575 }
6576
6577 /**
6578 * intel_edp_drrs_flush - Restart Idleness DRRS
6579 * @dev_priv: i915 device
6580 * @frontbuffer_bits: frontbuffer plane tracking bits
6581 *
6582 * This function gets called every time rendering on the given planes has
6583 * completed or a flip on a crtc is completed. So DRRS should be upclocked
6584 * (LOW_RR -> HIGH_RR). Idleness detection should also be restarted,
6585 * if no other planes are dirty.
6586 *
6587 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
6588 */
6589 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
6590 unsigned int frontbuffer_bits)
6591 {
6592 struct drm_crtc *crtc;
6593 enum pipe pipe;
6594
6595 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
6596 return;
6597
6598 cancel_delayed_work(&dev_priv->drrs.work);
6599
6600 mutex_lock(&dev_priv->drrs.mutex);
6601 if (!dev_priv->drrs.dp) {
6602 mutex_unlock(&dev_priv->drrs.mutex);
6603 return;
6604 }
6605
6606 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
6607 pipe = to_intel_crtc(crtc)->pipe;
6608
6609 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
6610 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
6611
6612 /* flush means busy screen hence upclock */
6613 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6614 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6615 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
6616
6617 /*
6618 * flush also means no more activity hence schedule downclock, if all
6619 * other fbs are quiescent too
6620 */
6621 if (!dev_priv->drrs.busy_frontbuffer_bits)
6622 schedule_delayed_work(&dev_priv->drrs.work,
6623 msecs_to_jiffies(1000));
6624 mutex_unlock(&dev_priv->drrs.mutex);
6625 }
6626
6627 /**
6628 * DOC: Display Refresh Rate Switching (DRRS)
6629 *
6630 * Display Refresh Rate Switching (DRRS) is a power conservation feature
6631 * which enables switching between low and high refresh rates,
6632 * dynamically, based on the usage scenario. This feature is applicable
6633 * for internal panels.
6634 *
6635 * Indication that the panel supports DRRS is given by the panel EDID, which
6636 * would list multiple refresh rates for one resolution.
6637 *
6638 * DRRS is of 2 types - static and seamless.
6639 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
6640 * (may appear as a blink on screen) and is used in dock-undock scenario.
6641 * Seamless DRRS involves changing RR without any visual effect to the user
6642 * and can be used during normal system usage. This is done by programming
6643 * certain registers.
6644 *
6645 * Support for static/seamless DRRS may be indicated in the VBT based on
6646 * inputs from the panel spec.
6647 *
6648 * DRRS saves power by switching to low RR based on usage scenarios.
6649 *
6650 * The implementation is based on frontbuffer tracking implementation. When
6651 * there is a disturbance on the screen triggered by user activity or a periodic
6652 * system activity, DRRS is disabled (RR is changed to high RR). When there is
6653 * no movement on screen, after a timeout of 1 second, a switch to low RR is
6654 * made.
6655 *
6656 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
6657 * and intel_edp_drrs_flush() are called.
6658 *
6659 * DRRS can be further extended to support other internal panels and also
6660 * the scenario of video playback wherein RR is set based on the rate
6661 * requested by userspace.
6662 */
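/*
 * Illustrative sketch (not new driver code): the frontbuffer tracking
 * code is expected to bracket rendering activity roughly as
 *
 *	intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
 *	... frontbuffer writes / flip ...
 *	intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
 *
 * which upclocks on activity and re-arms the 1 second idleness
 * downclock once all tracked bits go quiescent.
 */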
6663
6664 /**
6665 * intel_dp_drrs_init - Init basic DRRS work and mutex.
6666 * @connector: eDP connector
6667 * @fixed_mode: preferred mode of panel
6668 *
6669 * This function is called only once at driver load to initialize the
6670 * basic DRRS state (delayed work and mutex).
6671 *
6672 * Returns:
6673 * Downclock mode if panel supports it, else return NULL.
6674 * DRRS support is determined by the presence of downclock mode (apart
6675 * from VBT setting).
6676 */
6677 static struct drm_display_mode *
6678 intel_dp_drrs_init(struct intel_connector *connector,
6679 struct drm_display_mode *fixed_mode)
6680 {
6681 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
6682 struct drm_display_mode *downclock_mode = NULL;
6683
6684 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
6685 mutex_init(&dev_priv->drrs.mutex);
6686
6687 if (INTEL_GEN(dev_priv) <= 6) {
6688 DRM_DEBUG_KMS("DRRS only supported for Gen7 and above\n");
6689 return NULL;
6690 }
6691
6692 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
6693 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
6694 return NULL;
6695 }
6696
6697 downclock_mode = intel_find_panel_downclock(dev_priv, fixed_mode,
6698 &connector->base);
6699
6700 if (!downclock_mode) {
6701 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
6702 return NULL;
6703 }
6704
6705 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
6706
6707 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
6708 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
6709 return downclock_mode;
6710 }
6711
6712 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
6713 struct intel_connector *intel_connector)
6714 {
6715 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6716 struct drm_device *dev = &dev_priv->drm;
6717 struct drm_connector *connector = &intel_connector->base;
6718 struct drm_display_mode *fixed_mode = NULL;
6719 struct drm_display_mode *downclock_mode = NULL;
6720 bool has_dpcd;
6721 struct drm_display_mode *scan;
6722 enum pipe pipe = INVALID_PIPE;
6723 intel_wakeref_t wakeref;
6724 struct edid *edid;
6725
6726 if (!intel_dp_is_edp(intel_dp))
6727 return true;
6728
6729 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);
6730
6731 /*
6732 * On IBX/CPT we may get here with LVDS already registered. Since the
6733 * driver uses the only internal power sequencer available for both
6734 * eDP and LVDS, bail out early in this case to prevent interfering
6735 * with an already powered-on LVDS power sequencer.
6736 */
6737 if (intel_get_lvds_encoder(&dev_priv->drm)) {
6738 WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
6739 DRM_INFO("LVDS was detected, not registering eDP\n");
6740
6741 return false;
6742 }
6743
6744 with_pps_lock(intel_dp, wakeref) {
6745 intel_dp_init_panel_power_timestamps(intel_dp);
6746 intel_dp_pps_init(intel_dp);
6747 intel_edp_panel_vdd_sanitize(intel_dp);
6748 }
6749
6750 /* Cache DPCD and EDID for edp. */
6751 has_dpcd = intel_edp_init_dpcd(intel_dp);
6752
6753 if (!has_dpcd) {
6754 /* if this fails, presume the device is a ghost */
6755 DRM_INFO("failed to retrieve link info, disabling eDP\n");
6756 goto out_vdd_off;
6757 }
6758
6759 mutex_lock(&dev->mode_config.mutex);
6760 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
6761 if (edid) {
6762 if (drm_add_edid_modes(connector, edid)) {
6763 drm_connector_update_edid_property(connector,
6764 edid);
6765 } else {
6766 kfree(edid);
6767 edid = ERR_PTR(-EINVAL);
6768 }
6769 } else {
6770 edid = ERR_PTR(-ENOENT);
6771 }
6772 intel_connector->edid = edid;
6773
6774 /* prefer fixed mode from EDID if available */
6775 list_for_each_entry(scan, &connector->probed_modes, head) {
6776 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
6777 fixed_mode = drm_mode_duplicate(dev, scan);
6778 downclock_mode = intel_dp_drrs_init(
6779 intel_connector, fixed_mode);
6780 break;
6781 }
6782 }
6783
6784 /* fallback to VBT if available for eDP */
6785 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
6786 fixed_mode = drm_mode_duplicate(dev,
6787 dev_priv->vbt.lfp_lvds_vbt_mode);
6788 if (fixed_mode) {
6789 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
6790 connector->display_info.width_mm = fixed_mode->width_mm;
6791 connector->display_info.height_mm = fixed_mode->height_mm;
6792 }
6793 }
6794 mutex_unlock(&dev->mode_config.mutex);
6795
6796 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
6797 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
6798 register_reboot_notifier(&intel_dp->edp_notifier);
6799
6800 /*
6801 * Figure out the current pipe for the initial backlight setup.
6802 * If the current pipe isn't valid, try the PPS pipe, and if that
6803 * fails just assume pipe A.
6804 */
6805 pipe = vlv_active_pipe(intel_dp);
6806
6807 if (pipe != PIPE_A && pipe != PIPE_B)
6808 pipe = intel_dp->pps_pipe;
6809
6810 if (pipe != PIPE_A && pipe != PIPE_B)
6811 pipe = PIPE_A;
6812
6813 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
6814 pipe_name(pipe));
6815 }
6816
6817 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
6818 intel_connector->panel.backlight.power = intel_edp_backlight_power;
6819 intel_panel_setup_backlight(connector, pipe);
6820
6821 if (fixed_mode)
6822 drm_connector_init_panel_orientation_property(
6823 connector, fixed_mode->hdisplay, fixed_mode->vdisplay);
6824
6825 return true;
6826
6827 out_vdd_off:
6828 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
6829 /*
6830 * vdd might still be enabled due to the delayed vdd off.
6831 * Make sure vdd is actually turned off here.
6832 */
6833 with_pps_lock(intel_dp, wakeref)
6834 edp_panel_vdd_off_sync(intel_dp);
6835
6836 return false;
6837 }
6838
6839 static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
6840 {
6841 struct intel_connector *intel_connector;
6842 struct drm_connector *connector;
6843
6844 intel_connector = container_of(work, typeof(*intel_connector),
6845 modeset_retry_work);
6846 connector = &intel_connector->base;
6847 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
6848 connector->name);
6849
6850 /* Grab the lock before changing the connector property */
6851 mutex_lock(&connector->dev->mode_config.mutex);
6852 /* Set the connector link status to BAD and send a uevent to notify
6853 * userspace to do a modeset.
6854 */
6855 drm_connector_set_link_status_property(connector,
6856 DRM_MODE_LINK_STATUS_BAD);
6857 mutex_unlock(&connector->dev->mode_config.mutex);
6858 /* Send Hotplug uevent so userspace can reprobe */
6859 drm_kms_helper_hotplug_event(connector->dev);
6860 }
6861
6862 bool
6863 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
6864 struct intel_connector *intel_connector)
6865 {
6866 struct drm_connector *connector = &intel_connector->base;
6867 struct intel_dp *intel_dp = &intel_dig_port->dp;
6868 struct intel_encoder *intel_encoder = &intel_dig_port->base;
6869 struct drm_device *dev = intel_encoder->base.dev;
6870 struct drm_i915_private *dev_priv = to_i915(dev);
6871 enum port port = intel_encoder->port;
6872 int type;
6873
6874 /* Initialize the work for modeset in case of link train failure */
6875 INIT_WORK(&intel_connector->modeset_retry_work,
6876 intel_dp_modeset_retry_work_fn);
6877
6878 if (WARN(intel_dig_port->max_lanes < 1,
6879 "Not enough lanes (%d) for DP on port %c\n",
6880 intel_dig_port->max_lanes, port_name(port)))
6881 return false;
6882
6883 intel_dp_set_source_rates(intel_dp);
6884
6885 intel_dp->reset_link_params = true;
6886 intel_dp->pps_pipe = INVALID_PIPE;
6887 intel_dp->active_pipe = INVALID_PIPE;
6888
6889 /* intel_dp vfuncs */
6890 if (HAS_DDI(dev_priv))
6891 intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
6892
6893 /* Preserve the current hw state. */
6894 intel_dp->DP = I915_READ(intel_dp->output_reg);
6895 intel_dp->attached_connector = intel_connector;
6896
6897 if (intel_dp_is_port_edp(dev_priv, port))
6898 type = DRM_MODE_CONNECTOR_eDP;
6899 else
6900 type = DRM_MODE_CONNECTOR_DisplayPort;
6901
6902 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6903 intel_dp->active_pipe = vlv_active_pipe(intel_dp);
6904
6905 /*
6906 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
6907 * for DP the encoder type can be set by the caller to
6908 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
6909 */
6910 if (type == DRM_MODE_CONNECTOR_eDP)
6911 intel_encoder->type = INTEL_OUTPUT_EDP;
6912
6913 /* eDP only on port B and/or C on vlv/chv */
6914 if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
6915 intel_dp_is_edp(intel_dp) &&
6916 port != PORT_B && port != PORT_C))
6917 return false;
6918
6919 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
6920 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
6921 port_name(port));
6922
6923 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
6924 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
6925
6926 if (!HAS_GMCH(dev_priv))
6927 connector->interlace_allowed = true;
6928 connector->doublescan_allowed = 0;
6929
6930 intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
6931
6932 intel_dp_aux_init(intel_dp);
6933
6934 intel_connector_attach_encoder(intel_connector, intel_encoder);
6935
6936 if (HAS_DDI(dev_priv))
6937 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
6938 else
6939 intel_connector->get_hw_state = intel_connector_get_hw_state;
6940
6941 /* init MST on ports that can support it */
6942 if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
6943 (port == PORT_B || port == PORT_C ||
6944 port == PORT_D || port == PORT_F))
6945 intel_dp_mst_encoder_init(intel_dig_port,
6946 intel_connector->base.base.id);
6947
6948 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
6949 intel_dp_aux_fini(intel_dp);
6950 intel_dp_mst_encoder_cleanup(intel_dig_port);
6951 goto fail;
6952 }
6953
6954 intel_dp_add_properties(intel_dp, connector);
6955
6956 if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
6957 int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
6958 if (ret)
6959 DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
6960 }
6961
6962 /* For the G4X desktop chip, bits 3:0 of PEG_BAND_GAP_DATA must first be
6963 * written with 0xd. Failure to do so will result in spurious interrupts being
6964 * generated on the port when a cable is not attached.
6965 */
6966 if (IS_G45(dev_priv)) {
6967 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
6968 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
6969 }
6970
6971 return true;
6972
6973 fail:
6974 drm_connector_cleanup(connector);
6975
6976 return false;
6977 }
6978
6979 bool intel_dp_init(struct drm_i915_private *dev_priv,
6980 i915_reg_t output_reg,
6981 enum port port)
6982 {
6983 struct intel_digital_port *intel_dig_port;
6984 struct intel_encoder *intel_encoder;
6985 struct drm_encoder *encoder;
6986 struct intel_connector *intel_connector;
6987
6988 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
6989 if (!intel_dig_port)
6990 return false;
6991
6992 intel_connector = intel_connector_alloc();
6993 if (!intel_connector)
6994 goto err_connector_alloc;
6995
6996 intel_encoder = &intel_dig_port->base;
6997 encoder = &intel_encoder->base;
6998
6999 if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
7000 &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
7001 "DP %c", port_name(port)))
7002 goto err_encoder_init;
7003
7004 intel_encoder->hotplug = intel_dp_hotplug;
7005 intel_encoder->compute_config = intel_dp_compute_config;
7006 intel_encoder->get_hw_state = intel_dp_get_hw_state;
7007 intel_encoder->get_config = intel_dp_get_config;
7008 intel_encoder->update_pipe = intel_panel_update_backlight;
7009 intel_encoder->suspend = intel_dp_encoder_suspend;
7010 if (IS_CHERRYVIEW(dev_priv)) {
7011 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
7012 intel_encoder->pre_enable = chv_pre_enable_dp;
7013 intel_encoder->enable = vlv_enable_dp;
7014 intel_encoder->disable = vlv_disable_dp;
7015 intel_encoder->post_disable = chv_post_disable_dp;
7016 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
7017 } else if (IS_VALLEYVIEW(dev_priv)) {
7018 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
7019 intel_encoder->pre_enable = vlv_pre_enable_dp;
7020 intel_encoder->enable = vlv_enable_dp;
7021 intel_encoder->disable = vlv_disable_dp;
7022 intel_encoder->post_disable = vlv_post_disable_dp;
7023 } else {
7024 intel_encoder->pre_enable = g4x_pre_enable_dp;
7025 intel_encoder->enable = g4x_enable_dp;
7026 intel_encoder->disable = g4x_disable_dp;
7027 intel_encoder->post_disable = g4x_post_disable_dp;
7028 }
7029
7030 intel_dig_port->dp.output_reg = output_reg;
7031 intel_dig_port->max_lanes = 4;
7032
7033 intel_encoder->type = INTEL_OUTPUT_DP;
7034 intel_encoder->power_domain = intel_port_to_power_domain(port);
7035 if (IS_CHERRYVIEW(dev_priv)) {
7036 if (port == PORT_D)
7037 intel_encoder->crtc_mask = 1 << 2;
7038 else
7039 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
7040 } else {
7041 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
7042 }
7043 intel_encoder->cloneable = 0;
7044 intel_encoder->port = port;
7045
7046 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
7047
7048 if (port != PORT_A)
7049 intel_infoframe_init(intel_dig_port);
7050
7051 intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
7052 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
7053 goto err_init_connector;
7054
7055 return true;
7056
7057 err_init_connector:
7058 drm_encoder_cleanup(encoder);
7059 err_encoder_init:
7060 kfree(intel_connector);
7061 err_connector_alloc:
7062 kfree(intel_dig_port);
7063 return false;
7064 }
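/*
 * Illustrative usage sketch (not new driver code): platform output
 * setup registers a DP port roughly as
 *
 *	intel_dp_init(dev_priv, DP_B, PORT_B);
 *
 * where DP_B is that port's output register; on failure the function
 * has already freed everything it allocated and returns false.
 */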
7065
7066 void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
7067 {
7068 struct intel_encoder *encoder;
7069
7070 for_each_intel_encoder(&dev_priv->drm, encoder) {
7071 struct intel_dp *intel_dp;
7072
7073 if (encoder->type != INTEL_OUTPUT_DDI)
7074 continue;
7075
7076 intel_dp = enc_to_intel_dp(&encoder->base);
7077
7078 if (!intel_dp->can_mst)
7079 continue;
7080
7081 if (intel_dp->is_mst)
7082 drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
7083 }
7084 }
7085
7086 void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
7087 {
7088 struct intel_encoder *encoder;
7089
7090 for_each_intel_encoder(&dev_priv->drm, encoder) {
7091 struct intel_dp *intel_dp;
7092 int ret;
7093
7094 if (encoder->type != INTEL_OUTPUT_DDI)
7095 continue;
7096
7097 intel_dp = enc_to_intel_dp(&encoder->base);
7098
7099 if (!intel_dp->can_mst)
7100 continue;
7101
7102 ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
7103 if (ret) {
7104 intel_dp->is_mst = false;
7105 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
7106 false);
7107 }
7108 }
7109 }