/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT   (10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK  0
#define INTEL_DP_RESOLUTION_PREFERRED   (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD    (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE    (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
        int clock;
        struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
        { 162000,
          { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
        { 270000,
          { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
        { 162000,
          { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
        { 270000,
          { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
        { 162000,
          { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
        { 270000,
          { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
/*
 * CHV supports eDP 1.4, which allows more link rates.
 * The table below only provides the fixed rates and excludes the
 * variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
        /*
         * CHV requires programming a fractional division for m2.
         * m2 is stored in fixed point format using the formula below:
         * (m2_int << 22) | m2_fraction
         */
        { 162000,       /* m2_int = 32, m2_fraction = 1677722 */
          { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
        { 270000,       /* m2_int = 27, m2_fraction = 0 */
          { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
        { 540000,       /* m2_int = 27, m2_fraction = 0 */
          { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
                                 324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
                                 324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

        return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

        return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
        return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
                                      enum pipe pipe);

static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
        return ~((1 << lane_count) - 1) & 0xf;
}
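/*
 * E.g. intel_dp_unused_lane_mask(2) == 0xc: lanes 0-1 carry the link,
 * so lanes 2 and 3 are reported as unused.
 */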

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
        int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

        switch (max_link_bw) {
        case DP_LINK_BW_1_62:
        case DP_LINK_BW_2_7:
        case DP_LINK_BW_5_4:
                break;
        default:
                WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
                     max_link_bw);
                max_link_bw = DP_LINK_BW_1_62;
                break;
        }
        return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        u8 source_max, sink_max;

        source_max = 4;
        if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
            (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
                source_max = 2;

        sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

        return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
        return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
        return (max_link_clock * max_lanes * 8) / 10;
}
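
/*
 * Worked example for the two helpers above: 1680x1050R has ->clock ==
 * 119000, so at 18bpp intel_dp_link_required(119000, 18) ==
 * (119000 * 18 + 9) / 10 == 214200 decakilobits, which just fits in the
 * 216000 that intel_dp_max_data_rate(270000, 1) reports for a single
 * 2.7GHz lane.
 */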

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
{
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
        int target_clock = mode->clock;
        int max_rate, mode_rate, max_lanes, max_link_clock;

        if (is_edp(intel_dp) && fixed_mode) {
                if (mode->hdisplay > fixed_mode->hdisplay)
                        return MODE_PANEL;

                if (mode->vdisplay > fixed_mode->vdisplay)
                        return MODE_PANEL;

                target_clock = fixed_mode->clock;
        }

        max_link_clock = intel_dp_max_link_rate(intel_dp);
        max_lanes = intel_dp_max_lane_count(intel_dp);

        max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
        mode_rate = intel_dp_link_required(target_clock, 18);

        if (mode_rate > max_rate)
                return MODE_CLOCK_HIGH;

        if (mode->clock < 10000)
                return MODE_CLOCK_LOW;

        if (mode->flags & DRM_MODE_FLAG_DBLCLK)
                return MODE_H_ILLEGAL;

        return MODE_OK;
}

uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
        int i;
        uint32_t v = 0;

        if (src_bytes > 4)
                src_bytes = 4;
        for (i = 0; i < src_bytes; i++)
                v |= ((uint32_t) src[i]) << ((3-i) * 8);
        return v;
}

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
        int i;
        if (dst_bytes > 4)
                dst_bytes = 4;
        for (i = 0; i < dst_bytes; i++)
                dst[i] = src >> ((3-i) * 8);
}
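
/*
 * The AUX data registers are big-endian: e.g. packing the bytes
 * { 0x12, 0x34, 0x56, 0x78 } with intel_dp_pack_aux() yields 0x12345678,
 * and intel_dp_unpack_aux(0x12345678, dst, 4) restores the same bytes.
 */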

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
                                    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
                                              struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;

        /*
         * See vlv_power_sequencer_reset() for why we need
         * a power domain reference here.
         */
        power_domain = intel_display_port_power_domain(encoder);
        intel_display_power_get(dev_priv, power_domain);

        mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;

        mutex_unlock(&dev_priv->pps_mutex);

        power_domain = intel_display_port_power_domain(encoder);
        intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe = intel_dp->pps_pipe;
        bool pll_enabled, release_cl_override = false;
        enum dpio_phy phy = DPIO_PHY(pipe);
        enum dpio_channel ch = vlv_pipe_to_channel(pipe);
        uint32_t DP;

        if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
313 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
                 pipe_name(pipe), port_name(intel_dig_port->port)))
                return;

        DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
                      pipe_name(pipe), port_name(intel_dig_port->port));

        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
         */
        DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
        DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
        DP |= DP_PORT_WIDTH(1);
        DP |= DP_LINK_TRAIN_PAT_1;

        if (IS_CHERRYVIEW(dev))
                DP |= DP_PIPE_SELECT_CHV(pipe);
        else if (pipe == PIPE_B)
                DP |= DP_PIPEB_SELECT;

        pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

        /*
         * The DPLL for the pipe must be enabled for this to work.
         * So enable it temporarily if it's not already enabled.
         */
        if (!pll_enabled) {
                release_cl_override = IS_CHERRYVIEW(dev) &&
                        !chv_phy_powergate_ch(dev_priv, phy, ch, true);

                vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
                                 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
        }

        /*
         * Similar magic as in intel_dp_enable_port().
         * We _must_ do this port enable + disable trick
         * to make this power sequencer lock onto the port.
         * Otherwise even the VDD force bit won't work.
         */
        I915_WRITE(intel_dp->output_reg, DP);
        POSTING_READ(intel_dp->output_reg);

        I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);

        I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);

        if (!pll_enabled) {
                vlv_force_pll_off(dev, pipe);

                if (release_cl_override)
                        chv_phy_powergate_ch(dev_priv, phy, ch, false);
        }
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;
        unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
        enum pipe pipe;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* We should never land here with regular DP ports */
        WARN_ON(!is_edp(intel_dp));

        if (intel_dp->pps_pipe != INVALID_PIPE)
                return intel_dp->pps_pipe;

        /*
         * We don't have a power sequencer currently.
         * Pick one that's not used by other ports.
         */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                            base.head) {
                struct intel_dp *tmp;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                tmp = enc_to_intel_dp(&encoder->base);

                if (tmp->pps_pipe != INVALID_PIPE)
                        pipes &= ~(1 << tmp->pps_pipe);
        }

        /*
         * Didn't find one. This should not happen since there
         * are two power sequencers and up to two eDP ports.
         */
        if (WARN_ON(pipes == 0))
                pipe = PIPE_A;
        else
                pipe = ffs(pipes) - 1;

        vlv_steal_power_sequencer(dev, pipe);
        intel_dp->pps_pipe = pipe;

        DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe),
                      port_name(intel_dig_port->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

        /*
         * Even VDD force doesn't work until we've made
         * the power sequencer lock onto the port.
         */
        vlv_power_sequencer_kick(intel_dp);

        return intel_dp->pps_pipe;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
                               enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
                               enum pipe pipe)
{
        return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
                                enum pipe pipe)
{
        return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
                         enum pipe pipe)
{
        return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
                     enum port port,
                     vlv_pipe_check pipe_check)
{
        enum pipe pipe;

        for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
                u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
                        PANEL_PORT_SELECT_MASK;

                if (port_sel != PANEL_PORT_SELECT_VLV(port))
                        continue;

                if (!pipe_check(dev_priv, pipe))
                        continue;

                return pipe;
        }

        return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* try to find a pipe with this port selected */
        /* first pick one where the panel is on */
        intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                  vlv_pipe_has_pp_on);
        /* didn't find one? pick one where vdd is on */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_has_vdd_on);
        /* didn't find one? pick one with just the correct port */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_any);

        /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
        if (intel_dp->pps_pipe == INVALID_PIPE) {
                DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
                              port_name(port));
                return;
        }

        DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
                      port_name(port), pipe_name(intel_dp->pps_pipe));

        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        struct intel_encoder *encoder;

        if (WARN_ON(!IS_VALLEYVIEW(dev)))
                return;

        /*
         * We can't grab pps_mutex here due to deadlock with power_domain
         * mutex when power_domain functions are called while holding pps_mutex.
         * That also means that in order to use pps_pipe the code needs to
         * hold both a power domain reference and pps_mutex, and the power domain
         * reference get/put must be done while _not_ holding pps_mutex.
         * pps_{lock,unlock}() do these steps in the correct order, so they
         * should always be used.
         */

        list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
                struct intel_dp *intel_dp;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                intel_dp = enc_to_intel_dp(&encoder->base);
                intel_dp->pps_pipe = INVALID_PIPE;
        }
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);

        if (IS_BROXTON(dev))
                return BXT_PP_CONTROL(0);
        else if (HAS_PCH_SPLIT(dev))
                return PCH_PP_CONTROL;
        else
                return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);

        if (IS_BROXTON(dev))
                return BXT_PP_STATUS(0);
        else if (HAS_PCH_SPLIT(dev))
                return PCH_PP_STATUS;
        else
                return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shut down panel power to guarantee T12 timing.
   This function is only applicable when the panel PM state is not to be tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
                              void *unused)
{
        struct intel_dp *intel_dp = container_of(this, typeof(*intel_dp),
                                                 edp_notifier);
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!is_edp(intel_dp) || code != SYS_RESTART)
                return 0;

        pps_lock(intel_dp);

        if (IS_VALLEYVIEW(dev)) {
                enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
                u32 pp_ctrl_reg, pp_div_reg;
                u32 pp_div;

                pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
                pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
                pp_div = I915_READ(pp_div_reg);
                pp_div &= PP_REFERENCE_DIVIDER_MASK;

                /* 0x1F write to PP_DIV_REG sets max cycle delay */
                I915_WRITE(pp_div_reg, pp_div | 0x1F);
                I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
                msleep(intel_dp->panel_power_cycle_delay);
        }

        pps_unlock(intel_dp);

        return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (IS_VALLEYVIEW(dev) &&
            intel_dp->pps_pipe == INVALID_PIPE)
                return false;

        return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (IS_VALLEYVIEW(dev) &&
            intel_dp->pps_pipe == INVALID_PIPE)
                return false;

        return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!is_edp(intel_dp))
                return;

        if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
                WARN(1, "eDP powered off while attempting aux channel communication.\n");
                DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
                              I915_READ(_pp_stat_reg(intel_dp)),
                              I915_READ(_pp_ctrl_reg(intel_dp)));
        }
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t status;
        bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
        if (has_aux_irq)
                done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
                                          msecs_to_jiffies_timeout(10));
        else
                done = wait_for_atomic(C, 10) == 0;
        if (!done)
                DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
                          has_aux_irq);
#undef C

        return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;

        /*
         * The clock divider is based on the hrawclk, and we would like the AUX
         * clock to run at 2MHz. So take the hrawclk value and divide by 2 and
         * use that.
         */
        return index ? 0 : intel_hrawclk(dev) / 2;
}
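
/*
 * E.g. assuming intel_hrawclk() reports the raw clock in MHz (which the
 * 2MHz target above implies), a 200MHz hrawclk gives a divider of 100,
 * i.e. 200MHz / 100 == 2MHz.
 */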

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (index)
                return 0;

        if (intel_dig_port->port == PORT_A) {
                return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);

        } else {
                return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
        }
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (intel_dig_port->port == PORT_A) {
                if (index)
                        return 0;
                return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
        } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                /* Workaround for non-ULT HSW */
                switch (index) {
                case 0: return 63;
                case 1: return 72;
                default: return 0;
                }
        } else {
                return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
        }
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        /*
         * SKL doesn't need us to program the AUX clock divider (Hardware will
         * derive the clock from CDCLK automatically). We still implement the
         * get_aux_clock_divider vfunc to plug into the existing code.
         */
        return index ? 0 : 1;
}

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
                                      bool has_aux_irq,
                                      int send_bytes,
                                      uint32_t aux_clock_divider)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        uint32_t precharge, timeout;

        if (IS_GEN6(dev))
                precharge = 3;
        else
                precharge = 5;

        if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
                timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
        else
                timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

        return DP_AUX_CH_CTL_SEND_BUSY |
               DP_AUX_CH_CTL_DONE |
               (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
               DP_AUX_CH_CTL_TIME_OUT_ERROR |
               timeout |
               DP_AUX_CH_CTL_RECEIVE_ERROR |
               (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
               (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
               (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
                                     bool has_aux_irq,
                                     int send_bytes,
                                     uint32_t unused)
{
        return DP_AUX_CH_CTL_SEND_BUSY |
               DP_AUX_CH_CTL_DONE |
               (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
               DP_AUX_CH_CTL_TIME_OUT_ERROR |
               DP_AUX_CH_CTL_TIME_OUT_1600us |
               DP_AUX_CH_CTL_RECEIVE_ERROR |
               (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
               DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
                const uint8_t *send, int send_bytes,
                uint8_t *recv, int recv_size)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t ch_data = ch_ctl + 4;
        uint32_t aux_clock_divider;
        int i, ret, recv_bytes;
        uint32_t status;
        int try, clock = 0;
        bool has_aux_irq = HAS_AUX_IRQ(dev);
        bool vdd;

        pps_lock(intel_dp);

        /*
         * We will be called with VDD already enabled for dpcd/edid/oui reads.
         * In such cases we want to leave VDD enabled and it's up to the upper
         * layers to turn it off. But for e.g. i2c-dev access we need to turn
         * it on/off ourselves.
         */
        vdd = edp_panel_vdd_on(intel_dp);

        /* dp aux is extremely sensitive to irq latency, hence request the
         * lowest possible wakeup latency and so prevent the cpu from going into
         * deep sleep states.
         */
        pm_qos_update_request(&dev_priv->pm_qos, 0);

        intel_dp_check_edp(intel_dp);

        intel_aux_display_runtime_get(dev_priv);

        /* Try to wait for any previous AUX channel activity */
        for (try = 0; try < 3; try++) {
                status = I915_READ_NOTRACE(ch_ctl);
                if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
                        break;
                msleep(1);
        }

        if (try == 3) {
                static u32 last_status = -1;
                const u32 status = I915_READ(ch_ctl);

                if (status != last_status) {
                        WARN(1, "dp_aux_ch not started status 0x%08x\n",
                             status);
                        last_status = status;
                }

                ret = -EBUSY;
                goto out;
        }

        /* Only 5 data registers! */
        if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
                ret = -E2BIG;
                goto out;
        }

        while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
                u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
                                                          has_aux_irq,
                                                          send_bytes,
                                                          aux_clock_divider);

                /* Must try at least 3 times according to DP spec */
                for (try = 0; try < 5; try++) {
                        /* Load the send data into the aux channel data registers */
                        for (i = 0; i < send_bytes; i += 4)
                                I915_WRITE(ch_data + i,
                                           intel_dp_pack_aux(send + i,
                                                             send_bytes - i));

                        /* Send the command and wait for it to complete */
                        I915_WRITE(ch_ctl, send_ctl);

                        status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

                        /* Clear done status and any errors */
                        I915_WRITE(ch_ctl,
                                   status |
                                   DP_AUX_CH_CTL_DONE |
                                   DP_AUX_CH_CTL_TIME_OUT_ERROR |
                                   DP_AUX_CH_CTL_RECEIVE_ERROR);

                        if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
                                continue;

                        /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
                         * 400us delay required for errors and timeouts
                         * Timeout errors from the HW already meet this
                         * requirement so skip to next iteration
                         */
                        if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
                                usleep_range(400, 500);
                                continue;
                        }
                        if (status & DP_AUX_CH_CTL_DONE)
                                goto done;
                }
        }

        if ((status & DP_AUX_CH_CTL_DONE) == 0) {
                DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
                ret = -EBUSY;
                goto out;
        }

done:
        /* Check for timeout or receive error.
         * Timeouts occur when the sink is not connected
         */
        if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
                DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
                ret = -EIO;
                goto out;
        }

        /* Timeouts occur when the device isn't connected, so they're
         * "normal" -- don't fill the kernel log with these */
        if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
                DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
                ret = -ETIMEDOUT;
                goto out;
        }

        /* Unload any bytes sent back from the other side */
        recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
                      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
        if (recv_bytes > recv_size)
                recv_bytes = recv_size;

        for (i = 0; i < recv_bytes; i += 4)
                intel_dp_unpack_aux(I915_READ(ch_data + i),
                                    recv + i, recv_bytes - i);

        ret = recv_bytes;
out:
        pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
        intel_aux_display_runtime_put(dev_priv);

        if (vdd)
                edp_panel_vdd_off(intel_dp, false);

        pps_unlock(intel_dp);

        return ret;
}

#define BARE_ADDRESS_SIZE 3
#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
        struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
        uint8_t txbuf[20], rxbuf[20];
        size_t txsize, rxsize;
        int ret;

        txbuf[0] = (msg->request << 4) |
                   ((msg->address >> 16) & 0xf);
        txbuf[1] = (msg->address >> 8) & 0xff;
        txbuf[2] = msg->address & 0xff;
        txbuf[3] = msg->size - 1;

        switch (msg->request & ~DP_AUX_I2C_MOT) {
        case DP_AUX_NATIVE_WRITE:
        case DP_AUX_I2C_WRITE:
        case DP_AUX_I2C_WRITE_STATUS_UPDATE:
                txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
                rxsize = 2; /* 0 or 1 data bytes */

                if (WARN_ON(txsize > 20))
                        return -E2BIG;

                memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

                ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
                if (ret > 0) {
                        msg->reply = rxbuf[0] >> 4;

                        if (ret > 1) {
                                /* Number of bytes written in a short write. */
                                ret = clamp_t(int, rxbuf[1], 0, msg->size);
                        } else {
                                /* Return payload size. */
                                ret = msg->size;
                        }
                }
                break;

        case DP_AUX_NATIVE_READ:
        case DP_AUX_I2C_READ:
                txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
                rxsize = msg->size + 1;

                if (WARN_ON(rxsize > 20))
                        return -E2BIG;

                ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
                if (ret > 0) {
                        msg->reply = rxbuf[0] >> 4;
                        /*
                         * Assume happy day, and copy the data. The caller is
                         * expected to check msg->reply before touching it.
                         *
                         * Return payload size.
                         */
                        ret--;
                        memcpy(msg->buffer, rxbuf + 1, ret);
                }
                break;

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->port;
        struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
        const char *name = NULL;
        uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
        int ret;

        /* On SKL we don't have Aux for port E so we rely on VBT to set
         * a proper alternate aux channel.
         */
        if (IS_SKYLAKE(dev) && port == PORT_E) {
                switch (info->alternate_aux_channel) {
                case DP_AUX_B:
                        porte_aux_ctl_reg = DPB_AUX_CH_CTL;
                        break;
                case DP_AUX_C:
                        porte_aux_ctl_reg = DPC_AUX_CH_CTL;
                        break;
                case DP_AUX_D:
                        porte_aux_ctl_reg = DPD_AUX_CH_CTL;
                        break;
                case DP_AUX_A:
                default:
                        porte_aux_ctl_reg = DPA_AUX_CH_CTL;
                }
        }

        switch (port) {
        case PORT_A:
                intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
                name = "DPDDC-A";
                break;
        case PORT_B:
                intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
                name = "DPDDC-B";
                break;
        case PORT_C:
                intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
                name = "DPDDC-C";
                break;
        case PORT_D:
                intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
                name = "DPDDC-D";
                break;
        case PORT_E:
                intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
                name = "DPDDC-E";
                break;
        default:
                BUG();
        }

        /*
         * The AUX_CTL register is usually DP_CTL + 0x10.
         *
         * On Haswell and Broadwell though:
         *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
         *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
         *
         * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
         */
        if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
                intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

        intel_dp->aux.name = name;
        intel_dp->aux.dev = dev->dev;
        intel_dp->aux.transfer = intel_dp_aux_transfer;

        DRM_DEBUG_KMS("registering %s bus for %s\n", name,
                      connector->base.kdev->kobj.name);

        ret = drm_dp_aux_register(&intel_dp->aux);
        if (ret < 0) {
                DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
                          name, ret);
                return;
        }

        ret = sysfs_create_link(&connector->base.kdev->kobj,
                                &intel_dp->aux.ddc.dev.kobj,
                                intel_dp->aux.ddc.dev.kobj.name);
        if (ret < 0) {
                DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
                drm_dp_aux_unregister(&intel_dp->aux);
        }
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
        struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

        if (!intel_connector->mst_port)
                sysfs_remove_link(&intel_connector->base.kdev->kobj,
                                  intel_dp->aux.ddc.dev.kobj.name);
        intel_connector_unregister(intel_connector);
}

static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
{
        u32 ctrl1;

        memset(&pipe_config->dpll_hw_state, 0,
               sizeof(pipe_config->dpll_hw_state));

        pipe_config->ddi_pll_sel = SKL_DPLL0;
        pipe_config->dpll_hw_state.cfgcr1 = 0;
        pipe_config->dpll_hw_state.cfgcr2 = 0;

        ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
        switch (pipe_config->port_clock / 2) {
        case 81000:
                ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
                                              SKL_DPLL0);
                break;
        case 135000:
                ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
                                              SKL_DPLL0);
                break;
        case 270000:
                ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
                                              SKL_DPLL0);
                break;
        case 162000:
                ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
                                              SKL_DPLL0);
                break;
        /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640, which
           results in a CDCLK change. Need to handle the change of CDCLK by
           disabling pipes and re-enabling them. */
        case 108000:
                ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
                                              SKL_DPLL0);
                break;
        case 216000:
                ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
                                              SKL_DPLL0);
                break;

        }
        pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}

void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
{
        memset(&pipe_config->dpll_hw_state, 0,
               sizeof(pipe_config->dpll_hw_state));

        switch (pipe_config->port_clock / 2) {
        case 81000:
                pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
                break;
        case 135000:
                pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
                break;
        case 270000:
                pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
                break;
        }
}

static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
        if (intel_dp->num_sink_rates) {
                *sink_rates = intel_dp->sink_rates;
                return intel_dp->num_sink_rates;
        }

        *sink_rates = default_rates;

        return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}
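
/*
 * The (bw >> 3) + 1 above maps the legacy link bw codes to a count of
 * default_rates[] entries: DP_LINK_BW_1_62 (0x06) -> 1, DP_LINK_BW_2_7
 * (0x0a) -> 2 and DP_LINK_BW_5_4 (0x14) -> 3.
 */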

static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
{
        /* WaDisableHBR2:skl */
        if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
                return false;

        if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
            (INTEL_INFO(dev)->gen >= 9))
                return true;
        else
                return false;
}

static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
        int size;

        if (IS_BROXTON(dev)) {
                *source_rates = bxt_rates;
                size = ARRAY_SIZE(bxt_rates);
        } else if (IS_SKYLAKE(dev)) {
                *source_rates = skl_rates;
                size = ARRAY_SIZE(skl_rates);
        } else {
                *source_rates = default_rates;
                size = ARRAY_SIZE(default_rates);
        }

        /* This depends on the fact that 5.4 is last value in the array */
        if (!intel_dp_source_supports_hbr2(dev))
                size--;

        return size;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
                   struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = encoder->base.dev;
        const struct dp_link_dpll *divisor = NULL;
        int i, count = 0;

        if (IS_G4X(dev)) {
                divisor = gen4_dpll;
                count = ARRAY_SIZE(gen4_dpll);
        } else if (HAS_PCH_SPLIT(dev)) {
                divisor = pch_dpll;
                count = ARRAY_SIZE(pch_dpll);
        } else if (IS_CHERRYVIEW(dev)) {
                divisor = chv_dpll;
                count = ARRAY_SIZE(chv_dpll);
        } else if (IS_VALLEYVIEW(dev)) {
                divisor = vlv_dpll;
                count = ARRAY_SIZE(vlv_dpll);
        }

        if (divisor && count) {
                for (i = 0; i < count; i++) {
                        if (pipe_config->port_clock == divisor[i].clock) {
                                pipe_config->dpll = divisor[i].dpll;
                                pipe_config->clock_set = true;
                                break;
                        }
                }
        }
}

static int intersect_rates(const int *source_rates, int source_len,
                           const int *sink_rates, int sink_len,
                           int *common_rates)
{
        int i = 0, j = 0, k = 0;

        while (i < source_len && j < sink_len) {
                if (source_rates[i] == sink_rates[j]) {
                        if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
                                return k;
                        common_rates[k] = source_rates[i];
                        ++k;
                        ++i;
                        ++j;
                } else if (source_rates[i] < sink_rates[j]) {
                        ++i;
                } else {
                        ++j;
                }
        }
        return k;
}
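
/*
 * Both input arrays must be sorted in ascending order for the merge
 * above to work. E.g. intersecting default_rates with skl_rates yields
 * { 162000, 270000, 540000 }, since skl_rates contains all three.
 */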

static int intel_dp_common_rates(struct intel_dp *intel_dp,
                                 int *common_rates)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        const int *source_rates, *sink_rates;
        int source_len, sink_len;

        sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
        source_len = intel_dp_source_rates(dev, &source_rates);

        return intersect_rates(source_rates, source_len,
                               sink_rates, sink_len,
                               common_rates);
}

static void snprintf_int_array(char *str, size_t len,
                               const int *array, int nelem)
{
        int i;

        str[0] = '\0';

        for (i = 0; i < nelem; i++) {
                int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
                if (r >= len)
                        return;
                str += r;
                len -= r;
        }
}
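
/* E.g. { 162000, 270000, 540000 } is printed as "162000, 270000, 540000". */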

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        const int *source_rates, *sink_rates;
        int source_len, sink_len, common_len;
        int common_rates[DP_MAX_SUPPORTED_RATES];
        char str[128]; /* FIXME: too big for stack? */

        if ((drm_debug & DRM_UT_KMS) == 0)
                return;

        source_len = intel_dp_source_rates(dev, &source_rates);
        snprintf_int_array(str, sizeof(str), source_rates, source_len);
        DRM_DEBUG_KMS("source rates: %s\n", str);

        sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
        snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
        DRM_DEBUG_KMS("sink rates: %s\n", str);

        common_len = intel_dp_common_rates(intel_dp, common_rates);
        snprintf_int_array(str, sizeof(str), common_rates, common_len);
        DRM_DEBUG_KMS("common rates: %s\n", str);
}

static int rate_to_index(int find, const int *rates)
{
        int i = 0;

        for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
                if (find == rates[i])
                        break;

        return i;
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
        int rates[DP_MAX_SUPPORTED_RATES] = {};
        int len;

        len = intel_dp_common_rates(intel_dp, rates);
        if (WARN_ON(len <= 0))
                return 162000;

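        /*
         * rates[] was zero-initialized, so rate_to_index(0, rates) finds
         * the first unused slot, i.e. the number of valid entries; the
         * entry just before it is the highest common rate.
         */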
        return rates[rate_to_index(0, rates) - 1];
}

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
        return rate_to_index(rate, intel_dp->sink_rates);
}

static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
                                  uint8_t *link_bw, uint8_t *rate_select)
{
        if (intel_dp->num_sink_rates) {
                *link_bw = 0;
                *rate_select =
                        intel_dp_rate_select(intel_dp, port_clock);
        } else {
                *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
                *rate_select = 0;
        }
}
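
/*
 * E.g. a sink without a DPCD rate table driven at 270000 gets link_bw ==
 * drm_dp_link_rate_to_bw_code(270000) == DP_LINK_BW_2_7 (0x0a) and
 * rate_select == 0, while a sink with a rate table gets link_bw == 0 and
 * rate_select set to the index of port_clock in sink_rates[].
 */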

bool
intel_dp_compute_config(struct intel_encoder *encoder,
                        struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
        struct intel_connector *intel_connector = intel_dp->attached_connector;
        int lane_count, clock;
        int min_lane_count = 1;
        int max_lane_count = intel_dp_max_lane_count(intel_dp);
        /* Conveniently, the link BW constants become indices with a shift...*/
        int min_clock = 0;
        int max_clock;
        int bpp, mode_rate;
        int link_avail, link_clock;
        int common_rates[DP_MAX_SUPPORTED_RATES] = {};
        int common_len;
        uint8_t link_bw, rate_select;

        common_len = intel_dp_common_rates(intel_dp, common_rates);

        /* No common link rates between source and sink */
        WARN_ON(common_len <= 0);

        max_clock = common_len - 1;

        if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
                pipe_config->has_pch_encoder = true;

        pipe_config->has_dp_encoder = true;
        pipe_config->has_drrs = false;
        pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;

        if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
                intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
                                       adjusted_mode);

                if (INTEL_INFO(dev)->gen >= 9) {
                        int ret;
                        ret = skl_update_scaler_crtc(pipe_config);
                        if (ret)
                                return ret;
                }

                if (!HAS_PCH_SPLIT(dev))
                        intel_gmch_panel_fitting(intel_crtc, pipe_config,
                                                 intel_connector->panel.fitting_mode);
                else
                        intel_pch_panel_fitting(intel_crtc, pipe_config,
                                                intel_connector->panel.fitting_mode);
        }

        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
                return false;

        DRM_DEBUG_KMS("DP link computation with max lane count %i "
                      "max bw %d pixel clock %iKHz\n",
                      max_lane_count, common_rates[max_clock],
                      adjusted_mode->crtc_clock);

        /* Walk through all bpp values. Luckily they're all nicely spaced with 2
         * bpc in between. */
        bpp = pipe_config->pipe_bpp;
        if (is_edp(intel_dp)) {

                /* Get bpp from vbt only for panels that don't have bpp in edid */
                if (intel_connector->base.display_info.bpc == 0 &&
                    (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
                        DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
                                      dev_priv->vbt.edp_bpp);
                        bpp = dev_priv->vbt.edp_bpp;
                }

                /*
                 * Use the maximum clock and number of lanes the eDP panel
                 * advertises being capable of. The panels are generally
                 * designed to support only a single clock and lane
                 * configuration, and typically these values correspond to the
                 * native resolution of the panel.
                 */
                min_lane_count = max_lane_count;
                min_clock = max_clock;
        }

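        /*
         * Try each bpp from high to low and, within each bpp, every link
         * clock/lane count combination from slow/narrow to fast/wide, so
         * we settle on the cheapest link configuration that can carry the
         * mode at the highest bpp that fits.
         */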
        for (; bpp >= 6*3; bpp -= 2*3) {
                mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
                                                   bpp);

                for (clock = min_clock; clock <= max_clock; clock++) {
                        for (lane_count = min_lane_count;
                             lane_count <= max_lane_count;
                             lane_count <<= 1) {

                                link_clock = common_rates[clock];
                                link_avail = intel_dp_max_data_rate(link_clock,
                                                                    lane_count);

                                if (mode_rate <= link_avail) {
                                        goto found;
                                }
                        }
                }
        }

        return false;

found:
        if (intel_dp->color_range_auto) {
                /*
                 * See:
                 * CEA-861-E - 5.1 Default Encoding Parameters
                 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
                 */
                pipe_config->limited_color_range =
                        bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
        } else {
                pipe_config->limited_color_range =
                        intel_dp->limited_color_range;
        }

        pipe_config->lane_count = lane_count;

        pipe_config->pipe_bpp = bpp;
        pipe_config->port_clock = common_rates[clock];

        intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
                              &link_bw, &rate_select);

        DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
                      link_bw, rate_select, pipe_config->lane_count,
                      pipe_config->port_clock, bpp);
        DRM_DEBUG_KMS("DP link bw required %i available %i\n",
                      mode_rate, link_avail);

        intel_link_compute_m_n(bpp, lane_count,
                               adjusted_mode->crtc_clock,
                               pipe_config->port_clock,
                               &pipe_config->dp_m_n);

        if (intel_connector->panel.downclock_mode != NULL &&
            dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
                pipe_config->has_drrs = true;
                intel_link_compute_m_n(bpp, lane_count,
                                       intel_connector->panel.downclock_mode->clock,
                                       pipe_config->port_clock,
                                       &pipe_config->dp_m2_n2);
        }

        if (IS_SKYLAKE(dev) && is_edp(intel_dp))
                skl_edp_set_pll_config(pipe_config);
        else if (IS_BROXTON(dev))
                /* handled in ddi */;
        else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                hsw_dp_set_ddi_pll_sel(pipe_config);
        else
                intel_dp_set_clock(encoder, pipe_config);

        return true;
}

static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
                      crtc->config->port_clock);
        dpa_ctl = I915_READ(DP_A);
        dpa_ctl &= ~DP_PLL_FREQ_MASK;

        if (crtc->config->port_clock == 162000) {
                /* For a long time we've carried around an ILK-DevA w/a for the
                 * 160MHz clock. If we're really unlucky, it's still required.
                 */
                DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
                dpa_ctl |= DP_PLL_FREQ_160MHZ;
                intel_dp->DP |= DP_PLL_FREQ_160MHZ;
        } else {
                dpa_ctl |= DP_PLL_FREQ_270MHZ;
                intel_dp->DP |= DP_PLL_FREQ_270MHZ;
        }

        I915_WRITE(DP_A, dpa_ctl);

        POSTING_READ(DP_A);
        udelay(500);
}

void intel_dp_set_link_params(struct intel_dp *intel_dp,
                              const struct intel_crtc_state *pipe_config)
{
        intel_dp->link_rate = pipe_config->port_clock;
        intel_dp->lane_count = pipe_config->lane_count;
}

static void intel_dp_prepare(struct intel_encoder *encoder)
{
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

        intel_dp_set_link_params(intel_dp, crtc->config);

        /*
         * There are four kinds of DP registers:
         *
         *      IBX PCH
         *      SNB CPU
         *      IVB CPU
         *      CPT PCH
         *
         * IBX PCH and CPU are the same for almost everything,
         * except that the CPU DP PLL is configured in this
         * register
         *
         * CPT PCH is quite different, having many bits moved
         * to the TRANS_DP_CTL register instead. That
         * configuration happens (oddly) in ironlake_pch_enable
         */

        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
         */
        intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

        /* Handle DP bits in common between all three register formats */
        intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
        intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);

        if (crtc->config->has_audio)
                intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

        /* Split out the IBX/CPU vs CPT settings */

        if (IS_GEN7(dev) && port == PORT_A) {
                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                intel_dp->DP |= crtc->pipe << 29;
        } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
                u32 trans_dp;

                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

                trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        trans_dp |= TRANS_DP_ENH_FRAMING;
                else
                        trans_dp &= ~TRANS_DP_ENH_FRAMING;
                I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
        } else {
                if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
                    crtc->config->limited_color_range)
                        intel_dp->DP |= DP_COLOR_RANGE_16_235;

                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF;

                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                if (IS_CHERRYVIEW(dev))
                        intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
                else if (crtc->pipe == PIPE_B)
                        intel_dp->DP |= DP_PIPEB_SELECT;
        }
}

#define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
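
/*
 * Each MASK/VALUE pair above is handed to wait_panel_status() below,
 * which polls PP_STATUS until (status & MASK) == VALUE, i.e. until the
 * power sequencer reports the panel fully on, fully off, or idle after
 * a power cycle.
 */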

static void wait_panel_status(struct intel_dp *intel_dp,
                              u32 mask,
                              u32 value)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_stat_reg, pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        pp_stat_reg = _pp_stat_reg(intel_dp);
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
                      mask, value,
                      I915_READ(pp_stat_reg),
                      I915_READ(pp_ctrl_reg));

        if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
                DRM_ERROR("Panel status timeout: status %08x control %08x\n",
                          I915_READ(pp_stat_reg),
                          I915_READ(pp_ctrl_reg));
        }

        DRM_DEBUG_KMS("Wait complete\n");
}

static void wait_panel_on(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power on\n");
        wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void wait_panel_off(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power off time\n");
        wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power cycle\n");

        /* When we disable the VDD override bit last we have to do the manual
         * wait. */
        wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
                                       intel_dp->panel_power_cycle_delay);

        wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}

static void wait_backlight_on(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
                                       intel_dp->backlight_on_delay);
}

static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
                                       intel_dp->backlight_off_delay);
}
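
/*
 * The wait_remaining_ms_from_jiffies() based helpers above enforce the
 * panel's minimum power sequencing delays in software: each
 * power/backlight transition records a jiffies timestamp, and the next
 * transition sleeps only for whatever remains of the required delay.
 */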

/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 control;

        lockdep_assert_held(&dev_priv->pps_mutex);

        control = I915_READ(_pp_ctrl_reg(intel_dp));
        if (!IS_BROXTON(dev)) {
                control &= ~PANEL_UNLOCK_MASK;
                control |= PANEL_UNLOCK_REGS;
        }
        return control;
}

/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_stat_reg, pp_ctrl_reg;
        bool need_to_disable = !intel_dp->want_panel_vdd;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return false;

        cancel_delayed_work(&intel_dp->panel_vdd_work);
        intel_dp->want_panel_vdd = true;

        if (edp_have_panel_vdd(intel_dp))
                return need_to_disable;

        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_get(dev_priv, power_domain);

        DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
                      port_name(intel_dig_port->port));

        if (!edp_have_panel_power(intel_dp))
                wait_panel_power_cycle(intel_dp);

        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_FORCE_VDD;

        pp_stat_reg = _pp_stat_reg(intel_dp);
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
        DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
                      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
        /*
         * If the panel wasn't on, delay before accessing aux channel
         */
        if (!edp_have_panel_power(intel_dp)) {
                DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
                              port_name(intel_dig_port->port));
                msleep(intel_dp->panel_power_up_delay);
        }

        return need_to_disable;
}

/*
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
        bool vdd;

        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        vdd = edp_panel_vdd_on(intel_dp);
        pps_unlock(intel_dp);

        I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
                        port_name(dp_to_dig_port(intel_dp)->port));
}

static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *intel_dig_port =
                dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_stat_reg, pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        WARN_ON(intel_dp->want_panel_vdd);

        if (!edp_have_panel_vdd(intel_dp))
                return;

        DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
                      port_name(intel_dig_port->port));

        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_FORCE_VDD;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp_stat_reg = _pp_stat_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        /* Make sure sequencer is idle before allowing subsequent activity */
        DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
                      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

        if ((pp & POWER_TARGET_ON) == 0)
                intel_dp->last_power_cycle = jiffies;

        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_put(dev_priv, power_domain);
}

static void edp_panel_vdd_work(struct work_struct *__work)
{
        struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
                                                 struct intel_dp, panel_vdd_work);

        pps_lock(intel_dp);
        if (!intel_dp->want_panel_vdd)
                edp_panel_vdd_off_sync(intel_dp);
        pps_unlock(intel_dp);
}

static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
        unsigned long delay;

        /*
         * Queue the timer to fire a long time from now (relative to the power
         * down delay) to keep the panel power up across a sequence of
         * operations.
         */
        delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
        schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}

/*
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
        struct drm_i915_private *dev_priv =
                intel_dp_to_dev(intel_dp)->dev_private;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
                        port_name(dp_to_dig_port(intel_dp)->port));

        intel_dp->want_panel_vdd = false;

        if (sync)
                edp_panel_vdd_off_sync(intel_dp);
        else
                edp_panel_vdd_schedule_off(intel_dp);
}

static void edp_panel_on(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
                      port_name(dp_to_dig_port(intel_dp)->port));

        if (WARN(edp_have_panel_power(intel_dp),
                 "eDP port %c panel power already on\n",
                 port_name(dp_to_dig_port(intel_dp)->port)))
                return;

        wait_panel_power_cycle(intel_dp);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp = ironlake_get_pp_control(intel_dp);
        if (IS_GEN5(dev)) {
                /* ILK workaround: disable reset around power sequence */
1957 pp &= ~PANEL_POWER_RESET;
1958 I915_WRITE(pp_ctrl_reg, pp);
1959 POSTING_READ(pp_ctrl_reg);
1960 }
1961
1962 pp |= POWER_TARGET_ON;
1963 if (!IS_GEN5(dev))
1964 pp |= PANEL_POWER_RESET;
1965
1966 I915_WRITE(pp_ctrl_reg, pp);
1967 POSTING_READ(pp_ctrl_reg);
1968
1969 wait_panel_on(intel_dp);
1970 intel_dp->last_power_on = jiffies;
1971
1972 if (IS_GEN5(dev)) {
1973 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1974 I915_WRITE(pp_ctrl_reg, pp);
1975 POSTING_READ(pp_ctrl_reg);
1976 }
1977 }
1978
1979 void intel_edp_panel_on(struct intel_dp *intel_dp)
1980 {
1981 if (!is_edp(intel_dp))
1982 return;
1983
1984 pps_lock(intel_dp);
1985 edp_panel_on(intel_dp);
1986 pps_unlock(intel_dp);
1987 }
1988
1989
1990 static void edp_panel_off(struct intel_dp *intel_dp)
1991 {
1992 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1993 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1994 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1995 struct drm_i915_private *dev_priv = dev->dev_private;
1996 enum intel_display_power_domain power_domain;
1997 u32 pp;
1998 u32 pp_ctrl_reg;
1999
2000 lockdep_assert_held(&dev_priv->pps_mutex);
2001
2002 if (!is_edp(intel_dp))
2003 return;
2004
2005 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2006 port_name(dp_to_dig_port(intel_dp)->port));
2007
2008 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2009 port_name(dp_to_dig_port(intel_dp)->port));
2010
2011 pp = ironlake_get_pp_control(intel_dp);
2012 /* We need to switch off panel power _and_ force vdd, because otherwise
2013 * some panels get very unhappy and cease to work. */
2014 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2015 EDP_BLC_ENABLE);
2016
2017 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2018
2019 intel_dp->want_panel_vdd = false;
2020
2021 I915_WRITE(pp_ctrl_reg, pp);
2022 POSTING_READ(pp_ctrl_reg);
2023
2024 intel_dp->last_power_cycle = jiffies;
2025 wait_panel_off(intel_dp);
2026
2027 /* We got a reference when we enabled the VDD. */
2028 power_domain = intel_display_port_power_domain(intel_encoder);
2029 intel_display_power_put(dev_priv, power_domain);
2030 }
2031
2032 void intel_edp_panel_off(struct intel_dp *intel_dp)
2033 {
2034 if (!is_edp(intel_dp))
2035 return;
2036
2037 pps_lock(intel_dp);
2038 edp_panel_off(intel_dp);
2039 pps_unlock(intel_dp);
2040 }
2041
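/*
 * Note: eDP backlight control is split between the PWM handled by
 * intel_panel and the EDP_BLC_ENABLE bit in the panel power control
 * register; the _intel_edp_backlight_{on,off}() helpers below only
 * toggle the latter, while intel_edp_backlight_{on,off}() sequence
 * both.
 */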
2042 /* Enable backlight in the panel power control. */
2043 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2044 {
2045 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2046 struct drm_device *dev = intel_dig_port->base.base.dev;
2047 struct drm_i915_private *dev_priv = dev->dev_private;
2048 u32 pp;
2049 u32 pp_ctrl_reg;
2050
2051 /*
2052 * If we enable the backlight right away following a panel power
2053 * on, we may see slight flicker as the panel syncs with the eDP
2054 * link. So delay a bit to make sure the image is solid before
2055 * allowing it to appear.
2056 */
2057 wait_backlight_on(intel_dp);
2058
2059 pps_lock(intel_dp);
2060
2061 pp = ironlake_get_pp_control(intel_dp);
2062 pp |= EDP_BLC_ENABLE;
2063
2064 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2065
2066 I915_WRITE(pp_ctrl_reg, pp);
2067 POSTING_READ(pp_ctrl_reg);
2068
2069 pps_unlock(intel_dp);
2070 }
2071
2072 /* Enable backlight PWM and backlight PP control. */
2073 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2074 {
2075 if (!is_edp(intel_dp))
2076 return;
2077
2078 DRM_DEBUG_KMS("\n");
2079
2080 intel_panel_enable_backlight(intel_dp->attached_connector);
2081 _intel_edp_backlight_on(intel_dp);
2082 }
2083
2084 /* Disable backlight in the panel power control. */
2085 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2086 {
2087 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2088 struct drm_i915_private *dev_priv = dev->dev_private;
2089 u32 pp;
2090 u32 pp_ctrl_reg;
2091
2092 if (!is_edp(intel_dp))
2093 return;
2094
2095 pps_lock(intel_dp);
2096
2097 pp = ironlake_get_pp_control(intel_dp);
2098 pp &= ~EDP_BLC_ENABLE;
2099
2100 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2101
2102 I915_WRITE(pp_ctrl_reg, pp);
2103 POSTING_READ(pp_ctrl_reg);
2104
2105 pps_unlock(intel_dp);
2106
2107 intel_dp->last_backlight_off = jiffies;
2108 edp_wait_backlight_off(intel_dp);
2109 }
2110
2111 /* Disable backlight PP control and backlight PWM. */
2112 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2113 {
2114 if (!is_edp(intel_dp))
2115 return;
2116
2117 DRM_DEBUG_KMS("\n");
2118
2119 _intel_edp_backlight_off(intel_dp);
2120 intel_panel_disable_backlight(intel_dp->attached_connector);
2121 }
2122
2123 /*
2124 * Hook for controlling the panel power control backlight through the bl_power
2125 * sysfs attribute. Take care to handle multiple calls.
2126 */
2127 static void intel_edp_backlight_power(struct intel_connector *connector,
2128 bool enable)
2129 {
2130 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2131 bool is_enabled;
2132
2133 pps_lock(intel_dp);
2134 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2135 pps_unlock(intel_dp);
2136
2137 if (is_enabled == enable)
2138 return;
2139
2140 DRM_DEBUG_KMS("panel power control backlight %s\n",
2141 enable ? "enable" : "disable");
2142
2143 if (enable)
2144 _intel_edp_backlight_on(intel_dp);
2145 else
2146 _intel_edp_backlight_off(intel_dp);
2147 }
2148
2149 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2150 {
2151 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2152 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2153 struct drm_device *dev = crtc->dev;
2154 struct drm_i915_private *dev_priv = dev->dev_private;
2155 u32 dpa_ctl;
2156
2157 assert_pipe_disabled(dev_priv,
2158 to_intel_crtc(crtc)->pipe);
2159
2160 DRM_DEBUG_KMS("\n");
2161 dpa_ctl = I915_READ(DP_A);
2162 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2163 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2164
2165 /* We don't adjust intel_dp->DP while tearing down the link, to
2166 * facilitate link retraining (e.g. after hotplug). Hence clear all
2167 * enable bits here to ensure that we don't enable too much. */
2168 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2169 intel_dp->DP |= DP_PLL_ENABLE;
2170 I915_WRITE(DP_A, intel_dp->DP);
2171 POSTING_READ(DP_A);
2172 udelay(200);
2173 }
2174
2175 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2176 {
2177 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2178 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2179 struct drm_device *dev = crtc->dev;
2180 struct drm_i915_private *dev_priv = dev->dev_private;
2181 u32 dpa_ctl;
2182
2183 assert_pipe_disabled(dev_priv,
2184 to_intel_crtc(crtc)->pipe);
2185
2186 dpa_ctl = I915_READ(DP_A);
2187 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2188 "dp pll off, should be on\n");
2189 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2190
2191 /* We can't rely on the value tracked for the DP register in
2192 * intel_dp->DP because link_down must not change that (otherwise link
2193 * re-training will fail). */
2194 dpa_ctl &= ~DP_PLL_ENABLE;
2195 I915_WRITE(DP_A, dpa_ctl);
2196 POSTING_READ(DP_A);
2197 udelay(200);
2198 }
2199
2200 /* If the sink supports it, try to set the power state appropriately */
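/*
 * DP_SET_POWER is only defined for DPCD 1.1 and later, hence the
 * revision check below. D0 asks the sink to power up, D3 to enter a
 * low power state.
 */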
2201 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2202 {
2203 int ret, i;
2204
2205 /* Should have a valid DPCD by this point */
2206 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2207 return;
2208
2209 if (mode != DRM_MODE_DPMS_ON) {
2210 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2211 DP_SET_POWER_D3);
2212 } else {
2213 /*
2214 * When turning on, we need to retry a few times, sleeping 1 ms
2215 * between attempts, to give the sink time to wake up.
2216 */
2217 for (i = 0; i < 3; i++) {
2218 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2219 DP_SET_POWER_D0);
2220 if (ret == 1)
2221 break;
2222 msleep(1);
2223 }
2224 }
2225
2226 if (ret != 1)
2227 DRM_DEBUG_KMS("failed to %s sink power state\n",
2228 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2229 }
2230
2231 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2232 enum pipe *pipe)
2233 {
2234 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2235 enum port port = dp_to_dig_port(intel_dp)->port;
2236 struct drm_device *dev = encoder->base.dev;
2237 struct drm_i915_private *dev_priv = dev->dev_private;
2238 enum intel_display_power_domain power_domain;
2239 u32 tmp;
2240
2241 power_domain = intel_display_port_power_domain(encoder);
2242 if (!intel_display_power_is_enabled(dev_priv, power_domain))
2243 return false;
2244
2245 tmp = I915_READ(intel_dp->output_reg);
2246
2247 if (!(tmp & DP_PORT_EN))
2248 return false;
2249
2250 if (IS_GEN7(dev) && port == PORT_A) {
2251 *pipe = PORT_TO_PIPE_CPT(tmp);
2252 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2253 enum pipe p;
2254
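/*
 * On CPT the pipe<->port routing lives in the per-transcoder
 * TRANS_DP_CTL registers, so scan the pipes for the one driving
 * this port.
 */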
2255 for_each_pipe(dev_priv, p) {
2256 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2257 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2258 *pipe = p;
2259 return true;
2260 }
2261 }
2262
2263 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2264 intel_dp->output_reg);
2265 } else if (IS_CHERRYVIEW(dev)) {
2266 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2267 } else {
2268 *pipe = PORT_TO_PIPE(tmp);
2269 }
2270
2271 return true;
2272 }
2273
2274 static void intel_dp_get_config(struct intel_encoder *encoder,
2275 struct intel_crtc_state *pipe_config)
2276 {
2277 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2278 u32 tmp, flags = 0;
2279 struct drm_device *dev = encoder->base.dev;
2280 struct drm_i915_private *dev_priv = dev->dev_private;
2281 enum port port = dp_to_dig_port(intel_dp)->port;
2282 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2283 int dotclock;
2284
2285 tmp = I915_READ(intel_dp->output_reg);
2286
2287 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2288
2289 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2290 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2291
2292 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2293 flags |= DRM_MODE_FLAG_PHSYNC;
2294 else
2295 flags |= DRM_MODE_FLAG_NHSYNC;
2296
2297 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2298 flags |= DRM_MODE_FLAG_PVSYNC;
2299 else
2300 flags |= DRM_MODE_FLAG_NVSYNC;
2301 } else {
2302 if (tmp & DP_SYNC_HS_HIGH)
2303 flags |= DRM_MODE_FLAG_PHSYNC;
2304 else
2305 flags |= DRM_MODE_FLAG_NHSYNC;
2306
2307 if (tmp & DP_SYNC_VS_HIGH)
2308 flags |= DRM_MODE_FLAG_PVSYNC;
2309 else
2310 flags |= DRM_MODE_FLAG_NVSYNC;
2311 }
2312
2313 pipe_config->base.adjusted_mode.flags |= flags;
2314
2315 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2316 tmp & DP_COLOR_RANGE_16_235)
2317 pipe_config->limited_color_range = true;
2318
2319 pipe_config->has_dp_encoder = true;
2320
2321 pipe_config->lane_count =
2322 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2323
2324 intel_dp_get_m_n(crtc, pipe_config);
2325
2326 if (port == PORT_A) {
2327 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2328 pipe_config->port_clock = 162000;
2329 else
2330 pipe_config->port_clock = 270000;
2331 }
2332
2333 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2334 &pipe_config->dp_m_n);
2335
2336 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2337 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2338
2339 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2340
2341 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2342 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2343 /*
2344 * This is a big fat ugly hack.
2345 *
2346 * Some machines in UEFI boot mode provide us a VBT that has 18
2347 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2348 * unknown we fail to light up. Yet the same BIOS boots up with
2349 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2350 * max, not what it tells us to use.
2351 *
2352 * Note: This will still be broken if the eDP panel is not lit
2353 * up by the BIOS, and thus we can't get the mode at module
2354 * load.
2355 */
2356 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2357 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2358 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2359 }
2360 }
2361
2362 static void intel_disable_dp(struct intel_encoder *encoder)
2363 {
2364 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2365 struct drm_device *dev = encoder->base.dev;
2366 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2367
2368 if (crtc->config->has_audio)
2369 intel_audio_codec_disable(encoder);
2370
2371 if (HAS_PSR(dev) && !HAS_DDI(dev))
2372 intel_psr_disable(intel_dp);
2373
2374 /* Make sure the panel is off before trying to change the mode. But also
2375 * ensure that we have vdd while we switch off the panel. */
2376 intel_edp_panel_vdd_on(intel_dp);
2377 intel_edp_backlight_off(intel_dp);
2378 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2379 intel_edp_panel_off(intel_dp);
2380
2381 /* disable the port before the pipe on g4x */
2382 if (INTEL_INFO(dev)->gen < 5)
2383 intel_dp_link_down(intel_dp);
2384 }
2385
2386 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2387 {
2388 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2389 enum port port = dp_to_dig_port(intel_dp)->port;
2390
2391 intel_dp_link_down(intel_dp);
2392 if (port == PORT_A)
2393 ironlake_edp_pll_off(intel_dp);
2394 }
2395
2396 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2397 {
2398 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2399
2400 intel_dp_link_down(intel_dp);
2401 }
2402
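/*
 * Assert (@reset == true) or deassert the TX lane and PCS clock soft
 * resets for the data lanes of a CHV port, covering the second PCS
 * group only when more than two lanes are in use.
 */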
2403 static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2404 bool reset)
2405 {
2406 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2407 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2408 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2409 enum pipe pipe = crtc->pipe;
2410 uint32_t val;
2411
2412 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2413 if (reset)
2414 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2415 else
2416 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2417 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2418
2419 if (crtc->config->lane_count > 2) {
2420 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2421 if (reset)
2422 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2423 else
2424 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2425 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2426 }
2427
2428 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2429 val |= CHV_PCS_REQ_SOFTRESET_EN;
2430 if (reset)
2431 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2432 else
2433 val |= DPIO_PCS_CLK_SOFT_RESET;
2434 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2435
2436 if (crtc->config->lane_count > 2) {
2437 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2438 val |= CHV_PCS_REQ_SOFTRESET_EN;
2439 if (reset)
2440 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2441 else
2442 val |= DPIO_PCS_CLK_SOFT_RESET;
2443 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2444 }
2445 }
2446
2447 static void chv_post_disable_dp(struct intel_encoder *encoder)
2448 {
2449 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2450 struct drm_device *dev = encoder->base.dev;
2451 struct drm_i915_private *dev_priv = dev->dev_private;
2452
2453 intel_dp_link_down(intel_dp);
2454
2455 mutex_lock(&dev_priv->sb_lock);
2456
2457 /* Assert data lane reset */
2458 chv_data_lane_soft_reset(encoder, true);
2459
2460 mutex_unlock(&dev_priv->sb_lock);
2461 }
2462
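/*
 * Program the source-side link training pattern: DDI platforms use
 * DP_TP_CTL, while older platforms encode the pattern in the port
 * register itself (with separate CPT/PCH and CHV bitfield variants).
 */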
2463 static void
2464 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2465 uint32_t *DP,
2466 uint8_t dp_train_pat)
2467 {
2468 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2469 struct drm_device *dev = intel_dig_port->base.base.dev;
2470 struct drm_i915_private *dev_priv = dev->dev_private;
2471 enum port port = intel_dig_port->port;
2472
2473 if (HAS_DDI(dev)) {
2474 uint32_t temp = I915_READ(DP_TP_CTL(port));
2475
2476 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2477 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2478 else
2479 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2480
2481 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2482 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2483 case DP_TRAINING_PATTERN_DISABLE:
2484 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2485
2486 break;
2487 case DP_TRAINING_PATTERN_1:
2488 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2489 break;
2490 case DP_TRAINING_PATTERN_2:
2491 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2492 break;
2493 case DP_TRAINING_PATTERN_3:
2494 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2495 break;
2496 }
2497 I915_WRITE(DP_TP_CTL(port), temp);
2498
2499 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2500 (HAS_PCH_CPT(dev) && port != PORT_A)) {
2501 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2502
2503 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2504 case DP_TRAINING_PATTERN_DISABLE:
2505 *DP |= DP_LINK_TRAIN_OFF_CPT;
2506 break;
2507 case DP_TRAINING_PATTERN_1:
2508 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2509 break;
2510 case DP_TRAINING_PATTERN_2:
2511 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2512 break;
2513 case DP_TRAINING_PATTERN_3:
2514 DRM_ERROR("DP training pattern 3 not supported\n");
2515 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2516 break;
2517 }
2518
2519 } else {
2520 if (IS_CHERRYVIEW(dev))
2521 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2522 else
2523 *DP &= ~DP_LINK_TRAIN_MASK;
2524
2525 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2526 case DP_TRAINING_PATTERN_DISABLE:
2527 *DP |= DP_LINK_TRAIN_OFF;
2528 break;
2529 case DP_TRAINING_PATTERN_1:
2530 *DP |= DP_LINK_TRAIN_PAT_1;
2531 break;
2532 case DP_TRAINING_PATTERN_2:
2533 *DP |= DP_LINK_TRAIN_PAT_2;
2534 break;
2535 case DP_TRAINING_PATTERN_3:
2536 if (IS_CHERRYVIEW(dev)) {
2537 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2538 } else {
2539 DRM_ERROR("DP training pattern 3 not supported\n");
2540 *DP |= DP_LINK_TRAIN_PAT_2;
2541 }
2542 break;
2543 }
2544 }
2545 }
2546
2547 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2548 {
2549 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2550 struct drm_i915_private *dev_priv = dev->dev_private;
2551
2552 /* enable with pattern 1 (as per spec) */
2553 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2554 DP_TRAINING_PATTERN_1);
2555
2556 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2557 POSTING_READ(intel_dp->output_reg);
2558
2559 /*
2560 * Magic for VLV/CHV. We _must_ first set up the register
2561 * without actually enabling the port, and then do another
2562 * write to enable the port. Otherwise link training will
2563 * fail when the power sequencer is freshly used for this port.
2564 */
2565 intel_dp->DP |= DP_PORT_EN;
2566
2567 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2568 POSTING_READ(intel_dp->output_reg);
2569 }
2570
2571 static void intel_enable_dp(struct intel_encoder *encoder)
2572 {
2573 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2574 struct drm_device *dev = encoder->base.dev;
2575 struct drm_i915_private *dev_priv = dev->dev_private;
2576 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2577 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2578
2579 if (WARN_ON(dp_reg & DP_PORT_EN))
2580 return;
2581
2582 pps_lock(intel_dp);
2583
2584 if (IS_VALLEYVIEW(dev))
2585 vlv_init_panel_power_sequencer(intel_dp);
2586
2587 intel_dp_enable_port(intel_dp);
2588
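/* Keep vdd forced on for the whole panel power-on sequence. */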
2589 edp_panel_vdd_on(intel_dp);
2590 edp_panel_on(intel_dp);
2591 edp_panel_vdd_off(intel_dp, true);
2592
2593 pps_unlock(intel_dp);
2594
2595 if (IS_VALLEYVIEW(dev)) {
2596 unsigned int lane_mask = 0x0;
2597
2598 if (IS_CHERRYVIEW(dev))
2599 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2600
2601 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2602 lane_mask);
2603 }
2604
2605 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2606 intel_dp_start_link_train(intel_dp);
2607 intel_dp_stop_link_train(intel_dp);
2608
2609 if (crtc->config->has_audio) {
2610 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2611 pipe_name(crtc->pipe));
2612 intel_audio_codec_enable(encoder);
2613 }
2614 }
2615
2616 static void g4x_enable_dp(struct intel_encoder *encoder)
2617 {
2618 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2619
2620 intel_enable_dp(encoder);
2621 intel_edp_backlight_on(intel_dp);
2622 }
2623
2624 static void vlv_enable_dp(struct intel_encoder *encoder)
2625 {
2626 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2627
2628 intel_edp_backlight_on(intel_dp);
2629 intel_psr_enable(intel_dp);
2630 }
2631
2632 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2633 {
2634 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2635 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2636
2637 intel_dp_prepare(encoder);
2638
2639 /* Only ilk+ has port A */
2640 if (dport->port == PORT_A) {
2641 ironlake_set_pll_cpu_edp(intel_dp);
2642 ironlake_edp_pll_on(intel_dp);
2643 }
2644 }
2645
2646 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2647 {
2648 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2649 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2650 enum pipe pipe = intel_dp->pps_pipe;
2651 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2652
2653 edp_panel_vdd_off_sync(intel_dp);
2654
2655 /*
2656 * VLV seems to get confused when multiple power sequencers
2657 * have the same port selected (even if only one has power/vdd
2658 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2659 * CHV on the other hand doesn't seem to mind having the same port
2660 * selected in multiple power sequencers, but let's always clear the
2661 * port select when logically disconnecting a power sequencer
2662 * from a port.
2663 */
2664 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2665 pipe_name(pipe), port_name(intel_dig_port->port));
2666 I915_WRITE(pp_on_reg, 0);
2667 POSTING_READ(pp_on_reg);
2668
2669 intel_dp->pps_pipe = INVALID_PIPE;
2670 }
2671
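/*
 * Make sure no other eDP port still claims the power sequencer of
 * @pipe: turn off vdd and clear the port select on any that does.
 */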
2672 static void vlv_steal_power_sequencer(struct drm_device *dev,
2673 enum pipe pipe)
2674 {
2675 struct drm_i915_private *dev_priv = dev->dev_private;
2676 struct intel_encoder *encoder;
2677
2678 lockdep_assert_held(&dev_priv->pps_mutex);
2679
2680 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2681 return;
2682
2683 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2684 base.head) {
2685 struct intel_dp *intel_dp;
2686 enum port port;
2687
2688 if (encoder->type != INTEL_OUTPUT_EDP)
2689 continue;
2690
2691 intel_dp = enc_to_intel_dp(&encoder->base);
2692 port = dp_to_dig_port(intel_dp)->port;
2693
2694 if (intel_dp->pps_pipe != pipe)
2695 continue;
2696
2697 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2698 pipe_name(pipe), port_name(port));
2699
2700 WARN(encoder->base.crtc,
2701 "stealing pipe %c power sequencer from active eDP port %c\n",
2702 pipe_name(pipe), port_name(port));
2703
2704 /* make sure vdd is off before we steal it */
2705 vlv_detach_power_sequencer(intel_dp);
2706 }
2707 }
2708
2709 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2710 {
2711 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2712 struct intel_encoder *encoder = &intel_dig_port->base;
2713 struct drm_device *dev = encoder->base.dev;
2714 struct drm_i915_private *dev_priv = dev->dev_private;
2715 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2716
2717 lockdep_assert_held(&dev_priv->pps_mutex);
2718
2719 if (!is_edp(intel_dp))
2720 return;
2721
2722 if (intel_dp->pps_pipe == crtc->pipe)
2723 return;
2724
2725 /*
2726 * If another power sequencer was being used on this
2727 * port previously, make sure to turn off vdd there while
2728 * we still have control of it.
2729 */
2730 if (intel_dp->pps_pipe != INVALID_PIPE)
2731 vlv_detach_power_sequencer(intel_dp);
2732
2733 /*
2734 * We may be stealing the power
2735 * sequencer from another port.
2736 */
2737 vlv_steal_power_sequencer(dev, crtc->pipe);
2738
2739 /* now it's all ours */
2740 intel_dp->pps_pipe = crtc->pipe;
2741
2742 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2743 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2744
2745 /* init power sequencer on this pipe and port */
2746 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2747 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2748 }
2749
2750 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2751 {
2752 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2753 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2754 struct drm_device *dev = encoder->base.dev;
2755 struct drm_i915_private *dev_priv = dev->dev_private;
2756 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2757 enum dpio_channel port = vlv_dport_to_channel(dport);
2758 int pipe = intel_crtc->pipe;
2759 u32 val;
2760
2761 mutex_lock(&dev_priv->sb_lock);
2762
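/*
 * Note: the DPIO read below appears to serve only as a dummy read;
 * its result is discarded before the clock channel configuration is
 * built up from scratch and written back.
 */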
2763 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2764 val = 0;
2765 if (pipe)
2766 val |= (1<<21);
2767 else
2768 val &= ~(1<<21);
2769 val |= 0x001000c4;
2770 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2771 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2772 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2773
2774 mutex_unlock(&dev_priv->sb_lock);
2775
2776 intel_enable_dp(encoder);
2777 }
2778
2779 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2780 {
2781 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2782 struct drm_device *dev = encoder->base.dev;
2783 struct drm_i915_private *dev_priv = dev->dev_private;
2784 struct intel_crtc *intel_crtc =
2785 to_intel_crtc(encoder->base.crtc);
2786 enum dpio_channel port = vlv_dport_to_channel(dport);
2787 int pipe = intel_crtc->pipe;
2788
2789 intel_dp_prepare(encoder);
2790
2791 /* Program Tx lane resets to default */
2792 mutex_lock(&dev_priv->sb_lock);
2793 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2794 DPIO_PCS_TX_LANE2_RESET |
2795 DPIO_PCS_TX_LANE1_RESET);
2796 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2797 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2798 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2799 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2800 DPIO_PCS_CLK_SOFT_RESET);
2801
2802 /* Fix up inter-pair skew failure */
2803 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2804 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2805 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2806 mutex_unlock(&dev_priv->sb_lock);
2807 }
2808
2809 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2810 {
2811 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2812 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2813 struct drm_device *dev = encoder->base.dev;
2814 struct drm_i915_private *dev_priv = dev->dev_private;
2815 struct intel_crtc *intel_crtc =
2816 to_intel_crtc(encoder->base.crtc);
2817 enum dpio_channel ch = vlv_dport_to_channel(dport);
2818 int pipe = intel_crtc->pipe;
2819 int data, i, stagger;
2820 u32 val;
2821
2822 mutex_lock(&dev_priv->sb_lock);
2823
2824 /* allow hardware to manage TX FIFO reset source */
2825 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2826 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2827 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2828
2829 if (intel_crtc->config->lane_count > 2) {
2830 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2831 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2832 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2833 }
2834
2835 /* Program Tx lane latency optimal setting */
2836 for (i = 0; i < intel_crtc->config->lane_count; i++) {
2837 /* Set the upar bit */
2838 if (intel_crtc->config->lane_count == 1)
2839 data = 0x0;
2840 else
2841 data = (i == 1) ? 0x0 : 0x1;
2842 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2843 data << DPIO_UPAR_SHIFT);
2844 }
2845
2846 /* Data lane stagger programming */
2847 if (intel_crtc->config->port_clock > 270000)
2848 stagger = 0x18;
2849 else if (intel_crtc->config->port_clock > 135000)
2850 stagger = 0xd;
2851 else if (intel_crtc->config->port_clock > 67500)
2852 stagger = 0x7;
2853 else if (intel_crtc->config->port_clock > 33750)
2854 stagger = 0x4;
2855 else
2856 stagger = 0x2;
2857
2858 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2859 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2860 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2861
2862 if (intel_crtc->config->lane_count > 2) {
2863 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2864 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2865 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2866 }
2867
2868 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2869 DPIO_LANESTAGGER_STRAP(stagger) |
2870 DPIO_LANESTAGGER_STRAP_OVRD |
2871 DPIO_TX1_STAGGER_MASK(0x1f) |
2872 DPIO_TX1_STAGGER_MULT(6) |
2873 DPIO_TX2_STAGGER_MULT(0));
2874
2875 if (intel_crtc->config->lane_count > 2) {
2876 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2877 DPIO_LANESTAGGER_STRAP(stagger) |
2878 DPIO_LANESTAGGER_STRAP_OVRD |
2879 DPIO_TX1_STAGGER_MASK(0x1f) |
2880 DPIO_TX1_STAGGER_MULT(7) |
2881 DPIO_TX2_STAGGER_MULT(5));
2882 }
2883
2884 /* Deassert data lane reset */
2885 chv_data_lane_soft_reset(encoder, false);
2886
2887 mutex_unlock(&dev_priv->sb_lock);
2888
2889 intel_enable_dp(encoder);
2890
2891 /* Second common lane will stay alive on its own now */
2892 if (dport->release_cl2_override) {
2893 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
2894 dport->release_cl2_override = false;
2895 }
2896 }
2897
2898 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2899 {
2900 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2901 struct drm_device *dev = encoder->base.dev;
2902 struct drm_i915_private *dev_priv = dev->dev_private;
2903 struct intel_crtc *intel_crtc =
2904 to_intel_crtc(encoder->base.crtc);
2905 enum dpio_channel ch = vlv_dport_to_channel(dport);
2906 enum pipe pipe = intel_crtc->pipe;
2907 unsigned int lane_mask =
2908 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
2909 u32 val;
2910
2911 intel_dp_prepare(encoder);
2912
2913 /*
2914 * Must trick the second common lane into life.
2915 * Otherwise we can't even access the PLL.
2916 */
2917 if (ch == DPIO_CH0 && pipe == PIPE_B)
2918 dport->release_cl2_override =
2919 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
2920
2921 chv_phy_powergate_lanes(encoder, true, lane_mask);
2922
2923 mutex_lock(&dev_priv->sb_lock);
2924
2925 /* Assert data lane reset */
2926 chv_data_lane_soft_reset(encoder, true);
2927
2928 /* program left/right clock distribution */
2929 if (pipe != PIPE_B) {
2930 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2931 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2932 if (ch == DPIO_CH0)
2933 val |= CHV_BUFLEFTENA1_FORCE;
2934 if (ch == DPIO_CH1)
2935 val |= CHV_BUFRIGHTENA1_FORCE;
2936 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2937 } else {
2938 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2939 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2940 if (ch == DPIO_CH0)
2941 val |= CHV_BUFLEFTENA2_FORCE;
2942 if (ch == DPIO_CH1)
2943 val |= CHV_BUFRIGHTENA2_FORCE;
2944 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2945 }
2946
2947 /* program clock channel usage */
2948 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2949 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2950 if (pipe != PIPE_B)
2951 val &= ~CHV_PCS_USEDCLKCHANNEL;
2952 else
2953 val |= CHV_PCS_USEDCLKCHANNEL;
2954 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2955
2956 if (intel_crtc->config->lane_count > 2) {
2957 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2958 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2959 if (pipe != PIPE_B)
2960 val &= ~CHV_PCS_USEDCLKCHANNEL;
2961 else
2962 val |= CHV_PCS_USEDCLKCHANNEL;
2963 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2964 }
2965
2966 /*
2967 * This is a bit weird since generally CL
2968 * matches the pipe, but here we need to
2969 * pick the CL based on the port.
2970 */
2971 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2972 if (pipe != PIPE_B)
2973 val &= ~CHV_CMN_USEDCLKCHANNEL;
2974 else
2975 val |= CHV_CMN_USEDCLKCHANNEL;
2976 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2977
2978 mutex_unlock(&dev_priv->sb_lock);
2979 }
2980
2981 static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
2982 {
2983 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2984 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
2985 u32 val;
2986
2987 mutex_lock(&dev_priv->sb_lock);
2988
2989 /* disable left/right clock distribution */
2990 if (pipe != PIPE_B) {
2991 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2992 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2993 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2994 } else {
2995 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2996 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2997 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2998 }
2999
3000 mutex_unlock(&dev_priv->sb_lock);
3001
3002 /*
3003 * Leave the power down bit cleared for at least one
3004 * lane so that chv_powergate_phy_ch() will power
3005 * on something when the channel is otherwise unused.
3006 * When the port is off and the override is removed
3007 * the lanes power down anyway, so it doesn't really
3008 * matter what the state of the power down bits is
3009 * after this.
3010 */
3011 chv_phy_powergate_lanes(encoder, false, 0x0);
3012 }
3013
3014 /*
3015 * Native read with retry for link status and receiver capability reads for
3016 * cases where the sink may still be asleep.
3017 *
3018 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3019 * supposed to retry 3 times per the spec.
3020 */
3021 static ssize_t
3022 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3023 void *buffer, size_t size)
3024 {
3025 ssize_t ret;
3026 int i;
3027
3028 /*
3029 * Sometimes we just get the same incorrect byte repeated
3030 * over the entire buffer. Doing just one throw away read
3031 * initially seems to "solve" it.
3032 */
3033 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3034
3035 for (i = 0; i < 3; i++) {
3036 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3037 if (ret == size)
3038 return ret;
3039 msleep(1);
3040 }
3041
3042 return ret;
3043 }
3044
3045 /*
3046 * Fetch AUX CH registers 0x202 - 0x207 which contain
3047 * link status information
3048 */
3049 static bool
3050 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3051 {
3052 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3053 DP_LANE0_1_STATUS,
3054 link_status,
3055 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3056 }
3057
3058 /* These are source-specific values. */
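/*
 * Voltage swing levels 0-3 nominally correspond to 400/600/800/1200 mV
 * in the DP spec; which maximum a given platform can drive varies.
 */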
3059 static uint8_t
3060 intel_dp_voltage_max(struct intel_dp *intel_dp)
3061 {
3062 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3063 struct drm_i915_private *dev_priv = dev->dev_private;
3064 enum port port = dp_to_dig_port(intel_dp)->port;
3065
3066 if (IS_BROXTON(dev))
3067 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3068 else if (INTEL_INFO(dev)->gen >= 9) {
3069 if (dev_priv->edp_low_vswing && port == PORT_A)
3070 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3071 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3072 } else if (IS_VALLEYVIEW(dev))
3073 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3074 else if (IS_GEN7(dev) && port == PORT_A)
3075 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3076 else if (HAS_PCH_CPT(dev) && port != PORT_A)
3077 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3078 else
3079 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3080 }
3081
3082 static uint8_t
3083 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3084 {
3085 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3086 enum port port = dp_to_dig_port(intel_dp)->port;
3087
3088 if (INTEL_INFO(dev)->gen >= 9) {
3089 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3090 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3091 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3092 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3093 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3094 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3095 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3096 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3097 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3098 default:
3099 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3100 }
3101 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3102 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3103 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3104 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3105 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3106 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3107 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3108 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3109 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3110 default:
3111 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3112 }
3113 } else if (IS_VALLEYVIEW(dev)) {
3114 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3115 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3116 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3117 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3118 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3119 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3120 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3121 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3122 default:
3123 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3124 }
3125 } else if (IS_GEN7(dev) && port == PORT_A) {
3126 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3127 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3128 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3129 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3130 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3131 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3132 default:
3133 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3134 }
3135 } else {
3136 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3137 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3138 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3139 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3140 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3141 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3142 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3143 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3144 default:
3145 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3146 }
3147 }
3148 }
3149
3150 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3151 {
3152 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3153 struct drm_i915_private *dev_priv = dev->dev_private;
3154 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3155 struct intel_crtc *intel_crtc =
3156 to_intel_crtc(dport->base.base.crtc);
3157 unsigned long demph_reg_value, preemph_reg_value,
3158 uniqtranscale_reg_value;
3159 uint8_t train_set = intel_dp->train_set[0];
3160 enum dpio_channel port = vlv_dport_to_channel(dport);
3161 int pipe = intel_crtc->pipe;
3162
3163 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3164 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3165 preemph_reg_value = 0x0004000;
3166 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3167 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3168 demph_reg_value = 0x2B405555;
3169 uniqtranscale_reg_value = 0x552AB83A;
3170 break;
3171 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3172 demph_reg_value = 0x2B404040;
3173 uniqtranscale_reg_value = 0x5548B83A;
3174 break;
3175 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3176 demph_reg_value = 0x2B245555;
3177 uniqtranscale_reg_value = 0x5560B83A;
3178 break;
3179 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3180 demph_reg_value = 0x2B405555;
3181 uniqtranscale_reg_value = 0x5598DA3A;
3182 break;
3183 default:
3184 return 0;
3185 }
3186 break;
3187 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3188 preemph_reg_value = 0x0002000;
3189 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3190 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3191 demph_reg_value = 0x2B404040;
3192 uniqtranscale_reg_value = 0x5552B83A;
3193 break;
3194 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3195 demph_reg_value = 0x2B404848;
3196 uniqtranscale_reg_value = 0x5580B83A;
3197 break;
3198 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3199 demph_reg_value = 0x2B404040;
3200 uniqtranscale_reg_value = 0x55ADDA3A;
3201 break;
3202 default:
3203 return 0;
3204 }
3205 break;
3206 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3207 preemph_reg_value = 0x0000000;
3208 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3209 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3210 demph_reg_value = 0x2B305555;
3211 uniqtranscale_reg_value = 0x5570B83A;
3212 break;
3213 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3214 demph_reg_value = 0x2B2B4040;
3215 uniqtranscale_reg_value = 0x55ADDA3A;
3216 break;
3217 default:
3218 return 0;
3219 }
3220 break;
3221 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3222 preemph_reg_value = 0x0006000;
3223 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3224 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3225 demph_reg_value = 0x1B405555;
3226 uniqtranscale_reg_value = 0x55ADDA3A;
3227 break;
3228 default:
3229 return 0;
3230 }
3231 break;
3232 default:
3233 return 0;
3234 }
3235
3236 mutex_lock(&dev_priv->sb_lock);
3237 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3238 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3239 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3240 uniqtranscale_reg_value);
3241 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3242 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3243 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3244 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3245 mutex_unlock(&dev_priv->sb_lock);
3246
3247 return 0;
3248 }
3249
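/*
 * The unique transition scale tweak is only needed for the maximum
 * voltage swing / zero pre-emphasis combination.
 */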
3250 static bool chv_need_uniq_trans_scale(uint8_t train_set)
3251 {
3252 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3253 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3254 }
3255
3256 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3257 {
3258 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3259 struct drm_i915_private *dev_priv = dev->dev_private;
3260 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3261 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3262 u32 deemph_reg_value, margin_reg_value, val;
3263 uint8_t train_set = intel_dp->train_set[0];
3264 enum dpio_channel ch = vlv_dport_to_channel(dport);
3265 enum pipe pipe = intel_crtc->pipe;
3266 int i;
3267
3268 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3269 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3270 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3271 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3272 deemph_reg_value = 128;
3273 margin_reg_value = 52;
3274 break;
3275 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3276 deemph_reg_value = 128;
3277 margin_reg_value = 77;
3278 break;
3279 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3280 deemph_reg_value = 128;
3281 margin_reg_value = 102;
3282 break;
3283 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3284 deemph_reg_value = 128;
3285 margin_reg_value = 154;
3286 /* FIXME extra to set for 1200 */
3287 break;
3288 default:
3289 return 0;
3290 }
3291 break;
3292 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3293 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3294 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3295 deemph_reg_value = 85;
3296 margin_reg_value = 78;
3297 break;
3298 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3299 deemph_reg_value = 85;
3300 margin_reg_value = 116;
3301 break;
3302 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3303 deemph_reg_value = 85;
3304 margin_reg_value = 154;
3305 break;
3306 default:
3307 return 0;
3308 }
3309 break;
3310 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3311 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3312 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3313 deemph_reg_value = 64;
3314 margin_reg_value = 104;
3315 break;
3316 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3317 deemph_reg_value = 64;
3318 margin_reg_value = 154;
3319 break;
3320 default:
3321 return 0;
3322 }
3323 break;
3324 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3325 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3326 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3327 deemph_reg_value = 43;
3328 margin_reg_value = 154;
3329 break;
3330 default:
3331 return 0;
3332 }
3333 break;
3334 default:
3335 return 0;
3336 }
3337
3338 mutex_lock(&dev_priv->sb_lock);
3339
3340 /* Clear calc init */
3341 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3342 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3343 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3344 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3345 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3346
3347 if (intel_crtc->config->lane_count > 2) {
3348 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3349 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3350 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3351 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3352 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3353 }
3354
3355 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3356 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3357 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3358 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3359
3360 if (intel_crtc->config->lane_count > 2) {
3361 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3362 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3363 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3364 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3365 }
3366
3367 /* Program swing deemph */
3368 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3369 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3370 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3371 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3372 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3373 }
3374
3375 /* Program swing margin */
3376 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3377 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3378
3379 val &= ~DPIO_SWING_MARGIN000_MASK;
3380 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3381
3382 /*
3383 * Supposedly this value shouldn't matter when unique transition
3384 * scale is disabled, but in fact it does matter. Let's just
3385 * always program the same value and hope it's OK.
3386 */
3387 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3388 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3389
3390 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3391 }
3392
3393 /*
3394 * The document says it needs to set bit 27 for ch0 and bit 26
3395 * for ch1; that might be a typo in the doc. For now, for this
3396 * unique transition scale selection, set bit 27 for both ch0
3397 * and ch1.
3398 */
3399 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3400 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3401 if (chv_need_uniq_trans_scale(train_set))
3402 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3403 else
3404 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3405 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3406 }
3407
3408 /* Start swing calculation */
3409 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3410 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3411 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3412
3413 if (intel_crtc->config->lane_count > 2) {
3414 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3415 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3416 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3417 }
3418
3419 mutex_unlock(&dev_priv->sb_lock);
3420
3421 return 0;
3422 }
3423
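/*
 * Compute the next train_set from the sink's adjust requests: take
 * the highest voltage swing and pre-emphasis requested across the
 * lanes, clamp them to what the source supports, and flag
 * MAX_*_REACHED once the source maximum is hit.
 */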
3424 static void
3425 intel_get_adjust_train(struct intel_dp *intel_dp,
3426 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3427 {
3428 uint8_t v = 0;
3429 uint8_t p = 0;
3430 int lane;
3431 uint8_t voltage_max;
3432 uint8_t preemph_max;
3433
3434 for (lane = 0; lane < intel_dp->lane_count; lane++) {
3435 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3436 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3437
3438 if (this_v > v)
3439 v = this_v;
3440 if (this_p > p)
3441 p = this_p;
3442 }
3443
3444 voltage_max = intel_dp_voltage_max(intel_dp);
3445 if (v >= voltage_max)
3446 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3447
3448 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3449 if (p >= preemph_max)
3450 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3451
3452 for (lane = 0; lane < 4; lane++)
3453 intel_dp->train_set[lane] = v | p;
3454 }
3455
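/* Gen4's DP voltage swing and pre-emphasis control (also the fallback
 * for PCH-attached DP ports on later generations) */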
3456 static uint32_t
3457 gen4_signal_levels(uint8_t train_set)
3458 {
3459 uint32_t signal_levels = 0;
3460
3461 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3462 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3463 default:
3464 signal_levels |= DP_VOLTAGE_0_4;
3465 break;
3466 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3467 signal_levels |= DP_VOLTAGE_0_6;
3468 break;
3469 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3470 signal_levels |= DP_VOLTAGE_0_8;
3471 break;
3472 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3473 signal_levels |= DP_VOLTAGE_1_2;
3474 break;
3475 }
3476 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3477 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3478 default:
3479 signal_levels |= DP_PRE_EMPHASIS_0;
3480 break;
3481 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3482 signal_levels |= DP_PRE_EMPHASIS_3_5;
3483 break;
3484 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3485 signal_levels |= DP_PRE_EMPHASIS_6;
3486 break;
3487 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3488 signal_levels |= DP_PRE_EMPHASIS_9_5;
3489 break;
3490 }
3491 return signal_levels;
3492 }
3493
3494 /* Gen6's DP voltage swing and pre-emphasis control */
3495 static uint32_t
3496 gen6_edp_signal_levels(uint8_t train_set)
3497 {
3498 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3499 DP_TRAIN_PRE_EMPHASIS_MASK);
3500 switch (signal_levels) {
3501 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3502 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3503 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3504 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3505 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3506 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3507 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3508 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3509 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3510 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3511 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3512 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3513 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3514 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3515 default:
3516 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3517 "0x%x\n", signal_levels);
3518 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3519 }
3520 }
3521
3522 /* Gen7's DP voltage swing and pre-emphasis control */
3523 static uint32_t
3524 gen7_edp_signal_levels(uint8_t train_set)
3525 {
3526 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3527 DP_TRAIN_PRE_EMPHASIS_MASK);
3528 switch (signal_levels) {
3529 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3530 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3531 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3532 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3533 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3534 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3535
3536 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3537 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3538 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3539 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3540
3541 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3542 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3543 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3544 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3545
3546 default:
3547 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3548 "0x%x\n", signal_levels);
3549 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3550 }
3551 }
3552
3553 /* Properly updates "DP" with the correct signal levels. */
3554 static void
3555 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3556 {
3557 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3558 enum port port = intel_dig_port->port;
3559 struct drm_device *dev = intel_dig_port->base.base.dev;
3560 uint32_t signal_levels, mask = 0;
3561 uint8_t train_set = intel_dp->train_set[0];
3562
3563 if (HAS_DDI(dev)) {
3564 signal_levels = ddi_signal_levels(intel_dp);
3565
3566 if (IS_BROXTON(dev))
3567 signal_levels = 0;
3568 else
3569 mask = DDI_BUF_EMP_MASK;
3570 } else if (IS_CHERRYVIEW(dev)) {
3571 signal_levels = chv_signal_levels(intel_dp);
3572 } else if (IS_VALLEYVIEW(dev)) {
3573 signal_levels = vlv_signal_levels(intel_dp);
3574 } else if (IS_GEN7(dev) && port == PORT_A) {
3575 signal_levels = gen7_edp_signal_levels(train_set);
3576 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3577 } else if (IS_GEN6(dev) && port == PORT_A) {
3578 signal_levels = gen6_edp_signal_levels(train_set);
3579 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3580 } else {
3581 signal_levels = gen4_signal_levels(train_set);
3582 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3583 }
3584
3585 if (mask)
3586 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3587
3588 DRM_DEBUG_KMS("Using vswing level %d\n",
3589 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3590 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3591 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3592 DP_TRAIN_PRE_EMPHASIS_SHIFT);
3593
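/*
 * Illustrative example of the masked update below: on SNB PORT_A with
 * train_set[0] requesting vswing level 0 and pre-emphasis level 0,
 * gen6_edp_signal_levels() returns EDP_LINK_TRAIN_400_600MV_0DB_SNB_B
 * and mask is EDP_LINK_TRAIN_VOL_EMP_MASK_SNB, so only the
 * vswing/pre-emphasis bits of the port register value are rewritten.
 */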
3594 *DP = (*DP & ~mask) | signal_levels;
3595 }
3596
3597 static bool
3598 intel_dp_set_link_train(struct intel_dp *intel_dp,
3599 uint32_t *DP,
3600 uint8_t dp_train_pat)
3601 {
3602 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3603 struct drm_i915_private *dev_priv =
3604 to_i915(intel_dig_port->base.base.dev);
3605 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3606 int ret, len;
3607
3608 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3609
3610 I915_WRITE(intel_dp->output_reg, *DP);
3611 POSTING_READ(intel_dp->output_reg);
3612
3613 buf[0] = dp_train_pat;
3614 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3615 DP_TRAINING_PATTERN_DISABLE) {
3616 /* don't write DP_TRAINING_LANEx_SET on disable */
3617 len = 1;
3618 } else {
3619 /* DP_TRAINING_LANEx_SET registers follow DP_TRAINING_PATTERN_SET */
3620 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3621 len = intel_dp->lane_count + 1;
3622 }
3623
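/*
 * Layout of the single AUX write below (DPCD addresses per
 * drm_dp_helper.h):
 *
 *   buf[0]                  -> DP_TRAINING_PATTERN_SET (0x102)
 *   buf[1]..buf[lane_count] -> DP_TRAINING_LANE0_SET.. (0x103..)
 *
 * e.g. on a 4-lane link len == 5, and each lane's vswing/pre-emphasis
 * request byte lands right behind the pattern byte.
 */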
3624 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3625 buf, len);
3626
3627 return ret == len;
3628 }
3629
3630 static bool
3631 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3632 uint8_t dp_train_pat)
3633 {
3634 if (!intel_dp->train_set_valid)
3635 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3636 intel_dp_set_signal_levels(intel_dp, DP);
3637 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3638 }
3639
3640 static bool
3641 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3642 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3643 {
3644 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3645 struct drm_i915_private *dev_priv =
3646 to_i915(intel_dig_port->base.base.dev);
3647 int ret;
3648
3649 intel_get_adjust_train(intel_dp, link_status);
3650 intel_dp_set_signal_levels(intel_dp, DP);
3651
3652 I915_WRITE(intel_dp->output_reg, *DP);
3653 POSTING_READ(intel_dp->output_reg);
3654
3655 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3656 intel_dp->train_set, intel_dp->lane_count);
3657
3658 return ret == intel_dp->lane_count;
3659 }
3660
3661 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3662 {
3663 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3664 struct drm_device *dev = intel_dig_port->base.base.dev;
3665 struct drm_i915_private *dev_priv = dev->dev_private;
3666 enum port port = intel_dig_port->port;
3667 uint32_t val;
3668
3669 if (!HAS_DDI(dev))
3670 return;
3671
3672 val = I915_READ(DP_TP_CTL(port));
3673 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3674 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3675 I915_WRITE(DP_TP_CTL(port), val);
3676
3677 /*
3678 * On PORT_A we can only have eDP in SST mode. There, the only reason
3679 * we need to set idle transmission mode is to work around a HW issue
3680 * where we enable the pipe while not in idle link-training mode.
3681 * In this case there is a requirement to wait for a minimum number of
3682 * idle patterns to be sent.
3683 */
3684 if (port == PORT_A)
3685 return;
3686
3687 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3688 1))
3689 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3690 }
3691
3692 /* Enable corresponding port and start training pattern 1 */
3693 static void
3694 intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
3695 {
3696 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3697 struct drm_device *dev = encoder->dev;
3698 int i;
3699 uint8_t voltage;
3700 int voltage_tries, loop_tries;
3701 uint32_t DP = intel_dp->DP;
3702 uint8_t link_config[2];
3703 uint8_t link_bw, rate_select;
3704
3705 if (HAS_DDI(dev))
3706 intel_ddi_prepare_link_retrain(encoder);
3707
3708 intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
3709 &link_bw, &rate_select);
3710
3711 /* Write the link configuration data */
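/*
 * Sketch of what gets programmed here: DP_LINK_BW_SET (0x100) takes
 * 0x06/0x0a/0x14 for 1.62/2.7/5.4 GHz, and the adjacent
 * DP_LANE_COUNT_SET (0x101) takes the lane count, optionally ORed
 * with DP_LANE_COUNT_ENHANCED_FRAME_EN; writing link_config[] as two
 * bytes covers both registers in one transfer.
 */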
3712 link_config[0] = link_bw;
3713 link_config[1] = intel_dp->lane_count;
3714 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3715 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3716 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3717 if (intel_dp->num_sink_rates)
3718 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3719 &rate_select, 1);
3720
3721 link_config[0] = 0;
3722 link_config[1] = DP_SET_ANSI_8B10B;
3723 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3724
3725 DP |= DP_PORT_EN;
3726
3727 /* clock recovery */
3728 if (!intel_dp_reset_link_train(intel_dp, &DP,
3729 DP_TRAINING_PATTERN_1 |
3730 DP_LINK_SCRAMBLING_DISABLE)) {
3731 DRM_ERROR("failed to enable link training\n");
3732 return;
3733 }
3734
3735 voltage = 0xff;
3736 voltage_tries = 0;
3737 loop_tries = 0;
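/*
 * Retry policy for the loop below, summarized: keep adjusting
 * vswing/pre-emphasis as the sink requests; once every lane reports
 * DP_TRAIN_MAX_SWING_REACHED, restart training from scratch (at most
 * 5 full restarts); if the sink keeps requesting the same voltage,
 * give up after 5 identical tries.
 */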
3738 for (;;) {
3739 uint8_t link_status[DP_LINK_STATUS_SIZE];
3740
3741 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3742 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3743 DRM_ERROR("failed to get link status\n");
3744 break;
3745 }
3746
3747 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3748 DRM_DEBUG_KMS("clock recovery OK\n");
3749 break;
3750 }
3751
3752 /*
3753 * if we used previously trained voltage and pre-emphasis values
3754 * and we don't get clock recovery, reset link training values
3755 */
3756 if (intel_dp->train_set_valid) {
3757 DRM_DEBUG_KMS("clock recovery not ok, reset\n");
3758 /* clear the flag as we are not reusing train set */
3759 intel_dp->train_set_valid = false;
3760 if (!intel_dp_reset_link_train(intel_dp, &DP,
3761 DP_TRAINING_PATTERN_1 |
3762 DP_LINK_SCRAMBLING_DISABLE)) {
3763 DRM_ERROR("failed to enable link training\n");
3764 return;
3765 }
3766 continue;
3767 }
3768
3769 /* Check to see if we've tried the max voltage */
3770 for (i = 0; i < intel_dp->lane_count; i++)
3771 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3772 break;
3773 if (i == intel_dp->lane_count) {
3774 ++loop_tries;
3775 if (loop_tries == 5) {
3776 DRM_ERROR("too many full retries, give up\n");
3777 break;
3778 }
3779 intel_dp_reset_link_train(intel_dp, &DP,
3780 DP_TRAINING_PATTERN_1 |
3781 DP_LINK_SCRAMBLING_DISABLE);
3782 voltage_tries = 0;
3783 continue;
3784 }
3785
3786 /* Check to see if we've tried the same voltage 5 times */
3787 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3788 ++voltage_tries;
3789 if (voltage_tries == 5) {
3790 DRM_ERROR("too many voltage retries, give up\n");
3791 break;
3792 }
3793 } else
3794 voltage_tries = 0;
3795 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3796
3797 /* Update training set as requested by target */
3798 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3799 DRM_ERROR("failed to update link training\n");
3800 break;
3801 }
3802 }
3803
3804 intel_dp->DP = DP;
3805 }
3806
3807 static void
3808 intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
3809 {
3810 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3811 struct drm_device *dev = dig_port->base.base.dev;
3812 bool channel_eq = false;
3813 int tries, cr_tries;
3814 uint32_t DP = intel_dp->DP;
3815 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3816
3817 /*
3818 * Use Training Pattern 3 for HBR2 or DP 1.2 devices that support it.
3819 *
3820 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
3821 * also mandatory for downstream devices that support HBR2.
3822 *
3823 * Due to WaDisableHBR2, SKL < B0 is the only exception where TPS3 is
3824 * supported but still not enabled.
3825 */
3826 if (intel_dp_source_supports_hbr2(dev) &&
3827 drm_dp_tps3_supported(intel_dp->dpcd))
3828 training_pattern = DP_TRAINING_PATTERN_3;
3829 else if (intel_dp->link_rate == 540000)
3830 DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");
3831
3832 /* channel equalization */
3833 if (!intel_dp_set_link_train(intel_dp, &DP,
3834 training_pattern |
3835 DP_LINK_SCRAMBLING_DISABLE)) {
3836 DRM_ERROR("failed to start channel equalization\n");
3837 return;
3838 }
3839
3840 tries = 0;
3841 cr_tries = 0;
3842 channel_eq = false;
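/*
 * Retry policy for the loop below, summarized: losing clock recovery
 * sends us back through full clock recovery training, as do more than
 * 5 failed equalization updates; more than 5 such clock recovery
 * fallbacks aborts training altogether.
 */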
3843 for (;;) {
3844 uint8_t link_status[DP_LINK_STATUS_SIZE];
3845
3846 if (cr_tries > 5) {
3847 DRM_ERROR("failed to train DP, aborting\n");
3848 break;
3849 }
3850
3851 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3852 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3853 DRM_ERROR("failed to get link status\n");
3854 break;
3855 }
3856
3857 /* Make sure clock is still ok */
3858 if (!drm_dp_clock_recovery_ok(link_status,
3859 intel_dp->lane_count)) {
3860 intel_dp->train_set_valid = false;
3861 intel_dp_link_training_clock_recovery(intel_dp);
3862 intel_dp_set_link_train(intel_dp, &DP,
3863 training_pattern |
3864 DP_LINK_SCRAMBLING_DISABLE);
3865 cr_tries++;
3866 continue;
3867 }
3868
3869 if (drm_dp_channel_eq_ok(link_status,
3870 intel_dp->lane_count)) {
3871 channel_eq = true;
3872 break;
3873 }
3874
3875 /* Try 5 times, then try clock recovery if that fails */
3876 if (tries > 5) {
3877 intel_dp->train_set_valid = false;
3878 intel_dp_link_training_clock_recovery(intel_dp);
3879 intel_dp_set_link_train(intel_dp, &DP,
3880 training_pattern |
3881 DP_LINK_SCRAMBLING_DISABLE);
3882 tries = 0;
3883 cr_tries++;
3884 continue;
3885 }
3886
3887 /* Update training set as requested by target */
3888 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3889 DRM_ERROR("failed to update link training\n");
3890 break;
3891 }
3892 ++tries;
3893 }
3894
3895 intel_dp_set_idle_link_train(intel_dp);
3896
3897 intel_dp->DP = DP;
3898
3899 if (channel_eq) {
3900 intel_dp->train_set_valid = true;
3901 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
3902 }
3903 }
3904
3905 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3906 {
3907 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3908 DP_TRAINING_PATTERN_DISABLE);
3909 }
3910
3911 void
3912 intel_dp_start_link_train(struct intel_dp *intel_dp)
3913 {
3914 intel_dp_link_training_clock_recovery(intel_dp);
3915 intel_dp_link_training_channel_equalization(intel_dp);
3916 }
3917
3918 static void
3919 intel_dp_link_down(struct intel_dp *intel_dp)
3920 {
3921 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3922 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
3923 enum port port = intel_dig_port->port;
3924 struct drm_device *dev = intel_dig_port->base.base.dev;
3925 struct drm_i915_private *dev_priv = dev->dev_private;
3926 uint32_t DP = intel_dp->DP;
3927
3928 if (WARN_ON(HAS_DDI(dev)))
3929 return;
3930
3931 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3932 return;
3933
3934 DRM_DEBUG_KMS("\n");
3935
3936 if ((IS_GEN7(dev) && port == PORT_A) ||
3937 (HAS_PCH_CPT(dev) && port != PORT_A)) {
3938 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3939 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3940 } else {
3941 if (IS_CHERRYVIEW(dev))
3942 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3943 else
3944 DP &= ~DP_LINK_TRAIN_MASK;
3945 DP |= DP_LINK_TRAIN_PAT_IDLE;
3946 }
3947 I915_WRITE(intel_dp->output_reg, DP);
3948 POSTING_READ(intel_dp->output_reg);
3949
3950 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3951 I915_WRITE(intel_dp->output_reg, DP);
3952 POSTING_READ(intel_dp->output_reg);
3953
3954 /*
3955 * HW workaround for IBX, we need to move the port
3956 * to transcoder A after disabling it to allow the
3957 * matching HDMI port to be enabled on transcoder A.
3958 */
3959 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3960 /* always enable with pattern 1 (as per spec) */
3961 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3962 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3963 I915_WRITE(intel_dp->output_reg, DP);
3964 POSTING_READ(intel_dp->output_reg);
3965
3966 DP &= ~DP_PORT_EN;
3967 I915_WRITE(intel_dp->output_reg, DP);
3968 POSTING_READ(intel_dp->output_reg);
3969 }
3970
3971 msleep(intel_dp->panel_power_down_delay);
3972 }
3973
3974 static bool
3975 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3976 {
3977 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3978 struct drm_device *dev = dig_port->base.base.dev;
3979 struct drm_i915_private *dev_priv = dev->dev_private;
3980 uint8_t rev;
3981
3982 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3983 sizeof(intel_dp->dpcd)) < 0)
3984 return false; /* aux transfer failed */
3985
3986 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3987
3988 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3989 return false; /* DPCD not present */
3990
3991 /* Check if the panel supports PSR */
3992 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3993 if (is_edp(intel_dp)) {
3994 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3995 intel_dp->psr_dpcd,
3996 sizeof(intel_dp->psr_dpcd));
3997 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3998 dev_priv->psr.sink_support = true;
3999 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
4000 }
4001
4002 if (INTEL_INFO(dev)->gen >= 9 &&
4003 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
4004 uint8_t frame_sync_cap;
4005
4006 dev_priv->psr.sink_support = true;
4007 intel_dp_dpcd_read_wake(&intel_dp->aux,
4008 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
4009 &frame_sync_cap, 1);
4010 dev_priv->psr.aux_frame_sync = frame_sync_cap != 0;
4011 /* PSR2 needs frame sync as well */
4012 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
4013 DRM_DEBUG_KMS("PSR2 %s on sink\n",
4014 dev_priv->psr.psr2_support ? "supported" : "not supported");
4015 }
4016 }
4017
4018 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
4019 yesno(intel_dp_source_supports_hbr2(dev)),
4020 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
4021
4022 /* Intermediate frequency support */
4023 if (is_edp(intel_dp) &&
4024 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
4025 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
4026 (rev >= 0x03)) { /* eDP v1.4 or higher */
4027 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
4028 int i;
4029
4030 intel_dp_dpcd_read_wake(&intel_dp->aux,
4031 DP_SUPPORTED_LINK_RATES,
4032 sink_rates,
4033 sizeof(sink_rates));
4034
4035 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
4036 int val = le16_to_cpu(sink_rates[i]);
4037
4038 if (val == 0)
4039 break;
4040
4041 /* Value read is in units of 200 kHz, while the drm clock is stored in deca-kHz */
4042 intel_dp->sink_rates[i] = (val * 200) / 10;
4043 }
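/*
 * Worked example of the conversion above: eDP 1.4 stores RBR
 * (1.62 Gbps per lane) as 8100 units of 200 kHz, and
 * 8100 * 200 / 10 == 162000 in the deca-kHz units used by the
 * driver's clock bookkeeping.
 */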
4044 intel_dp->num_sink_rates = i;
4045 }
4046
4047 intel_dp_print_rates(intel_dp);
4048
4049 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4050 DP_DWN_STRM_PORT_PRESENT))
4051 return true; /* native DP sink */
4052
4053 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
4054 return true; /* no per-port downstream info */
4055
4056 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
4057 intel_dp->downstream_ports,
4058 DP_MAX_DOWNSTREAM_PORTS) < 0)
4059 return false; /* downstream port status fetch failed */
4060
4061 return true;
4062 }
4063
4064 static void
4065 intel_dp_probe_oui(struct intel_dp *intel_dp)
4066 {
4067 u8 buf[3];
4068
4069 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4070 return;
4071
4072 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
4073 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4074 buf[0], buf[1], buf[2]);
4075
4076 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
4077 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4078 buf[0], buf[1], buf[2]);
4079 }
4080
4081 static bool
4082 intel_dp_probe_mst(struct intel_dp *intel_dp)
4083 {
4084 u8 buf[1];
4085
4086 if (!intel_dp->can_mst)
4087 return false;
4088
4089 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4090 return false;
4091
4092 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1) == 1) {
4093 if (buf[0] & DP_MST_CAP) {
4094 DRM_DEBUG_KMS("Sink is MST capable\n");
4095 intel_dp->is_mst = true;
4096 } else {
4097 DRM_DEBUG_KMS("Sink is not MST capable\n");
4098 intel_dp->is_mst = false;
4099 }
4100 }
4101
4102 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4103 return intel_dp->is_mst;
4104 }
4105
4106 static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
4107 {
4108 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4109 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4110 u8 buf;
4111 int ret = 0;
4112
4113 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4114 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4115 ret = -EIO;
4116 goto out;
4117 }
4118
4119 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4120 buf & ~DP_TEST_SINK_START) < 0) {
4121 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4122 ret = -EIO;
4123 goto out;
4124 }
4125
4126 intel_dp->sink_crc.started = false;
4127 out:
4128 hsw_enable_ips(intel_crtc);
4129 return ret;
4130 }
4131
4132 static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4133 {
4134 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4135 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4136 u8 buf;
4137 int ret;
4138
4139 if (intel_dp->sink_crc.started) {
4140 ret = intel_dp_sink_crc_stop(intel_dp);
4141 if (ret)
4142 return ret;
4143 }
4144
4145 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4146 return -EIO;
4147
4148 if (!(buf & DP_TEST_CRC_SUPPORTED))
4149 return -ENOTTY;
4150
4151 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4152
4153 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4154 return -EIO;
4155
4156 hsw_disable_ips(intel_crtc);
4157
4158 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4159 buf | DP_TEST_SINK_START) < 0) {
4160 hsw_enable_ips(intel_crtc);
4161 return -EIO;
4162 }
4163
4164 intel_dp->sink_crc.started = true;
4165 return 0;
4166 }
4167
4168 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4169 {
4170 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4171 struct drm_device *dev = dig_port->base.base.dev;
4172 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4173 u8 buf;
4174 int count, ret;
4175 int attempts = 6;
4176 bool old_equal_new;
4177
4178 ret = intel_dp_sink_crc_start(intel_dp);
4179 if (ret)
4180 return ret;
4181
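/*
 * The loop below polls the sink once per vblank: TEST_SINK_MISC
 * carries a frame count in DP_TEST_COUNT_MASK and the six CRC bytes
 * start at DP_TEST_CRC_R_CR. A readout only counts as fresh once the
 * count has moved and the CRC differs from the previous sample.
 */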
4182 do {
4183 intel_wait_for_vblank(dev, intel_crtc->pipe);
4184
4185 if (drm_dp_dpcd_readb(&intel_dp->aux,
4186 DP_TEST_SINK_MISC, &buf) < 0) {
4187 ret = -EIO;
4188 goto stop;
4189 }
4190 count = buf & DP_TEST_COUNT_MASK;
4191
4192 /*
4193 * Count might be reset during the loop. In this case
4194 * last known count needs to be reset as well.
4195 */
4196 if (count == 0)
4197 intel_dp->sink_crc.last_count = 0;
4198
4199 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4200 ret = -EIO;
4201 goto stop;
4202 }
4203
4204 old_equal_new = (count == intel_dp->sink_crc.last_count &&
4205 !memcmp(intel_dp->sink_crc.last_crc, crc,
4206 6 * sizeof(u8)));
4207
4208 } while (--attempts && (count == 0 || old_equal_new));
4209
4210 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4211 memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
4212
4213 if (attempts == 0) {
4214 if (old_equal_new) {
4215 DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
4216 } else {
4217 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4218 ret = -ETIMEDOUT;
4219 goto stop;
4220 }
4221 }
4222
4223 stop:
4224 intel_dp_sink_crc_stop(intel_dp);
4225 return ret;
4226 }
4227
4228 static bool
4229 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4230 {
4231 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4232 DP_DEVICE_SERVICE_IRQ_VECTOR,
4233 sink_irq_vector, 1) == 1;
4234 }
4235
4236 static bool
4237 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4238 {
4239 int ret;
4240
4241 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4242 DP_SINK_COUNT_ESI,
4243 sink_irq_vector, 14);
4244 if (ret != 14)
4245 return false;
4246
4247 return true;
4248 }
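/*
 * Note on the 14-byte ESI read above: it starts at DP_SINK_COUNT_ESI
 * (DPCD 0x2002), so sink_irq_vector[1] holds the device service IRQ
 * vector and sink_irq_vector[10] lands on DPCD 0x200c, the start of
 * the lane status registers the MST handler checks below.
 */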
4249
4250 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4251 {
4252 uint8_t test_result = DP_TEST_ACK;
4253 return test_result;
4254 }
4255
4256 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4257 {
4258 uint8_t test_result = DP_TEST_NAK;
4259 return test_result;
4260 }
4261
4262 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4263 {
4264 uint8_t test_result = DP_TEST_NAK;
4265 struct intel_connector *intel_connector = intel_dp->attached_connector;
4266 struct drm_connector *connector = &intel_connector->base;
4267
4268 if (intel_connector->detect_edid == NULL ||
4269 connector->edid_corrupt ||
4270 intel_dp->aux.i2c_defer_count > 6) {
4271 /* Check EDID read for NACKs, DEFERs and corruption
4272 * (DP CTS 1.2 Core r1.1)
4273 * 4.2.2.4 : Failed EDID read, I2C_NAK
4274 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4275 * 4.2.2.6 : EDID corruption detected
4276 * Use failsafe mode for all cases
4277 */
4278 if (intel_dp->aux.i2c_nack_count > 0 ||
4279 intel_dp->aux.i2c_defer_count > 0)
4280 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4281 intel_dp->aux.i2c_nack_count,
4282 intel_dp->aux.i2c_defer_count);
4283 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4284 } else {
4285 struct edid *block = intel_connector->detect_edid;
4286
4287 /* We have to write the checksum
4288 * of the last block read
4289 */
4290 block += intel_connector->detect_edid->extensions;
4291
4292 if (!drm_dp_dpcd_write(&intel_dp->aux,
4293 DP_TEST_EDID_CHECKSUM,
4294 &block->checksum,
4295 1))
4296 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4297
4298 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4299 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4300 }
4301
4302 /* Set test active flag here so userspace doesn't interrupt things */
4303 intel_dp->compliance_test_active = 1;
4304
4305 return test_result;
4306 }
4307
4308 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4309 {
4310 uint8_t test_result = DP_TEST_NAK;
4311 return test_result;
4312 }
4313
4314 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4315 {
4316 uint8_t response = DP_TEST_NAK;
4317 uint8_t rxdata = 0;
4318 int status = 0;
4319
4320 intel_dp->compliance_test_active = 0;
4321 intel_dp->compliance_test_type = 0;
4322 intel_dp->compliance_test_data = 0;
4323
4324 intel_dp->aux.i2c_nack_count = 0;
4325 intel_dp->aux.i2c_defer_count = 0;
4326
4327 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4328 if (status <= 0) {
4329 DRM_DEBUG_KMS("Could not read test request from sink\n");
4330 goto update_status;
4331 }
4332
4333 switch (rxdata) {
4334 case DP_TEST_LINK_TRAINING:
4335 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4336 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4337 response = intel_dp_autotest_link_training(intel_dp);
4338 break;
4339 case DP_TEST_LINK_VIDEO_PATTERN:
4340 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4341 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4342 response = intel_dp_autotest_video_pattern(intel_dp);
4343 break;
4344 case DP_TEST_LINK_EDID_READ:
4345 DRM_DEBUG_KMS("EDID test requested\n");
4346 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4347 response = intel_dp_autotest_edid(intel_dp);
4348 break;
4349 case DP_TEST_LINK_PHY_TEST_PATTERN:
4350 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4351 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4352 response = intel_dp_autotest_phy_pattern(intel_dp);
4353 break;
4354 default:
4355 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4356 break;
4357 }
4358
4359 update_status:
4360 status = drm_dp_dpcd_write(&intel_dp->aux,
4361 DP_TEST_RESPONSE,
4362 &response, 1);
4363 if (status <= 0)
4364 DRM_DEBUG_KMS("Could not write test response to sink\n");
4365 }
4366
4367 static int
4368 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4369 {
4370 bool bret;
4371
4372 if (intel_dp->is_mst) {
4373 u8 esi[16] = { 0 };
4374 int ret = 0;
4375 int retry;
4376 bool handled;
4377 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4378 go_again:
4379 if (bret) {
4380
4381 /* check link status - esi[10] maps to DPCD 0x200c (lane status) */
4382 if (intel_dp->active_mst_links &&
4383 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4384 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4385 intel_dp_start_link_train(intel_dp);
4386 intel_dp_stop_link_train(intel_dp);
4387 }
4388
4389 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4390 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4391
4392 if (handled) {
4393 for (retry = 0; retry < 3; retry++) {
4394 int wret;
4395 wret = drm_dp_dpcd_write(&intel_dp->aux,
4396 DP_SINK_COUNT_ESI+1,
4397 &esi[1], 3);
4398 if (wret == 3) {
4399 break;
4400 }
4401 }
4402
4403 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4404 if (bret) {
4405 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4406 goto go_again;
4407 }
4408 } else
4409 ret = 0;
4410
4411 return ret;
4412 } else {
4413 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4414 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4415 intel_dp->is_mst = false;
4416 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4417 /* send a hotplug event */
4418 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4419 }
4420 }
4421 return -EINVAL;
4422 }
4423
4424 /*
4425 * According to DP spec
4426 * 5.1.2:
4427 * 1. Read DPCD
4428 * 2. Configure link according to Receiver Capabilities
4429 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4430 * 4. Check link status on receipt of hot-plug interrupt
4431 */
4432 static void
4433 intel_dp_check_link_status(struct intel_dp *intel_dp)
4434 {
4435 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4436 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4437 u8 sink_irq_vector;
4438 u8 link_status[DP_LINK_STATUS_SIZE];
4439
4440 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4441
4442 if (!intel_encoder->base.crtc)
4443 return;
4444
4445 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4446 return;
4447
4448 /* Try to read receiver status if the link appears to be up */
4449 if (!intel_dp_get_link_status(intel_dp, link_status)) {
4450 return;
4451 }
4452
4453 /* Now read the DPCD to see if it's actually running */
4454 if (!intel_dp_get_dpcd(intel_dp)) {
4455 return;
4456 }
4457
4458 /* Try to read the source of the interrupt */
4459 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4460 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4461 /* Clear interrupt source */
4462 drm_dp_dpcd_writeb(&intel_dp->aux,
4463 DP_DEVICE_SERVICE_IRQ_VECTOR,
4464 sink_irq_vector);
4465
4466 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4467 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4468 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4469 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4470 }
4471
4472 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4473 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4474 intel_encoder->base.name);
4475 intel_dp_start_link_train(intel_dp);
4476 intel_dp_stop_link_train(intel_dp);
4477 }
4478 }
4479
4480 /* XXX this is probably wrong for multiple downstream ports */
4481 static enum drm_connector_status
4482 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4483 {
4484 uint8_t *dpcd = intel_dp->dpcd;
4485 uint8_t type;
4486
4487 if (!intel_dp_get_dpcd(intel_dp))
4488 return connector_status_disconnected;
4489
4490 /* if there's no downstream port, we're done */
4491 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4492 return connector_status_connected;
4493
4494 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4495 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4496 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4497 uint8_t reg;
4498
4499 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4500 &reg, 1) < 0)
4501 return connector_status_unknown;
4502
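/*
 * DP_GET_SINK_COUNT() (drm_dp_helper.h) reassembles the split
 * SINK_COUNT field: bits 5:0 hold the low bits and bit 7 (added in
 * DP 1.2) the high bit, so e.g. a raw value of 0x81 decodes to a
 * sink count of 0x41.
 */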
4503 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4504 : connector_status_disconnected;
4505 }
4506
4507 /* If no HPD, poke DDC gently */
4508 if (drm_probe_ddc(&intel_dp->aux.ddc))
4509 return connector_status_connected;
4510
4511 /* Well we tried, say unknown for unreliable port types */
4512 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4513 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4514 if (type == DP_DS_PORT_TYPE_VGA ||
4515 type == DP_DS_PORT_TYPE_NON_EDID)
4516 return connector_status_unknown;
4517 } else {
4518 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4519 DP_DWN_STRM_PORT_TYPE_MASK;
4520 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4521 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4522 return connector_status_unknown;
4523 }
4524
4525 /* Anything else is out of spec, warn and ignore */
4526 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4527 return connector_status_disconnected;
4528 }
4529
4530 static enum drm_connector_status
4531 edp_detect(struct intel_dp *intel_dp)
4532 {
4533 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4534 enum drm_connector_status status;
4535
4536 status = intel_panel_detect(dev);
4537 if (status == connector_status_unknown)
4538 status = connector_status_connected;
4539
4540 return status;
4541 }
4542
4543 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4544 struct intel_digital_port *port)
4545 {
4546 u32 bit;
4547
4548 switch (port->port) {
4549 case PORT_A:
4550 return true;
4551 case PORT_B:
4552 bit = SDE_PORTB_HOTPLUG;
4553 break;
4554 case PORT_C:
4555 bit = SDE_PORTC_HOTPLUG;
4556 break;
4557 case PORT_D:
4558 bit = SDE_PORTD_HOTPLUG;
4559 break;
4560 default:
4561 MISSING_CASE(port->port);
4562 return false;
4563 }
4564
4565 return I915_READ(SDEISR) & bit;
4566 }
4567
4568 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4569 struct intel_digital_port *port)
4570 {
4571 u32 bit;
4572
4573 switch (port->port) {
4574 case PORT_A:
4575 return true;
4576 case PORT_B:
4577 bit = SDE_PORTB_HOTPLUG_CPT;
4578 break;
4579 case PORT_C:
4580 bit = SDE_PORTC_HOTPLUG_CPT;
4581 break;
4582 case PORT_D:
4583 bit = SDE_PORTD_HOTPLUG_CPT;
4584 break;
4585 case PORT_E:
4586 bit = SDE_PORTE_HOTPLUG_SPT;
4587 break;
4588 default:
4589 MISSING_CASE(port->port);
4590 return false;
4591 }
4592
4593 return I915_READ(SDEISR) & bit;
4594 }
4595
4596 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4597 struct intel_digital_port *port)
4598 {
4599 u32 bit;
4600
4601 switch (port->port) {
4602 case PORT_B:
4603 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4604 break;
4605 case PORT_C:
4606 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4607 break;
4608 case PORT_D:
4609 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4610 break;
4611 default:
4612 MISSING_CASE(port->port);
4613 return false;
4614 }
4615
4616 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4617 }
4618
4619 static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4620 struct intel_digital_port *port)
4621 {
4622 u32 bit;
4623
4624 switch (port->port) {
4625 case PORT_B:
4626 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4627 break;
4628 case PORT_C:
4629 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4630 break;
4631 case PORT_D:
4632 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4633 break;
4634 default:
4635 MISSING_CASE(port->port);
4636 return false;
4637 }
4638
4639 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4640 }
4641
4642 static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4643 struct intel_digital_port *intel_dig_port)
4644 {
4645 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4646 enum port port;
4647 u32 bit;
4648
4649 intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4650 switch (port) {
4651 case PORT_A:
4652 bit = BXT_DE_PORT_HP_DDIA;
4653 break;
4654 case PORT_B:
4655 bit = BXT_DE_PORT_HP_DDIB;
4656 break;
4657 case PORT_C:
4658 bit = BXT_DE_PORT_HP_DDIC;
4659 break;
4660 default:
4661 MISSING_CASE(port);
4662 return false;
4663 }
4664
4665 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4666 }
4667
4668 /**
4669 * intel_digital_port_connected - is the specified port connected?
4670 * @dev_priv: i915 private structure
4671 * @port: the port to test
4672 *
4673 * Return %true if @port is connected, %false otherwise.
4674 */
4675 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4676 struct intel_digital_port *port)
4677 {
4678 if (HAS_PCH_IBX(dev_priv))
4679 return ibx_digital_port_connected(dev_priv, port);
4680 else if (HAS_PCH_SPLIT(dev_priv))
4681 return cpt_digital_port_connected(dev_priv, port);
4682 else if (IS_BROXTON(dev_priv))
4683 return bxt_digital_port_connected(dev_priv, port);
4684 else if (IS_VALLEYVIEW(dev_priv))
4685 return vlv_digital_port_connected(dev_priv, port);
4686 else
4687 return g4x_digital_port_connected(dev_priv, port);
4688 }
4689
4690 static enum drm_connector_status
4691 ironlake_dp_detect(struct intel_dp *intel_dp)
4692 {
4693 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4694 struct drm_i915_private *dev_priv = dev->dev_private;
4695 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4696
4697 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
4698 return connector_status_disconnected;
4699
4700 return intel_dp_detect_dpcd(intel_dp);
4701 }
4702
4703 static enum drm_connector_status
4704 g4x_dp_detect(struct intel_dp *intel_dp)
4705 {
4706 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4707 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4708
4709 /* Can't disconnect eDP, but you can close the lid... */
4710 if (is_edp(intel_dp)) {
4711 enum drm_connector_status status;
4712
4713 status = intel_panel_detect(dev);
4714 if (status == connector_status_unknown)
4715 status = connector_status_connected;
4716 return status;
4717 }
4718
4719 if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
4720 return connector_status_disconnected;
4721
4722 return intel_dp_detect_dpcd(intel_dp);
4723 }
4724
4725 static struct edid *
4726 intel_dp_get_edid(struct intel_dp *intel_dp)
4727 {
4728 struct intel_connector *intel_connector = intel_dp->attached_connector;
4729
4730 /* use cached edid if we have one */
4731 if (intel_connector->edid) {
4732 /* invalid edid */
4733 if (IS_ERR(intel_connector->edid))
4734 return NULL;
4735
4736 return drm_edid_duplicate(intel_connector->edid);
4737 }
4738
4739 return drm_get_edid(&intel_connector->base, &intel_dp->aux.ddc);
4740 }
4741
4742 static void
4743 intel_dp_set_edid(struct intel_dp *intel_dp)
4744 {
4745 struct intel_connector *intel_connector = intel_dp->attached_connector;
4746 struct edid *edid;
4747
4748 edid = intel_dp_get_edid(intel_dp);
4749 intel_connector->detect_edid = edid;
4750
4751 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4752 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4753 else
4754 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4755 }
4756
4757 static void
4758 intel_dp_unset_edid(struct intel_dp *intel_dp)
4759 {
4760 struct intel_connector *intel_connector = intel_dp->attached_connector;
4761
4762 kfree(intel_connector->detect_edid);
4763 intel_connector->detect_edid = NULL;
4764
4765 intel_dp->has_audio = false;
4766 }
4767
4768 static enum intel_display_power_domain
4769 intel_dp_power_get(struct intel_dp *dp)
4770 {
4771 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4772 enum intel_display_power_domain power_domain;
4773
4774 power_domain = intel_display_port_power_domain(encoder);
4775 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4776
4777 return power_domain;
4778 }
4779
4780 static void
4781 intel_dp_power_put(struct intel_dp *dp,
4782 enum intel_display_power_domain power_domain)
4783 {
4784 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4785 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4786 }
4787
4788 static enum drm_connector_status
4789 intel_dp_detect(struct drm_connector *connector, bool force)
4790 {
4791 struct intel_dp *intel_dp = intel_attached_dp(connector);
4792 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4793 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4794 struct drm_device *dev = connector->dev;
4795 enum drm_connector_status status;
4796 enum intel_display_power_domain power_domain;
4797 bool ret;
4798 u8 sink_irq_vector;
4799
4800 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4801 connector->base.id, connector->name);
4802 intel_dp_unset_edid(intel_dp);
4803
4804 if (intel_dp->is_mst) {
4805 /* MST devices are disconnected from a monitor POV */
4806 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4807 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4808 return connector_status_disconnected;
4809 }
4810
4811 power_domain = intel_dp_power_get(intel_dp);
4812
4813 /* Can't disconnect eDP, but you can close the lid... */
4814 if (is_edp(intel_dp))
4815 status = edp_detect(intel_dp);
4816 else if (HAS_PCH_SPLIT(dev))
4817 status = ironlake_dp_detect(intel_dp);
4818 else
4819 status = g4x_dp_detect(intel_dp);
4820 if (status != connector_status_connected)
4821 goto out;
4822
4823 intel_dp_probe_oui(intel_dp);
4824
4825 ret = intel_dp_probe_mst(intel_dp);
4826 if (ret) {
4827 /* if we are in MST mode then this connector
4828 won't appear connected or have anything with EDID on it */
4829 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4830 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4831 status = connector_status_disconnected;
4832 goto out;
4833 }
4834
4835 intel_dp_set_edid(intel_dp);
4836
4837 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4838 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4839 status = connector_status_connected;
4840
4841 /* Try to read the source of the interrupt */
4842 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4843 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4844 /* Clear interrupt source */
4845 drm_dp_dpcd_writeb(&intel_dp->aux,
4846 DP_DEVICE_SERVICE_IRQ_VECTOR,
4847 sink_irq_vector);
4848
4849 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4850 intel_dp_handle_test_request(intel_dp);
4851 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4852 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4853 }
4854
4855 out:
4856 intel_dp_power_put(intel_dp, power_domain);
4857 return status;
4858 }
4859
4860 static void
4861 intel_dp_force(struct drm_connector *connector)
4862 {
4863 struct intel_dp *intel_dp = intel_attached_dp(connector);
4864 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4865 enum intel_display_power_domain power_domain;
4866
4867 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4868 connector->base.id, connector->name);
4869 intel_dp_unset_edid(intel_dp);
4870
4871 if (connector->status != connector_status_connected)
4872 return;
4873
4874 power_domain = intel_dp_power_get(intel_dp);
4875
4876 intel_dp_set_edid(intel_dp);
4877
4878 intel_dp_power_put(intel_dp, power_domain);
4879
4880 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4881 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4882 }
4883
4884 static int intel_dp_get_modes(struct drm_connector *connector)
4885 {
4886 struct intel_connector *intel_connector = to_intel_connector(connector);
4887 struct edid *edid;
4888
4889 edid = intel_connector->detect_edid;
4890 if (edid) {
4891 int ret = intel_connector_update_modes(connector, edid);
4892 if (ret)
4893 return ret;
4894 }
4895
4896 /* if eDP has no EDID, fall back to fixed mode */
4897 if (is_edp(intel_attached_dp(connector)) &&
4898 intel_connector->panel.fixed_mode) {
4899 struct drm_display_mode *mode;
4900
4901 mode = drm_mode_duplicate(connector->dev,
4902 intel_connector->panel.fixed_mode);
4903 if (mode) {
4904 drm_mode_probed_add(connector, mode);
4905 return 1;
4906 }
4907 }
4908
4909 return 0;
4910 }
4911
4912 static bool
4913 intel_dp_detect_audio(struct drm_connector *connector)
4914 {
4915 bool has_audio = false;
4916 struct edid *edid;
4917
4918 edid = to_intel_connector(connector)->detect_edid;
4919 if (edid)
4920 has_audio = drm_detect_monitor_audio(edid);
4921
4922 return has_audio;
4923 }
4924
4925 static int
4926 intel_dp_set_property(struct drm_connector *connector,
4927 struct drm_property *property,
4928 uint64_t val)
4929 {
4930 struct drm_i915_private *dev_priv = connector->dev->dev_private;
4931 struct intel_connector *intel_connector = to_intel_connector(connector);
4932 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4933 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4934 int ret;
4935
4936 ret = drm_object_property_set_value(&connector->base, property, val);
4937 if (ret)
4938 return ret;
4939
4940 if (property == dev_priv->force_audio_property) {
4941 int i = val;
4942 bool has_audio;
4943
4944 if (i == intel_dp->force_audio)
4945 return 0;
4946
4947 intel_dp->force_audio = i;
4948
4949 if (i == HDMI_AUDIO_AUTO)
4950 has_audio = intel_dp_detect_audio(connector);
4951 else
4952 has_audio = (i == HDMI_AUDIO_ON);
4953
4954 if (has_audio == intel_dp->has_audio)
4955 return 0;
4956
4957 intel_dp->has_audio = has_audio;
4958 goto done;
4959 }
4960
4961 if (property == dev_priv->broadcast_rgb_property) {
4962 bool old_auto = intel_dp->color_range_auto;
4963 bool old_range = intel_dp->limited_color_range;
4964
4965 switch (val) {
4966 case INTEL_BROADCAST_RGB_AUTO:
4967 intel_dp->color_range_auto = true;
4968 break;
4969 case INTEL_BROADCAST_RGB_FULL:
4970 intel_dp->color_range_auto = false;
4971 intel_dp->limited_color_range = false;
4972 break;
4973 case INTEL_BROADCAST_RGB_LIMITED:
4974 intel_dp->color_range_auto = false;
4975 intel_dp->limited_color_range = true;
4976 break;
4977 default:
4978 return -EINVAL;
4979 }
4980
4981 if (old_auto == intel_dp->color_range_auto &&
4982 old_range == intel_dp->limited_color_range)
4983 return 0;
4984
4985 goto done;
4986 }
4987
4988 if (is_edp(intel_dp) &&
4989 property == connector->dev->mode_config.scaling_mode_property) {
4990 if (val == DRM_MODE_SCALE_NONE) {
4991 DRM_DEBUG_KMS("no scaling not supported\n");
4992 return -EINVAL;
4993 }
4994
4995 if (intel_connector->panel.fitting_mode == val) {
4996 /* the eDP scaling property is not changed */
4997 return 0;
4998 }
4999 intel_connector->panel.fitting_mode = val;
5000
5001 goto done;
5002 }
5003
5004 return -EINVAL;
5005
5006 done:
5007 if (intel_encoder->base.crtc)
5008 intel_crtc_restore_mode(intel_encoder->base.crtc);
5009
5010 return 0;
5011 }
5012
5013 static void
5014 intel_dp_connector_destroy(struct drm_connector *connector)
5015 {
5016 struct intel_connector *intel_connector = to_intel_connector(connector);
5017
5018 kfree(intel_connector->detect_edid);
5019
5020 if (!IS_ERR_OR_NULL(intel_connector->edid))
5021 kfree(intel_connector->edid);
5022
5023 /* Can't call is_edp() since the encoder may have been destroyed
5024 * already. */
5025 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5026 intel_panel_fini(&intel_connector->panel);
5027
5028 drm_connector_cleanup(connector);
5029 kfree(connector);
5030 }
5031
5032 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
5033 {
5034 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
5035 struct intel_dp *intel_dp = &intel_dig_port->dp;
5036
5037 drm_dp_aux_unregister(&intel_dp->aux);
5038 intel_dp_mst_encoder_cleanup(intel_dig_port);
5039 if (is_edp(intel_dp)) {
5040 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5041 /*
5042 * vdd might still be enabled due to the delayed vdd off.
5043 * Make sure vdd is actually turned off here.
5044 */
5045 pps_lock(intel_dp);
5046 edp_panel_vdd_off_sync(intel_dp);
5047 pps_unlock(intel_dp);
5048
5049 if (intel_dp->edp_notifier.notifier_call) {
5050 unregister_reboot_notifier(&intel_dp->edp_notifier);
5051 intel_dp->edp_notifier.notifier_call = NULL;
5052 }
5053 }
5054 drm_encoder_cleanup(encoder);
5055 kfree(intel_dig_port);
5056 }
5057
5058 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
5059 {
5060 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
5061
5062 if (!is_edp(intel_dp))
5063 return;
5064
5065 /*
5066 * vdd might still be enabled due to the delayed vdd off.
5067 * Make sure vdd is actually turned off here.
5068 */
5069 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5070 pps_lock(intel_dp);
5071 edp_panel_vdd_off_sync(intel_dp);
5072 pps_unlock(intel_dp);
5073 }
5074
5075 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
5076 {
5077 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5078 struct drm_device *dev = intel_dig_port->base.base.dev;
5079 struct drm_i915_private *dev_priv = dev->dev_private;
5080 enum intel_display_power_domain power_domain;
5081
5082 lockdep_assert_held(&dev_priv->pps_mutex);
5083
5084 if (!edp_have_panel_vdd(intel_dp))
5085 return;
5086
5087 /*
5088 * The VDD bit needs a power domain reference, so if the bit is
5089 * already enabled when we boot or resume, grab this reference and
5090 * schedule a vdd off, so we don't hold on to the reference
5091 * indefinitely.
5092 */
5093 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
5094 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
5095 intel_display_power_get(dev_priv, power_domain);
5096
5097 edp_panel_vdd_schedule_off(intel_dp);
5098 }
5099
5100 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
5101 {
5102 struct intel_dp *intel_dp;
5103
5104 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
5105 return;
5106
5107 intel_dp = enc_to_intel_dp(encoder);
5108
5109 pps_lock(intel_dp);
5110
5111 /*
5112 * Read out the current power sequencer assignment,
5113 * in case the BIOS did something with it.
5114 */
5115 if (IS_VALLEYVIEW(encoder->dev))
5116 vlv_initial_power_sequencer_setup(intel_dp);
5117
5118 intel_edp_panel_vdd_sanitize(intel_dp);
5119
5120 pps_unlock(intel_dp);
5121 }
5122
5123 static const struct drm_connector_funcs intel_dp_connector_funcs = {
5124 .dpms = drm_atomic_helper_connector_dpms,
5125 .detect = intel_dp_detect,
5126 .force = intel_dp_force,
5127 .fill_modes = drm_helper_probe_single_connector_modes,
5128 .set_property = intel_dp_set_property,
5129 .atomic_get_property = intel_connector_atomic_get_property,
5130 .destroy = intel_dp_connector_destroy,
5131 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5132 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
5133 };
5134
5135 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
5136 .get_modes = intel_dp_get_modes,
5137 .mode_valid = intel_dp_mode_valid,
5138 .best_encoder = intel_best_encoder,
5139 };
5140
5141 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
5142 .reset = intel_dp_encoder_reset,
5143 .destroy = intel_dp_encoder_destroy,
5144 };
5145
5146 enum irqreturn
5147 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5148 {
5149 struct intel_dp *intel_dp = &intel_dig_port->dp;
5150 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5151 struct drm_device *dev = intel_dig_port->base.base.dev;
5152 struct drm_i915_private *dev_priv = dev->dev_private;
5153 enum intel_display_power_domain power_domain;
5154 enum irqreturn ret = IRQ_NONE;
5155
5156 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
5157 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
5158
5159 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5160 /*
5161 * vdd off can generate a long pulse on eDP which
5162 * would require vdd on to handle it, and thus we
5163 * would end up in an endless cycle of
5164 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5165 */
5166 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5167 port_name(intel_dig_port->port));
5168 return IRQ_HANDLED;
5169 }
5170
5171 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5172 port_name(intel_dig_port->port),
5173 long_hpd ? "long" : "short");
5174
5175 power_domain = intel_display_port_power_domain(intel_encoder);
5176 intel_display_power_get(dev_priv, power_domain);
5177
5178 if (long_hpd) {
5179 /* indicate that we need to restart link training */
5180 intel_dp->train_set_valid = false;
5181
5182 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5183 goto mst_fail;
5184
5185 if (!intel_dp_get_dpcd(intel_dp)) {
5186 goto mst_fail;
5187 }
5188
5189 intel_dp_probe_oui(intel_dp);
5190
5191 if (!intel_dp_probe_mst(intel_dp)) {
5192 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5193 intel_dp_check_link_status(intel_dp);
5194 drm_modeset_unlock(&dev->mode_config.connection_mutex);
5195 goto mst_fail;
5196 }
5197 } else {
5198 if (intel_dp->is_mst) {
5199 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
5200 goto mst_fail;
5201 }
5202
5203 if (!intel_dp->is_mst) {
5204 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5205 intel_dp_check_link_status(intel_dp);
5206 drm_modeset_unlock(&dev->mode_config.connection_mutex);
5207 }
5208 }
5209
5210 ret = IRQ_HANDLED;
5211
5212 goto put_power;
5213 mst_fail:
5214 /* if we were in MST mode and the device is not there, get out of MST mode */
5215 if (intel_dp->is_mst) {
5216 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5217 intel_dp->is_mst = false;
5218 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5219 }
5220 put_power:
5221 intel_display_power_put(dev_priv, power_domain);
5222
5223 return ret;
5224 }
5225
5226 /* Return which DP Port should be selected for Transcoder DP control */
5227 int
5228 intel_trans_dp_port_sel(struct drm_crtc *crtc)
5229 {
5230 struct drm_device *dev = crtc->dev;
5231 struct intel_encoder *intel_encoder;
5232 struct intel_dp *intel_dp;
5233
5234 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5235 intel_dp = enc_to_intel_dp(&intel_encoder->base);
5236
5237 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5238 intel_encoder->type == INTEL_OUTPUT_EDP)
5239 return intel_dp->output_reg;
5240 }
5241
5242 return -1;
5243 }
5244
5245 /* check the VBT to see whether the eDP is on another port */
5246 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5247 {
5248 struct drm_i915_private *dev_priv = dev->dev_private;
5249 union child_device_config *p_child;
5250 int i;
5251 static const short port_mapping[] = {
5252 [PORT_B] = DVO_PORT_DPB,
5253 [PORT_C] = DVO_PORT_DPC,
5254 [PORT_D] = DVO_PORT_DPD,
5255 [PORT_E] = DVO_PORT_DPE,
5256 };
5257
5258 /*
5259 * eDP is not supported on g4x, so bail out early just
5260 * for a bit of extra safety in case the VBT is bonkers.
5261 */
5262 if (INTEL_INFO(dev)->gen < 5)
5263 return false;
5264
5265 if (port == PORT_A)
5266 return true;
5267
5268 if (!dev_priv->vbt.child_dev_num)
5269 return false;
5270
5271 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5272 p_child = dev_priv->vbt.child_dev + i;
5273
5274 if (p_child->common.dvo_port == port_mapping[port] &&
5275 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5276 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5277 return true;
5278 }
5279 return false;
5280 }
5281
5282 void
5283 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5284 {
5285 struct intel_connector *intel_connector = to_intel_connector(connector);
5286
5287 intel_attach_force_audio_property(connector);
5288 intel_attach_broadcast_rgb_property(connector);
5289 intel_dp->color_range_auto = true;
5290
5291 if (is_edp(intel_dp)) {
5292 drm_mode_create_scaling_mode_property(connector->dev);
5293 drm_object_attach_property(
5294 &connector->base,
5295 connector->dev->mode_config.scaling_mode_property,
5296 DRM_MODE_SCALE_ASPECT);
5297 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5298 }
5299 }
5300
5301 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5302 {
5303 intel_dp->last_power_cycle = jiffies;
5304 intel_dp->last_power_on = jiffies;
5305 intel_dp->last_backlight_off = jiffies;
5306 }
5307
5308 static void
5309 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5310 struct intel_dp *intel_dp)
5311 {
5312 struct drm_i915_private *dev_priv = dev->dev_private;
5313 struct edp_power_seq cur, vbt, spec,
5314 *final = &intel_dp->pps_delays;
5315 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5316 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
5317
5318 lockdep_assert_held(&dev_priv->pps_mutex);
5319
5320 /* already initialized? */
5321 if (final->t11_t12 != 0)
5322 return;
5323
5324 if (IS_BROXTON(dev)) {
5325 /*
5326 * TODO: BXT has 2 sets of PPS registers.
5327 * The correct register set for Broxton needs to be
5328 * identified using VBT; hardcoding for now.
5329 */
5330 pp_ctrl_reg = BXT_PP_CONTROL(0);
5331 pp_on_reg = BXT_PP_ON_DELAYS(0);
5332 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5333 } else if (HAS_PCH_SPLIT(dev)) {
5334 pp_ctrl_reg = PCH_PP_CONTROL;
5335 pp_on_reg = PCH_PP_ON_DELAYS;
5336 pp_off_reg = PCH_PP_OFF_DELAYS;
5337 pp_div_reg = PCH_PP_DIVISOR;
5338 } else {
5339 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5340
5341 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5342 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5343 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5344 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5345 }
5346
5347 /* Workaround: Need to write PP_CONTROL with the unlock key as
5348 * the very first thing. */
5349 pp_ctl = ironlake_get_pp_control(intel_dp);
5350
5351 pp_on = I915_READ(pp_on_reg);
5352 pp_off = I915_READ(pp_off_reg);
5353 if (!IS_BROXTON(dev)) {
5354 I915_WRITE(pp_ctrl_reg, pp_ctl);
5355 pp_div = I915_READ(pp_div_reg);
5356 }
5357
5358 /* Pull timing values out of registers */
5359 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5360 PANEL_POWER_UP_DELAY_SHIFT;
5361
5362 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5363 PANEL_LIGHT_ON_DELAY_SHIFT;
5364
5365 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5366 PANEL_LIGHT_OFF_DELAY_SHIFT;
5367
5368 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5369 PANEL_POWER_DOWN_DELAY_SHIFT;
5370
5371 if (IS_BROXTON(dev)) {
5372 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5373 BXT_POWER_CYCLE_DELAY_SHIFT;
5374 if (tmp > 0)
5375 cur.t11_t12 = (tmp - 1) * 1000;
5376 else
5377 cur.t11_t12 = 0;
5378 } else {
5379 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5380 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5381 }
5382
5383 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5384 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5385
5386 vbt = dev_priv->vbt.edp_pps;
5387
5388 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5389 * our hw here, which are all in 100usec. */
5390 spec.t1_t3 = 210 * 10;
5391 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5392 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5393 spec.t10 = 500 * 10;
5394 /* This one is special and actually in units of 100ms, but zero
5395 * based in the hw (so we need to add 100 ms). But the sw vbt
5396 * table multiplies it by 1000 to make it in units of 100usec,
5397 * too. */
5398 spec.t11_t12 = (510 + 100) * 10;
5399
5400 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5401 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5402
5403 /* Use the max of the register settings and vbt. If both are
5404 * unset, fall back to the spec limits. */
5405 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5406 spec.field : \
5407 max(cur.field, vbt.field))
5408 assign_final(t1_t3);
5409 assign_final(t8);
5410 assign_final(t9);
5411 assign_final(t10);
5412 assign_final(t11_t12);
5413 #undef assign_final
5414
5415 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5416 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5417 intel_dp->backlight_on_delay = get_delay(t8);
5418 intel_dp->backlight_off_delay = get_delay(t9);
5419 intel_dp->panel_power_down_delay = get_delay(t10);
5420 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5421 #undef get_delay
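/*
 * Worked example of the resolution above, in the hw's 100 us units:
 * with cur.t1_t3 == 0 (registers never programmed) and vbt.t1_t3 ==
 * 2100, assign_final() picks 2100 and get_delay() turns that into a
 * 210 ms panel_power_up_delay; if both sources are zero, the eDP spec
 * limit (also 2100 here) is used instead.
 */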
5422
5423 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5424 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5425 intel_dp->panel_power_cycle_delay);
5426
5427 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5428 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
5429 }
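
/*
 * Illustrative sketch (not driver code): how a single PPS delay field is
 * resolved by the assign_final()/get_delay() macros above. Inputs are in
 * the hardware's 100us units; the result is in ms. resolve_delay() is a
 * hypothetical helper, for the example only.
 */
#include <stdint.h>

static inline uint32_t resolve_delay(uint32_t cur, uint32_t vbt, uint32_t spec)
{
	uint32_t v = cur > vbt ? cur : vbt;	/* max of register and VBT */

	if (v == 0)				/* both unset: spec limit */
		v = spec;
	return (v + 9) / 10;			/* DIV_ROUND_UP: 100us -> ms */
}

/* e.g. resolve_delay(0, 0, 210 * 10) == 210 (ms), the spec cap for t1_t3 */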
5430
5431 static void
5432 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5433 struct intel_dp *intel_dp)
5434 {
5435 struct drm_i915_private *dev_priv = dev->dev_private;
5436 u32 pp_on, pp_off, pp_div, port_sel = 0;
5437 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5438 int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
5439 enum port port = dp_to_dig_port(intel_dp)->port;
5440 const struct edp_power_seq *seq = &intel_dp->pps_delays;
5441
5442 lockdep_assert_held(&dev_priv->pps_mutex);
5443
5444 if (IS_BROXTON(dev)) {
5445 /*
5446 * TODO: BXT has 2 sets of PPS registers.
5447 * The correct register set for Broxton needs to be
5448 * identified using the VBT; hardcoded to set 0 for now.
5449 */
5450 pp_ctrl_reg = BXT_PP_CONTROL(0);
5451 pp_on_reg = BXT_PP_ON_DELAYS(0);
5452 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5453
5454 } else if (HAS_PCH_SPLIT(dev)) {
5455 pp_on_reg = PCH_PP_ON_DELAYS;
5456 pp_off_reg = PCH_PP_OFF_DELAYS;
5457 pp_div_reg = PCH_PP_DIVISOR;
5458 } else {
5459 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5460
5461 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5462 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5463 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5464 }
5465
5466 /*
5467 * And finally store the new values in the power sequencer. The
5468 * backlight delays are set to 1 because we do manual waits on them. For
5469 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5470 * we'll end up waiting for the backlight off delay twice: once when we
5471 * do the manual sleep, and once when we disable the panel and wait for
5472 * the PP_STATUS bit to become zero.
5473 */
5474 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5475 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5476 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5477 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5478 /* Compute the divisor for the pp clock; simply match the Bspec
5479 * formula. */
5480 if (IS_BROXTON(dev)) {
5481 pp_div = I915_READ(pp_ctrl_reg);
5482 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5483 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5484 << BXT_POWER_CYCLE_DELAY_SHIFT);
5485 } else {
5486 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5487 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5488 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5489 }
5490
5491 /* Haswell doesn't have any port selection bits for the panel
5492 * power sequencer any more. */
5493 if (IS_VALLEYVIEW(dev)) {
5494 port_sel = PANEL_PORT_SELECT_VLV(port);
5495 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5496 if (port == PORT_A)
5497 port_sel = PANEL_PORT_SELECT_DPA;
5498 else
5499 port_sel = PANEL_PORT_SELECT_DPD;
5500 }
5501
5502 pp_on |= port_sel;
5503
5504 I915_WRITE(pp_on_reg, pp_on);
5505 I915_WRITE(pp_off_reg, pp_off);
5506 if (IS_BROXTON(dev))
5507 I915_WRITE(pp_ctrl_reg, pp_div);
5508 else
5509 I915_WRITE(pp_div_reg, pp_div);
5510
5511 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5512 I915_READ(pp_on_reg),
5513 I915_READ(pp_off_reg),
5514 IS_BROXTON(dev) ?
5515 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
5516 I915_READ(pp_div_reg));
5517 }
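
/*
 * Illustrative sketch (not driver code): the BXT power-cycle field is
 * zero-based in 100ms units, while the driver keeps t11_t12 in 100us
 * units. These hypothetical helpers mirror the decode in
 * intel_dp_init_panel_power_sequencer() and the encode above, and
 * round-trip exactly.
 */
#include <stdint.h>

static inline uint32_t ex_bxt_t11_t12_decode(uint16_t field)
{
	return field > 0 ? (field - 1) * 1000 : 0;	/* 100ms -> 100us units */
}

static inline uint16_t ex_bxt_t11_t12_encode(uint32_t t11_t12)
{
	return ((t11_t12 + 1) + 999) / 1000;	/* DIV_ROUND_UP(t + 1, 1000) */
}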
5518
5519 /**
5520 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5521 * @dev: DRM device
5522 * @refresh_rate: RR to be programmed
5523 *
5524 * This function gets called when refresh rate (RR) has to be changed from
5525 * one frequency to another. Switches can be between high and low RR
5526 * supported by the panel or to any other RR based on media playback (in
5527 * this case, RR value needs to be passed from user space).
5528 *
5529 * The caller of this function needs to take a lock on dev_priv->drrs.
5530 */
5531 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5532 {
5533 struct drm_i915_private *dev_priv = dev->dev_private;
5534 struct intel_encoder *encoder;
5535 struct intel_digital_port *dig_port = NULL;
5536 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5537 struct intel_crtc_state *config = NULL;
5538 struct intel_crtc *intel_crtc = NULL;
5539 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5540
5541 if (refresh_rate <= 0) {
5542 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5543 return;
5544 }
5545
5546 if (intel_dp == NULL) {
5547 DRM_DEBUG_KMS("DRRS not supported.\n");
5548 return;
5549 }
5550
5551 /*
5552 * FIXME: This needs proper synchronization with psr state for some
5553 * platforms that cannot have PSR and DRRS enabled at the same time.
5554 */
5555
5556 dig_port = dp_to_dig_port(intel_dp);
5557 encoder = &dig_port->base;
5558 intel_crtc = to_intel_crtc(encoder->base.crtc);
5559
5560 if (!intel_crtc) {
5561 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5562 return;
5563 }
5564
5565 config = intel_crtc->config;
5566
5567 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5568 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5569 return;
5570 }
5571
5572 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5573 refresh_rate)
5574 index = DRRS_LOW_RR;
5575
5576 if (index == dev_priv->drrs.refresh_rate_type) {
5577 DRM_DEBUG_KMS(
5578 "DRRS requested for previously set RR...ignoring\n");
5579 return;
5580 }
5581
5582 if (!intel_crtc->active) {
5583 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5584 return;
5585 }
5586
5587 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5588 switch (index) {
5589 case DRRS_HIGH_RR:
5590 intel_dp_set_m_n(intel_crtc, M1_N1);
5591 break;
5592 case DRRS_LOW_RR:
5593 intel_dp_set_m_n(intel_crtc, M2_N2);
5594 break;
5595 case DRRS_MAX_RR:
5596 default:
5597 DRM_ERROR("Unsupported refreshrate type\n");
5598 }
5599 } else if (INTEL_INFO(dev)->gen > 6) {
5600 u32 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5601 u32 val;
5602
5603 val = I915_READ(reg);
5604 if (index > DRRS_HIGH_RR) {
5605 if (IS_VALLEYVIEW(dev))
5606 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5607 else
5608 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5609 } else {
5610 if (IS_VALLEYVIEW(dev))
5611 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5612 else
5613 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5614 }
5615 I915_WRITE(reg, val);
5616 }
5617
5618 dev_priv->drrs.refresh_rate_type = index;
5619
5620 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5621 }
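
/*
 * Illustrative sketch (not driver code): on the gen7-style path above, the
 * RR switch is a read-modify-write of one PIPECONF bit.
 * EX_EDP_RR_MODE_SWITCH is an assumed bit position, for the example only.
 */
#include <stdint.h>
#include <stdbool.h>

#define EX_EDP_RR_MODE_SWITCH (1u << 20) /* assumed bit, not the real one */

static inline uint32_t ex_rr_mode_switch(uint32_t pipeconf, bool low_rr)
{
	return low_rr ? pipeconf | EX_EDP_RR_MODE_SWITCH :
			pipeconf & ~EX_EDP_RR_MODE_SWITCH;
}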
5622
5623 /**
5624 * intel_edp_drrs_enable - init drrs struct if supported
5625 * @intel_dp: DP struct
5626 *
5627 * Initializes frontbuffer_bits and drrs.dp
5628 */
5629 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5630 {
5631 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5632 struct drm_i915_private *dev_priv = dev->dev_private;
5633 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5634 struct drm_crtc *crtc = dig_port->base.base.crtc;
5635 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5636
5637 if (!intel_crtc->config->has_drrs) {
5638 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5639 return;
5640 }
5641
5642 mutex_lock(&dev_priv->drrs.mutex);
5643 if (WARN_ON(dev_priv->drrs.dp)) {
5644 DRM_ERROR("DRRS already enabled\n");
5645 goto unlock;
5646 }
5647
5648 dev_priv->drrs.busy_frontbuffer_bits = 0;
5649
5650 dev_priv->drrs.dp = intel_dp;
5651
5652 unlock:
5653 mutex_unlock(&dev_priv->drrs.mutex);
5654 }
5655
5656 /**
5657 * intel_edp_drrs_disable - Disable DRRS
5658 * @intel_dp: DP struct
5659 *
5660 */
5661 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5662 {
5663 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5664 struct drm_i915_private *dev_priv = dev->dev_private;
5665 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5666 struct drm_crtc *crtc = dig_port->base.base.crtc;
5667 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5668
5669 if (!intel_crtc->config->has_drrs)
5670 return;
5671
5672 mutex_lock(&dev_priv->drrs.mutex);
5673 if (!dev_priv->drrs.dp) {
5674 mutex_unlock(&dev_priv->drrs.mutex);
5675 return;
5676 }
5677
5678 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5679 intel_dp_set_drrs_state(dev_priv->dev,
5680 intel_dp->attached_connector->panel.
5681 fixed_mode->vrefresh);
5682
5683 dev_priv->drrs.dp = NULL;
5684 mutex_unlock(&dev_priv->drrs.mutex);
5685
5686 cancel_delayed_work_sync(&dev_priv->drrs.work);
5687 }
5688
5689 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5690 {
5691 struct drm_i915_private *dev_priv =
5692 container_of(work, typeof(*dev_priv), drrs.work.work);
5693 struct intel_dp *intel_dp;
5694
5695 mutex_lock(&dev_priv->drrs.mutex);
5696
5697 intel_dp = dev_priv->drrs.dp;
5698
5699 if (!intel_dp)
5700 goto unlock;
5701
5702 /*
5703 * The delayed work can race with an invalidate, hence we need
5704 * to recheck.
5705 */
5706
5707 if (dev_priv->drrs.busy_frontbuffer_bits)
5708 goto unlock;
5709
5710 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5711 intel_dp_set_drrs_state(dev_priv->dev,
5712 intel_dp->attached_connector->panel.
5713 downclock_mode->vrefresh);
5714
5715 unlock:
5716 mutex_unlock(&dev_priv->drrs.mutex);
5717 }
5718
5719 /**
5720 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5721 * @dev: DRM device
5722 * @frontbuffer_bits: frontbuffer plane tracking bits
5723 *
5724 * This function gets called every time rendering on the given planes starts.
5725 * Hence DRRS needs to be upclocked (LOW_RR -> HIGH_RR).
5726 *
5727 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5728 */
5729 void intel_edp_drrs_invalidate(struct drm_device *dev,
5730 unsigned frontbuffer_bits)
5731 {
5732 struct drm_i915_private *dev_priv = dev->dev_private;
5733 struct drm_crtc *crtc;
5734 enum pipe pipe;
5735
5736 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5737 return;
5738
5739 cancel_delayed_work(&dev_priv->drrs.work);
5740
5741 mutex_lock(&dev_priv->drrs.mutex);
5742 if (!dev_priv->drrs.dp) {
5743 mutex_unlock(&dev_priv->drrs.mutex);
5744 return;
5745 }
5746
5747 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5748 pipe = to_intel_crtc(crtc)->pipe;
5749
5750 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5751 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5752
5753 /* invalidate means busy screen hence upclock */
5754 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5755 intel_dp_set_drrs_state(dev_priv->dev,
5756 dev_priv->drrs.dp->attached_connector->panel.
5757 fixed_mode->vrefresh);
5758
5759 mutex_unlock(&dev_priv->drrs.mutex);
5760 }
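
/*
 * Illustrative sketch (not driver code): filtering frontbuffer bits down to
 * the pipe that drives the DRRS panel, as done above. The 4-bits-per-pipe
 * layout here is an assumption for the example; the real layout comes from
 * INTEL_FRONTBUFFER_ALL_MASK().
 */
#define EX_FRONTBUFFER_BITS_PER_PIPE 4
#define ex_frontbuffer_all_mask(pipe) \
	(0xfu << ((pipe) * EX_FRONTBUFFER_BITS_PER_PIPE))

static inline unsigned int ex_bits_for_pipe(unsigned int bits, int pipe)
{
	return bits & ex_frontbuffer_all_mask(pipe);
}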
5761
5762 /**
5763 * intel_edp_drrs_flush - Restart Idleness DRRS
5764 * @dev: DRM device
5765 * @frontbuffer_bits: frontbuffer plane tracking bits
5766 *
5767 * This function gets called every time rendering on the given planes has
5768 * completed or a flip on a crtc is completed. So DRRS should be upclocked
5769 * (LOW_RR -> HIGH_RR). Idleness detection should also be restarted,
5770 * if no other planes are dirty.
5771 *
5772 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5773 */
5774 void intel_edp_drrs_flush(struct drm_device *dev,
5775 unsigned frontbuffer_bits)
5776 {
5777 struct drm_i915_private *dev_priv = dev->dev_private;
5778 struct drm_crtc *crtc;
5779 enum pipe pipe;
5780
5781 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5782 return;
5783
5784 cancel_delayed_work(&dev_priv->drrs.work);
5785
5786 mutex_lock(&dev_priv->drrs.mutex);
5787 if (!dev_priv->drrs.dp) {
5788 mutex_unlock(&dev_priv->drrs.mutex);
5789 return;
5790 }
5791
5792 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5793 pipe = to_intel_crtc(crtc)->pipe;
5794
5795 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5796 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5797
5798 /* flush means busy screen hence upclock */
5799 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5800 intel_dp_set_drrs_state(dev_priv->dev,
5801 dev_priv->drrs.dp->attached_connector->panel.
5802 fixed_mode->vrefresh);
5803
5804 /*
5805 * flush also means no more activity hence schedule downclock, if all
5806 * other fbs are quiescent too
5807 */
5808 if (!dev_priv->drrs.busy_frontbuffer_bits)
5809 schedule_delayed_work(&dev_priv->drrs.work,
5810 msecs_to_jiffies(1000));
5811 mutex_unlock(&dev_priv->drrs.mutex);
5812 }
5813
5814 /**
5815 * DOC: Display Refresh Rate Switching (DRRS)
5816 *
5817 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5818 * which enables switching between low and high refresh rates
5819 * dynamically, based on the usage scenario. This feature is applicable
5820 * to internal panels.
5821 *
5822 * Indication that the panel supports DRRS is given by the panel EDID, which
5823 * would list multiple refresh rates for one resolution.
5824 *
5825 * DRRS is of 2 types - static and seamless.
5826 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5827 * (may appear as a blink on screen) and is used in dock/undock scenarios.
5828 * Seamless DRRS involves changing RR without any visual effect to the user
5829 * and can be used during normal system usage. This is done by programming
5830 * certain registers.
5831 *
5832 * Support for static/seamless DRRS may be indicated in the VBT based on
5833 * inputs from the panel spec.
5834 *
5835 * DRRS saves power by switching to low RR based on usage scenarios.
5836 *
5837 * eDP DRRS:
5838 * The implementation is based on frontbuffer tracking.
5839 * When there is a disturbance on the screen triggered by user activity or a
5840 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5841 * When there is no movement on screen, after a timeout of 1 second, a switch
5842 * to low RR is made.
5843 * For integration with frontbuffer tracking code,
5844 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5845 *
5846 * DRRS can be further extended to support other internal panels and also
5847 * the scenario of video playback wherein RR is set based on the rate
5848 * requested by userspace.
5849 */
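
/*
 * Illustrative sketch (not driver code): how frontbuffer tracking is
 * expected to drive the entry points above around one rendering burst.
 * ex_frontbuffer_write_cycle() is a hypothetical caller.
 */
static inline void ex_frontbuffer_write_cycle(struct drm_device *dev,
					      unsigned frontbuffer_bits)
{
	intel_edp_drrs_invalidate(dev, frontbuffer_bits); /* busy: upclock */
	/* ... rendering to the tracked planes happens here ... */
	intel_edp_drrs_flush(dev, frontbuffer_bits); /* idle: re-arm 1s timer */
}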
5850
5851 /**
5852 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5853 * @intel_connector: eDP connector
5854 * @fixed_mode: preferred mode of panel
5855 *
5856 * This function is called only once at driver load to initialize basic
5857 * DRRS state (the delayed work item and the mutex).
5858 *
5859 * Returns:
5860 * Downclock mode if panel supports it, else return NULL.
5861 * DRRS support is determined by the presence of downclock mode (apart
5862 * from VBT setting).
5863 */
5864 static struct drm_display_mode *
5865 intel_dp_drrs_init(struct intel_connector *intel_connector,
5866 struct drm_display_mode *fixed_mode)
5867 {
5868 struct drm_connector *connector = &intel_connector->base;
5869 struct drm_device *dev = connector->dev;
5870 struct drm_i915_private *dev_priv = dev->dev_private;
5871 struct drm_display_mode *downclock_mode = NULL;
5872
5873 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5874 mutex_init(&dev_priv->drrs.mutex);
5875
5876 if (INTEL_INFO(dev)->gen <= 6) {
5877 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5878 return NULL;
5879 }
5880
5881 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5882 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5883 return NULL;
5884 }
5885
5886 downclock_mode = intel_find_panel_downclock
5887 (dev, fixed_mode, connector);
5888
5889 if (!downclock_mode) {
5890 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5891 return NULL;
5892 }
5893
5894 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5895
5896 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5897 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5898 return downclock_mode;
5899 }
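
/*
 * Illustrative sketch (not driver code): what "finding a downclock mode"
 * amounts to -- among the probed modes that match the fixed mode's
 * resolution, pick one with a lower refresh rate. Hypothetical minimal
 * types, for the example only.
 */
struct ex_mode { int hdisplay, vdisplay, vrefresh; };

static const struct ex_mode *
ex_find_downclock(const struct ex_mode *modes, int n,
		  const struct ex_mode *fixed)
{
	const struct ex_mode *best = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (modes[i].hdisplay != fixed->hdisplay ||
		    modes[i].vdisplay != fixed->vdisplay)
			continue;
		if (modes[i].vrefresh < fixed->vrefresh &&
		    (!best || modes[i].vrefresh < best->vrefresh))
			best = &modes[i];
	}
	return best; /* NULL: no downclock mode, hence no DRRS */
}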
5900
5901 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5902 struct intel_connector *intel_connector)
5903 {
5904 struct drm_connector *connector = &intel_connector->base;
5905 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5906 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5907 struct drm_device *dev = intel_encoder->base.dev;
5908 struct drm_i915_private *dev_priv = dev->dev_private;
5909 struct drm_display_mode *fixed_mode = NULL;
5910 struct drm_display_mode *downclock_mode = NULL;
5911 bool has_dpcd;
5912 struct drm_display_mode *scan;
5913 struct edid *edid;
5914 enum pipe pipe = INVALID_PIPE;
5915
5916 if (!is_edp(intel_dp))
5917 return true;
5918
5919 pps_lock(intel_dp);
5920 intel_edp_panel_vdd_sanitize(intel_dp);
5921 pps_unlock(intel_dp);
5922
5923 /* Cache DPCD and EDID for edp. */
5924 has_dpcd = intel_dp_get_dpcd(intel_dp);
5925
5926 if (has_dpcd) {
5927 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5928 dev_priv->no_aux_handshake =
5929 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5930 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5931 } else {
5932 /* if this fails, presume the device is a ghost */
5933 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5934 return false;
5935 }
5936
5937 /* We now know it's not a ghost, init power sequence regs. */
5938 pps_lock(intel_dp);
5939 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5940 pps_unlock(intel_dp);
5941
5942 mutex_lock(&dev->mode_config.mutex);
5943 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5944 if (edid) {
5945 if (drm_add_edid_modes(connector, edid)) {
5946 drm_mode_connector_update_edid_property(connector,
5947 edid);
5948 drm_edid_to_eld(connector, edid);
5949 } else {
5950 kfree(edid);
5951 edid = ERR_PTR(-EINVAL);
5952 }
5953 } else {
5954 edid = ERR_PTR(-ENOENT);
5955 }
5956 intel_connector->edid = edid;
5957
5958 /* prefer fixed mode from EDID if available */
5959 list_for_each_entry(scan, &connector->probed_modes, head) {
5960 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5961 fixed_mode = drm_mode_duplicate(dev, scan);
5962 downclock_mode = intel_dp_drrs_init(
5963 intel_connector, fixed_mode);
5964 break;
5965 }
5966 }
5967
5968 /* fallback to VBT if available for eDP */
5969 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5970 fixed_mode = drm_mode_duplicate(dev,
5971 dev_priv->vbt.lfp_lvds_vbt_mode);
5972 if (fixed_mode)
5973 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5974 }
5975 mutex_unlock(&dev->mode_config.mutex);
5976
5977 if (IS_VALLEYVIEW(dev)) {
5978 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5979 register_reboot_notifier(&intel_dp->edp_notifier);
5980
5981 /*
5982 * Figure out the current pipe for the initial backlight setup.
5983 * If the current pipe isn't valid, try the PPS pipe, and if that
5984 * fails just assume pipe A.
5985 */
5986 if (IS_CHERRYVIEW(dev))
5987 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5988 else
5989 pipe = PORT_TO_PIPE(intel_dp->DP);
5990
5991 if (pipe != PIPE_A && pipe != PIPE_B)
5992 pipe = intel_dp->pps_pipe;
5993
5994 if (pipe != PIPE_A && pipe != PIPE_B)
5995 pipe = PIPE_A;
5996
5997 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5998 pipe_name(pipe));
5999 }
6000
6001 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
6002 intel_connector->panel.backlight.power = intel_edp_backlight_power;
6003 intel_panel_setup_backlight(connector, pipe);
6004
6005 return true;
6006 }
6007
6008 bool
6009 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
6010 struct intel_connector *intel_connector)
6011 {
6012 struct drm_connector *connector = &intel_connector->base;
6013 struct intel_dp *intel_dp = &intel_dig_port->dp;
6014 struct intel_encoder *intel_encoder = &intel_dig_port->base;
6015 struct drm_device *dev = intel_encoder->base.dev;
6016 struct drm_i915_private *dev_priv = dev->dev_private;
6017 enum port port = intel_dig_port->port;
6018 int type;
6019
6020 intel_dp->pps_pipe = INVALID_PIPE;
6021
6022 /* intel_dp vfuncs */
6023 if (INTEL_INFO(dev)->gen >= 9)
6024 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
6025 else if (IS_VALLEYVIEW(dev))
6026 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
6027 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
6028 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
6029 else if (HAS_PCH_SPLIT(dev))
6030 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
6031 else
6032 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
6033
6034 if (INTEL_INFO(dev)->gen >= 9)
6035 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
6036 else
6037 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
6038
6039 /* Preserve the current hw state. */
6040 intel_dp->DP = I915_READ(intel_dp->output_reg);
6041 intel_dp->attached_connector = intel_connector;
6042
6043 if (intel_dp_is_edp(dev, port))
6044 type = DRM_MODE_CONNECTOR_eDP;
6045 else
6046 type = DRM_MODE_CONNECTOR_DisplayPort;
6047
6048 /*
6049 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
6050 * for DP the encoder type can be set by the caller to
6051 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
6052 */
6053 if (type == DRM_MODE_CONNECTOR_eDP)
6054 intel_encoder->type = INTEL_OUTPUT_EDP;
6055
6056 /* eDP only on port B and/or C on vlv/chv */
6057 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
6058 port != PORT_B && port != PORT_C))
6059 return false;
6060
6061 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
6062 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
6063 port_name(port));
6064
6065 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
6066 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
6067
6068 connector->interlace_allowed = true;
6069 connector->doublescan_allowed = 0;
6070
6071 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
6072 edp_panel_vdd_work);
6073
6074 intel_connector_attach_encoder(intel_connector, intel_encoder);
6075 drm_connector_register(connector);
6076
6077 if (HAS_DDI(dev))
6078 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
6079 else
6080 intel_connector->get_hw_state = intel_connector_get_hw_state;
6081 intel_connector->unregister = intel_dp_connector_unregister;
6082
6083 /* Set up the hotplug pin. */
6084 switch (port) {
6085 case PORT_A:
6086 intel_encoder->hpd_pin = HPD_PORT_A;
6087 break;
6088 case PORT_B:
6089 intel_encoder->hpd_pin = HPD_PORT_B;
6090 if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
6091 intel_encoder->hpd_pin = HPD_PORT_A;
6092 break;
6093 case PORT_C:
6094 intel_encoder->hpd_pin = HPD_PORT_C;
6095 break;
6096 case PORT_D:
6097 intel_encoder->hpd_pin = HPD_PORT_D;
6098 break;
6099 case PORT_E:
6100 intel_encoder->hpd_pin = HPD_PORT_E;
6101 break;
6102 default:
6103 BUG();
6104 }
6105
6106 if (is_edp(intel_dp)) {
6107 pps_lock(intel_dp);
6108 intel_dp_init_panel_power_timestamps(intel_dp);
6109 if (IS_VALLEYVIEW(dev))
6110 vlv_initial_power_sequencer_setup(intel_dp);
6111 else
6112 intel_dp_init_panel_power_sequencer(dev, intel_dp);
6113 pps_unlock(intel_dp);
6114 }
6115
6116 intel_dp_aux_init(intel_dp, intel_connector);
6117
6118 /* init MST on ports that can support it */
6119 if (HAS_DP_MST(dev) &&
6120 (port == PORT_B || port == PORT_C || port == PORT_D))
6121 intel_dp_mst_encoder_init(intel_dig_port,
6122 intel_connector->base.base.id);
6123
6124 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
6125 drm_dp_aux_unregister(&intel_dp->aux);
6126 if (is_edp(intel_dp)) {
6127 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
6128 /*
6129 * vdd might still be enabled due to the delayed vdd off.
6130 * Make sure vdd is actually turned off here.
6131 */
6132 pps_lock(intel_dp);
6133 edp_panel_vdd_off_sync(intel_dp);
6134 pps_unlock(intel_dp);
6135 }
6136 drm_connector_unregister(connector);
6137 drm_connector_cleanup(connector);
6138 return false;
6139 }
6140
6141 intel_dp_add_properties(intel_dp, connector);
6142
6143 /* For the G4X desktop chip, PEG_BAND_GAP_DATA bits 3:0 must first be
6144 * written with 0xd. Failure to do so will result in spurious interrupts being
6145 * generated on the port when a cable is not attached.
6146 */
6147 if (IS_G4X(dev) && !IS_GM45(dev)) {
6148 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
6149 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
6150 }
6151
6152 i915_debugfs_connector_add(connector);
6153
6154 return true;
6155 }
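
/*
 * Illustrative sketch (not driver code): the G4X band-gap workaround above
 * is a plain read-modify-write that forces bits 3:0 to 0xd.
 */
#include <stdint.h>

static inline uint32_t ex_band_gap_fixup(uint32_t reg)
{
	return (reg & ~0xfu) | 0xd; /* bits 3:0 := 0xd */
}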
6156
6157 void
6158 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
6159 {
6160 struct drm_i915_private *dev_priv = dev->dev_private;
6161 struct intel_digital_port *intel_dig_port;
6162 struct intel_encoder *intel_encoder;
6163 struct drm_encoder *encoder;
6164 struct intel_connector *intel_connector;
6165
6166 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
6167 if (!intel_dig_port)
6168 return;
6169
6170 intel_connector = intel_connector_alloc();
6171 if (!intel_connector)
6172 goto err_connector_alloc;
6173
6174 intel_encoder = &intel_dig_port->base;
6175 encoder = &intel_encoder->base;
6176
6177 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
6178 DRM_MODE_ENCODER_TMDS);
6179
6180 intel_encoder->compute_config = intel_dp_compute_config;
6181 intel_encoder->disable = intel_disable_dp;
6182 intel_encoder->get_hw_state = intel_dp_get_hw_state;
6183 intel_encoder->get_config = intel_dp_get_config;
6184 intel_encoder->suspend = intel_dp_encoder_suspend;
6185 if (IS_CHERRYVIEW(dev)) {
6186 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
6187 intel_encoder->pre_enable = chv_pre_enable_dp;
6188 intel_encoder->enable = vlv_enable_dp;
6189 intel_encoder->post_disable = chv_post_disable_dp;
6190 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
6191 } else if (IS_VALLEYVIEW(dev)) {
6192 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
6193 intel_encoder->pre_enable = vlv_pre_enable_dp;
6194 intel_encoder->enable = vlv_enable_dp;
6195 intel_encoder->post_disable = vlv_post_disable_dp;
6196 } else {
6197 intel_encoder->pre_enable = g4x_pre_enable_dp;
6198 intel_encoder->enable = g4x_enable_dp;
6199 if (INTEL_INFO(dev)->gen >= 5)
6200 intel_encoder->post_disable = ilk_post_disable_dp;
6201 }
6202
6203 intel_dig_port->port = port;
6204 intel_dig_port->dp.output_reg = output_reg;
6205
6206 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
6207 if (IS_CHERRYVIEW(dev)) {
6208 if (port == PORT_D)
6209 intel_encoder->crtc_mask = 1 << 2;
6210 else
6211 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6212 } else {
6213 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6214 }
6215 intel_encoder->cloneable = 0;
6216
6217 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
6218 dev_priv->hotplug.irq_port[port] = intel_dig_port;
6219
6220 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
6221 goto err_init_connector;
6222
6223 return;
6224
6225 err_init_connector:
6226 drm_encoder_cleanup(encoder);
6227 kfree(intel_connector);
6228 err_connector_alloc:
6229 kfree(intel_dig_port);
6230
6231 return;
6232 }
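
/*
 * Illustrative sketch (not driver code): the allocate/unwind idiom used by
 * intel_dp_init() above, with hypothetical types. Each error label undoes
 * exactly the steps that succeeded before it, in reverse order.
 */
#include <errno.h>
#include <stdlib.h>

struct ex_port { int id; };
struct ex_conn { int id; };

static int ex_register(struct ex_port *p, struct ex_conn *c)
{
	return 0; /* stand-in for intel_dp_init_connector() */
}

static int ex_output_init(void)
{
	struct ex_port *port = calloc(1, sizeof(*port));
	struct ex_conn *conn;

	if (!port)
		return -ENOMEM;

	conn = calloc(1, sizeof(*conn));
	if (!conn)
		goto err_conn_alloc;

	if (ex_register(port, conn) < 0)
		goto err_init_conn;

	return 0; /* on success the registered objects own the memory */

err_init_conn:
	free(conn);
err_conn_alloc:
	free(port);
	return -ENOMEM;
}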
6233
6234 void intel_dp_mst_suspend(struct drm_device *dev)
6235 {
6236 struct drm_i915_private *dev_priv = dev->dev_private;
6237 int i;
6238
6239 /* disable MST */
6240 for (i = 0; i < I915_MAX_PORTS; i++) {
6241 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6242 if (!intel_dig_port)
6243 continue;
6244
6245 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6246 if (!intel_dig_port->dp.can_mst)
6247 continue;
6248 if (intel_dig_port->dp.is_mst)
6249 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6250 }
6251 }
6252 }
6253
6254 void intel_dp_mst_resume(struct drm_device *dev)
6255 {
6256 struct drm_i915_private *dev_priv = dev->dev_private;
6257 int i;
6258
6259 for (i = 0; i < I915_MAX_PORTS; i++) {
6260 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6261 if (!intel_dig_port)
6262 continue;
6263 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6264 int ret;
6265
6266 if (!intel_dig_port->dp.can_mst)
6267 continue;
6268
6269 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6270 if (ret != 0) {
6271 intel_dp_check_mst_status(&intel_dig_port->dp);
6272 }
6273 }
6274 }
6275 }