]> git.ipfire.org Git - thirdparty/linux.git/blame - drivers/gpu/drm/i915/display/intel_dp.c
Merge tag 'drm/tegra/for-5.7-fixes' of git://anongit.freedesktop.org/tegra/linux...
[thirdparty/linux.git] / drivers / gpu / drm / i915 / display / intel_dp.c
CommitLineData
a4fc5ed6
KP
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
2d1a8a48 28#include <linux/export.h>
331c201a 29#include <linux/i2c.h>
01527b31
CT
30#include <linux/notifier.h>
31#include <linux/reboot.h>
331c201a
JN
32#include <linux/slab.h>
33#include <linux/types.h>
56c5098f 34
611032bf 35#include <asm/byteorder.h>
331c201a 36
c6f95f27 37#include <drm/drm_atomic_helper.h>
760285e7 38#include <drm/drm_crtc.h>
20f24d77 39#include <drm/drm_dp_helper.h>
760285e7 40#include <drm/drm_edid.h>
20f24d77 41#include <drm/drm_hdcp.h>
fcd70cd3 42#include <drm/drm_probe_helper.h>
331c201a 43
2126d3e9 44#include "i915_debugfs.h"
a4fc5ed6 45#include "i915_drv.h"
a09d9a80 46#include "i915_trace.h"
12392a74 47#include "intel_atomic.h"
331c201a 48#include "intel_audio.h"
ec7f29ff 49#include "intel_connector.h"
fdc24cf3 50#include "intel_ddi.h"
926b005c 51#include "intel_display_debugfs.h"
1d455f8d 52#include "intel_display_types.h"
27fec1f9 53#include "intel_dp.h"
e075094f 54#include "intel_dp_link_training.h"
46f2066e 55#include "intel_dp_mst.h"
b1ad4c39 56#include "intel_dpio_phy.h"
8834e365 57#include "intel_fifo_underrun.h"
408bd917 58#include "intel_hdcp.h"
0550691d 59#include "intel_hdmi.h"
dbeb38d9 60#include "intel_hotplug.h"
f3e18947 61#include "intel_lspcon.h"
42406fdc 62#include "intel_lvds.h"
44c1220a 63#include "intel_panel.h"
55367a27 64#include "intel_psr.h"
56c5098f 65#include "intel_sideband.h"
bc85328f 66#include "intel_tc.h"
b375d0ef 67#include "intel_vdsc.h"
a4fc5ed6 68
e8b2577c 69#define DP_DPRX_ESI_LEN 14
a4fc5ed6 70
d9218c8f
MN
71/* DP DSC throughput values used for slice count calculations KPixels/s */
72#define DP_DSC_PEAK_PIXEL_RATE 2720000
73#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
74#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000
75
ed06efb8
ML
76/* DP DSC FEC Overhead factor = 1/(0.972261) */
77#define DP_DSC_FEC_OVERHEAD_FACTOR 972261
d9218c8f 78
559be30c
TP
79/* Compliance test status bits */
80#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
81#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
82#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
83#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
84
9dd4ffdf 85struct dp_link_dpll {
840b32b7 86 int clock;
9dd4ffdf
CML
87 struct dpll dpll;
88};
89
45101e93 90static const struct dp_link_dpll g4x_dpll[] = {
840b32b7 91 { 162000,
9dd4ffdf 92 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
840b32b7 93 { 270000,
9dd4ffdf
CML
94 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
95};
96
97static const struct dp_link_dpll pch_dpll[] = {
840b32b7 98 { 162000,
9dd4ffdf 99 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
840b32b7 100 { 270000,
9dd4ffdf
CML
101 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
102};
103
65ce4bf5 104static const struct dp_link_dpll vlv_dpll[] = {
840b32b7 105 { 162000,
58f6e632 106 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
840b32b7 107 { 270000,
65ce4bf5
CML
108 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
109};
110
ef9348c8
CML
111/*
112 * CHV supports eDP 1.4 that have more link rates.
113 * Below only provides the fixed rate but exclude variable rate.
114 */
115static const struct dp_link_dpll chv_dpll[] = {
116 /*
117 * CHV requires to program fractional division for m2.
118 * m2 is stored in fixed point format using formula below
119 * (m2_int << 22) | m2_fraction
120 */
840b32b7 121 { 162000, /* m2_int = 32, m2_fraction = 1677722 */
ef9348c8 122 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
840b32b7 123 { 270000, /* m2_int = 27, m2_fraction = 0 */
ef9348c8 124 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
ef9348c8 125};
637a9c63 126
d9218c8f
MN
127/* Constants for DP DSC configurations */
128static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
129
130/* With Single pipe configuration, HW is capable of supporting maximum
131 * of 4 slices per line.
132 */
133static const u8 valid_dsc_slicecount[] = {1, 2, 4};
134
cfcb0fc9 135/**
1853a9da 136 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
cfcb0fc9
JB
137 * @intel_dp: DP struct
138 *
139 * If a CPU or PCH DP output is attached to an eDP panel, this function
140 * will return true, and false otherwise.
141 */
1853a9da 142bool intel_dp_is_edp(struct intel_dp *intel_dp)
cfcb0fc9 143{
da63a9f2
PZ
144 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
145
146 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
cfcb0fc9
JB
147}
148
adc10304
VS
149static void intel_dp_link_down(struct intel_encoder *encoder,
150 const struct intel_crtc_state *old_crtc_state);
1e0560e0 151static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
4be73780 152static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
adc10304
VS
153static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
154 const struct intel_crtc_state *crtc_state);
46bd8383 155static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
a8c3344e 156 enum pipe pipe);
f21a2198 157static void intel_dp_unset_edid(struct intel_dp *intel_dp);
a4fc5ed6 158
68f357cb
JN
159/* update sink rates from dpcd */
160static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
161{
229675d5 162 static const int dp_rates[] = {
c71b53cc 163 162000, 270000, 540000, 810000
229675d5 164 };
a8a08886 165 int i, max_rate;
68f357cb 166
a8a08886 167 max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
68f357cb 168
229675d5
JN
169 for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
170 if (dp_rates[i] > max_rate)
a8a08886 171 break;
229675d5 172 intel_dp->sink_rates[i] = dp_rates[i];
a8a08886 173 }
68f357cb 174
a8a08886 175 intel_dp->num_sink_rates = i;
68f357cb
JN
176}
177
10ebb736
JN
/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* rates[] is sorted ascending; scan down from the top entry. */
	for (i = len - 1; i >= 0; i--) {
		if (rates[i] <= max_rate)
			return i + 1;
	}

	return 0;
}
191
/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	/* common_rates[] is the sorted source ∩ sink rate list. */
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}
199
540b0b7f
JN
/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	/* common_rates[] is sorted ascending; the last entry is the max. */
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}
205
540b0b7f
JN
206/* Theoretical max between source and sink */
207static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
eeb6324d
PZ
208{
209 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
540b0b7f
JN
210 int source_max = intel_dig_port->max_lanes;
211 int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
bc85328f 212 int fia_max = intel_tc_port_fia_max_lane_count(intel_dig_port);
eeb6324d 213
db7295c2 214 return min3(source_max, sink_max, fia_max);
eeb6324d
PZ
215}
216
int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	/* Current cap, possibly reduced by link training fallback. */
	return intel_dp->max_link_lane_count;
}
221
int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/*
	 * pixel_clock is in kHz; multiplying by bpp gives kbit/s and
	 * dividing by 8 (rounding up) converts bits to bytes.
	 */
	int total_bits = pixel_clock * bpp;

	return DIV_ROUND_UP(total_bits, 8);
}
228
int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/*
	 * max_link_clock is the link symbol clock (LS_Clk) in kHz, not the
	 * link rate usually quoted in Gbps. Each lane transmits 8 data bits
	 * per LS_Clk, so the channel encoding done in the PHY layer needs
	 * no extra accounting here: the data rate is clock times lanes.
	 */
	return max_lanes * max_link_clock;
}
240
70ec0645
MK
241static int
242intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
243{
244 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
245 struct intel_encoder *encoder = &intel_dig_port->base;
246 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
247 int max_dotclk = dev_priv->max_dotclk_freq;
248 int ds_max_dotclk;
249
250 int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
251
252 if (type != DP_DS_PORT_TYPE_VGA)
253 return max_dotclk;
254
255 ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
256 intel_dp->downstream_ports);
257
258 if (ds_max_dotclk != 0)
259 max_dotclk = min(max_dotclk, ds_max_dotclk);
260
261 return max_dotclk;
262}
263
4ba285d4 264static int cnl_max_source_rate(struct intel_dp *intel_dp)
53ddb3cd
RV
265{
266 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
267 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
268 enum port port = dig_port->base.port;
269
b4e33881 270 u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
53ddb3cd
RV
271
272 /* Low voltage SKUs are limited to max of 5.4G */
273 if (voltage == VOLTAGE_INFO_0_85V)
4ba285d4 274 return 540000;
53ddb3cd
RV
275
276 /* For this SKU 8.1G is supported in all ports */
277 if (IS_CNL_WITH_PORT_F(dev_priv))
4ba285d4 278 return 810000;
53ddb3cd 279
3758d968 280 /* For other SKUs, max rate on ports A and D is 5.4G */
53ddb3cd 281 if (port == PORT_A || port == PORT_D)
4ba285d4 282 return 540000;
53ddb3cd 283
4ba285d4 284 return 810000;
53ddb3cd
RV
285}
286
46b527d1
MN
287static int icl_max_source_rate(struct intel_dp *intel_dp)
288{
289 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
b265a2a6 290 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
d8fe2ab6 291 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
46b527d1 292
d8fe2ab6 293 if (intel_phy_is_combo(dev_priv, phy) &&
b7143860 294 !IS_ELKHARTLAKE(dev_priv) &&
b265a2a6 295 !intel_dp_is_edp(intel_dp))
46b527d1
MN
296 return 540000;
297
298 return 810000;
299}
300
55cfc580
JN
/*
 * Populate intel_dp->source_rates/num_source_rates with the link rates this
 * platform's source can drive, optionally clamped by a platform max rate
 * and/or a VBT-specified max rate. Called once at init.
 */
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate;

	/* This should only be done once */
	drm_WARN_ON(&dev_priv->drm,
		    intel_dp->source_rates || intel_dp->num_source_rates);

	/* Pick the rate table and platform max rate (0 = no extra cap). */
	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	/* Combine the platform cap with the VBT cap (either may be 0). */
	vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	/* Trim the table length to the effective max rate, if any. */
	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}
364
365static int intersect_rates(const int *source_rates, int source_len,
366 const int *sink_rates, int sink_len,
367 int *common_rates)
368{
369 int i = 0, j = 0, k = 0;
370
371 while (i < source_len && j < sink_len) {
372 if (source_rates[i] == sink_rates[j]) {
373 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
374 return k;
375 common_rates[k] = source_rates[i];
376 ++k;
377 ++i;
378 ++j;
379 } else if (source_rates[i] < sink_rates[j]) {
380 ++i;
381 } else {
382 ++j;
383 }
384 }
385 return k;
386}
387
8001b754
JN
/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int idx = 0;

	while (idx < len) {
		if (rates[idx] == rate)
			return idx;
		idx++;
	}

	return -1;
}
399
/* Recompute the source ∩ sink rate list into intel_dp->common_rates. */
static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	/* Both source and sink rates must have been populated first. */
	WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (WARN_ON(intel_dp->num_common_rates == 0)) {
		/* Fall back to the lowest standard rate (RBR, 1.62 GHz). */
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}
416
1a92c70e 417static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
830de422 418 u8 lane_count)
14c562c0
MN
419{
420 /*
421 * FIXME: we need to synchronize the current link parameters with
422 * hardware readout. Currently fast link training doesn't work on
423 * boot-up.
424 */
1a92c70e
MN
425 if (link_rate == 0 ||
426 link_rate > intel_dp->max_link_rate)
14c562c0
MN
427 return false;
428
1a92c70e
MN
429 if (lane_count == 0 ||
430 lane_count > intel_dp_max_lane_count(intel_dp))
14c562c0
MN
431 return false;
432
433 return true;
434}
435
1e712535
MN
436static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
437 int link_rate,
830de422 438 u8 lane_count)
1e712535
MN
439{
440 const struct drm_display_mode *fixed_mode =
441 intel_dp->attached_connector->panel.fixed_mode;
442 int mode_rate, max_rate;
443
444 mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
445 max_rate = intel_dp_max_data_rate(link_rate, lane_count);
446 if (mode_rate > max_rate)
447 return false;
448
449 return true;
450}
451
/*
 * Reduce the link parameters after a failed link training: first try the
 * next lower common rate at the same lane count, and only once the lowest
 * rate is reached halve the lane count. Returns 0 to retry (possibly with
 * unchanged parameters on eDP), -1 when no further fallback is possible.
 */
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	int index;

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		/* A lower common rate exists; on eDP only take it if the
		 * fixed mode still fits, otherwise retry unchanged.
		 */
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		/* Already at the lowest rate: halve the lane count and go
		 * back to the max common rate.
		 */
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		/* Lowest rate and a single lane: nothing left to try. */
		DRM_ERROR("Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}
487
ed06efb8
ML
u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
	/*
	 * Scale the mode clock up by the FEC overhead: the result is
	 * mode_clock * 1000000 / 972261, i.e. mode_clock / 0.972261
	 * (see DP_DSC_FEC_OVERHEAD_FACTOR).
	 */
	return div_u64(mul_u32_u32(mode_clock, 1000000U),
		       DP_DSC_FEC_OVERHEAD_FACTOR);
}
493
45d3c5cd
MR
static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	/* Gen11+ has 7680 bytes of small joiner RAM, older gens 6144. */
	return (INTEL_GEN(i915) >= 11 ? 7680 : 6144) * 8;
}
502
/*
 * Compute the maximum DSC output bpp for the given link parameters and
 * mode, bounded by both link bandwidth and small joiner RAM, snapped to
 * the nearest valid VESA bpp. Returns the value in U6.4 fixed point
 * (integer bpp << 4), or 0 if no valid bpp is achievable.
 */
static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
				       u32 link_clock, u32 lane_count,
				       u32 mode_clock, u32 mode_hdisplay)
{
	u32 bits_per_pixel, max_bpp_small_joiner_ram;
	int i;

	/*
	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
	 * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
	 * for SST -> TimeSlotsPerMTP is 1,
	 * for MST -> TimeSlotsPerMTP has to be calculated
	 */
	bits_per_pixel = (link_clock * lane_count * 8) /
		intel_dp_mode_to_fec_clock(mode_clock);
	drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
		mode_hdisplay;
	drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
		    max_bpp_small_joiner_ram);

	/*
	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
	 * check, output bpp from small joiner RAM check)
	 */
	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
			    bits_per_pixel, valid_dsc_bpp[0]);
		return 0;
	}

	/* Find the nearest match in the array of known BPPs from VESA */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
		if (bits_per_pixel < valid_dsc_bpp[i + 1])
			break;
	}
	/* Round down to the highest valid bpp not exceeding the limit. */
	bits_per_pixel = valid_dsc_bpp[i];

	/*
	 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
	 * fractional part is 0
	 */
	return bits_per_pixel << 4;
}
552
/*
 * Compute the DSC slice count: the smallest valid slice count that meets
 * the encoder throughput and sink max-slice-width limits. Returns 0 when
 * no supported slice count works.
 */
static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
				       int mode_clock, int mode_hdisplay)
{
	u8 min_slice_count, i;
	int max_slice_width;

	/* Pick the per-slice encoder throughput limit by pixel rate. */
	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
			      max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = min_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		/* Stop once we exceed what the sink supports. */
		if (valid_dsc_slicecount[i] >
		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
						    false))
			break;
		if (min_slice_count <= valid_dsc_slicecount[i])
			return valid_dsc_slicecount[i];
	}

	DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
	return 0;
}
590
98c93394
VS
591static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
592 int hdisplay)
593{
594 /*
595 * Older platforms don't like hdisplay==4096 with DP.
596 *
597 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
598 * and frame counter increment), but we don't get vblank interrupts,
599 * and the pipe underruns immediately. The link also doesn't seem
600 * to get trained properly.
601 *
602 * On CHV the vblank interrupts don't seem to disappear but
603 * otherwise the symptoms are similar.
604 *
605 * TODO: confirm the behaviour on HSW+
606 */
607 return hdisplay == 4096 && !HAS_DDI(dev_priv);
608}
609
/*
 * Validate a mode against panel limits, link bandwidth (optionally with
 * DSC on gen10+/GLK), downstream dotclock limits and platform quirks.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

	/* eDP: the mode must fit within the panel's fixed mode. */
	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		/* The panel is always driven at its fixed mode clock. */
		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
		return MODE_H_ILLEGAL;

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(dev_priv,
							    max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay);
		}
	}

	/* Too fast unless DSC can compress it; dotclock limit is absolute. */
	if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
	    target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return intel_mode_valid_max_plane_size(dev_priv, mode);
}
686
830de422 687u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
a4fc5ed6 688{
830de422
JN
689 int i;
690 u32 v = 0;
a4fc5ed6
KP
691
692 if (src_bytes > 4)
693 src_bytes = 4;
694 for (i = 0; i < src_bytes; i++)
830de422 695 v |= ((u32)src[i]) << ((3 - i) * 8);
a4fc5ed6
KP
696 return v;
697}
698
830de422 699static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
a4fc5ed6
KP
700{
701 int i;
702 if (dst_bytes > 4)
703 dst_bytes = 4;
704 for (i = 0; i < dst_bytes; i++)
705 dst[i] = src >> ((3-i) * 8);
706}
707
bf13e81b 708static void
46bd8383 709intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
bf13e81b 710static void
46bd8383 711intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
5d5ab2d2 712 bool force_disable_vdd);
335f752b 713static void
46bd8383 714intel_dp_pps_init(struct intel_dp *intel_dp);
bf13e81b 715
69d93820
CW
/*
 * Take the panel power sequencer lock, holding an AUX power domain
 * reference for the duration. Returns the wakeref to pass to pps_unlock().
 */
static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * See intel_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(dp_to_dig_port(intel_dp)));

	mutex_lock(&dev_priv->pps_mutex);

	return wakeref;
}
733
69d93820
CW
/*
 * Drop the panel power sequencer lock and the power domain reference
 * taken in pps_lock(). Always returns 0 so callers can reset their
 * wakeref variable (see with_pps_lock()).
 */
static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dp_to_dig_port(intel_dp)),
				wakeref);
	return 0;
}
745
69d93820
CW
746#define with_pps_lock(dp, wf) \
747 for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
748
961a0db0
VS
/*
 * Make the VLV/CHV panel power sequencer lock onto the port by briefly
 * enabling and disabling the DP port with minimal link settings. The
 * pipe's DPLL must be running, so it is force-enabled temporarily when
 * needed. Bails out if the port is already active.
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	if (drm_WARN(&dev_priv->drm,
		     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
		     "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
		     pipe_name(pipe), intel_dig_port->base.base.base.id,
		     intel_dig_port->base.base.name))
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(pipe), intel_dig_port->base.base.base.id,
		    intel_dig_port->base.base.name);

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
	/* Minimal link config: one lane, lowest drive, training pattern 1. */
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable temporarily it if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			drm_err(&dev_priv->drm,
				"Failed to force on pll for pipe %c!\n",
				pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	/* Undo the temporary PLL enable / powergate override, if any. */
	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}
826
9f2bdb00
VS
/*
 * Find a power sequencer (pipe A or B) not claimed by any DP encoder.
 * Returns INVALID_PIPE when both are in use.
 */
static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			drm_WARN_ON(&dev_priv->drm,
				    intel_dp->active_pipe != INVALID_PIPE &&
				    intel_dp->active_pipe !=
				    intel_dp->pps_pipe);

			/* eDP claims the PPS of its pps_pipe. */
			if (intel_dp->pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps_pipe);
		} else {
			/* Regular DP should never own a PPS. */
			drm_WARN_ON(&dev_priv->drm,
				    intel_dp->pps_pipe != INVALID_PIPE);

			if (intel_dp->active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	/* Lowest still-free pipe wins. */
	return ffs(pipes) - 1;
}
861
bf13e81b
JN
/*
 * Return the pipe whose power sequencer this eDP port uses, allocating,
 * initializing and kicking one if the port doesn't have one yet.
 * Must be called with pps_mutex held.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE &&
		    intel_dp->active_pipe != intel_dp->pps_pipe);

	/* Already have one? */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps_pipe),
		    intel_dig_port->base.base.base.id,
		    intel_dig_port->base.base.name);

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
910
78597996
ID
911static int
912bxt_power_sequencer_idx(struct intel_dp *intel_dp)
913{
de25eb7f 914 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
73c0fcac 915 int backlight_controller = dev_priv->vbt.backlight.controller;
78597996
ID
916
917 lockdep_assert_held(&dev_priv->pps_mutex);
918
919 /* We should never land here with regular DP ports */
eb020ca3 920 drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
78597996 921
78597996 922 if (!intel_dp->pps_reset)
73c0fcac 923 return backlight_controller;
78597996
ID
924
925 intel_dp->pps_reset = false;
926
927 /*
928 * Only the HW needs to be reprogrammed, the SW state is fixed and
929 * has been setup during connector init.
930 */
46bd8383 931 intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
78597996 932
73c0fcac 933 return backlight_controller;
78597996
ID
934}
935
6491ab27
VS
936typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
937 enum pipe pipe);
938
939static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
940 enum pipe pipe)
941{
b4e33881 942 return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
6491ab27
VS
943}
944
945static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
946 enum pipe pipe)
947{
b4e33881 948 return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
6491ab27
VS
949}
950
951static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
952 enum pipe pipe)
953{
954 return true;
955}
bf13e81b 956
a4a5d2f8 957static enum pipe
6491ab27
VS
958vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
959 enum port port,
960 vlv_pipe_check pipe_check)
a4a5d2f8
VS
961{
962 enum pipe pipe;
bf13e81b 963
bf13e81b 964 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
b4e33881 965 u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
bf13e81b 966 PANEL_PORT_SELECT_MASK;
a4a5d2f8
VS
967
968 if (port_sel != PANEL_PORT_SELECT_VLV(port))
969 continue;
970
6491ab27
VS
971 if (!pipe_check(dev_priv, pipe))
972 continue;
973
a4a5d2f8 974 return pipe;
bf13e81b
JN
975 }
976
a4a5d2f8
VS
977 return INVALID_PIPE;
978}
979
980static void
981vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
982{
de25eb7f 983 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
a4a5d2f8 984 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
8f4f2797 985 enum port port = intel_dig_port->base.port;
a4a5d2f8
VS
986
987 lockdep_assert_held(&dev_priv->pps_mutex);
988
989 /* try to find a pipe with this port selected */
6491ab27
VS
990 /* first pick one where the panel is on */
991 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
992 vlv_pipe_has_pp_on);
993 /* didn't find one? pick one where vdd is on */
994 if (intel_dp->pps_pipe == INVALID_PIPE)
995 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
996 vlv_pipe_has_vdd_on);
997 /* didn't find one? pick one with just the correct port */
998 if (intel_dp->pps_pipe == INVALID_PIPE)
999 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
1000 vlv_pipe_any);
a4a5d2f8
VS
1001
1002 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
1003 if (intel_dp->pps_pipe == INVALID_PIPE) {
bdc6114e
WK
1004 drm_dbg_kms(&dev_priv->drm,
1005 "no initial power sequencer for [ENCODER:%d:%s]\n",
1006 intel_dig_port->base.base.base.id,
1007 intel_dig_port->base.base.name);
a4a5d2f8 1008 return;
bf13e81b
JN
1009 }
1010
bdc6114e
WK
1011 drm_dbg_kms(&dev_priv->drm,
1012 "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
1013 intel_dig_port->base.base.base.id,
1014 intel_dig_port->base.base.name,
1015 pipe_name(intel_dp->pps_pipe));
a4a5d2f8 1016
46bd8383
VS
1017 intel_dp_init_panel_power_sequencer(intel_dp);
1018 intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
bf13e81b
JN
1019}
1020
78597996 1021void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
773538e8 1022{
773538e8
VS
1023 struct intel_encoder *encoder;
1024
a2ab4ab6
CW
1025 if (drm_WARN_ON(&dev_priv->drm,
1026 !(IS_VALLEYVIEW(dev_priv) ||
1027 IS_CHERRYVIEW(dev_priv) ||
1028 IS_GEN9_LP(dev_priv))))
773538e8
VS
1029 return;
1030
1031 /*
1032 * We can't grab pps_mutex here due to deadlock with power_domain
1033 * mutex when power_domain functions are called while holding pps_mutex.
1034 * That also means that in order to use pps_pipe the code needs to
1035 * hold both a power domain reference and pps_mutex, and the power domain
1036 * reference get/put must be done while _not_ holding pps_mutex.
1037 * pps_{lock,unlock}() do these steps in the correct order, so one
1038 * should use them always.
1039 */
1040
14aa521c 1041 for_each_intel_dp(&dev_priv->drm, encoder) {
b7d02c3a 1042 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
7e732cac 1043
eb020ca3
PB
1044 drm_WARN_ON(&dev_priv->drm,
1045 intel_dp->active_pipe != INVALID_PIPE);
9f2bdb00
VS
1046
1047 if (encoder->type != INTEL_OUTPUT_EDP)
1048 continue;
1049
cc3f90f0 1050 if (IS_GEN9_LP(dev_priv))
78597996
ID
1051 intel_dp->pps_reset = true;
1052 else
1053 intel_dp->pps_pipe = INVALID_PIPE;
773538e8 1054 }
bf13e81b
JN
1055}
1056
8e8232d5
ID
1057struct pps_registers {
1058 i915_reg_t pp_ctrl;
1059 i915_reg_t pp_stat;
1060 i915_reg_t pp_on;
1061 i915_reg_t pp_off;
1062 i915_reg_t pp_div;
1063};
1064
46bd8383 1065static void intel_pps_get_registers(struct intel_dp *intel_dp,
8e8232d5
ID
1066 struct pps_registers *regs)
1067{
de25eb7f 1068 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
44cb734c
ID
1069 int pps_idx = 0;
1070
8e8232d5
ID
1071 memset(regs, 0, sizeof(*regs));
1072
cc3f90f0 1073 if (IS_GEN9_LP(dev_priv))
44cb734c
ID
1074 pps_idx = bxt_power_sequencer_idx(intel_dp);
1075 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1076 pps_idx = vlv_power_sequencer_pipe(intel_dp);
8e8232d5 1077
44cb734c
ID
1078 regs->pp_ctrl = PP_CONTROL(pps_idx);
1079 regs->pp_stat = PP_STATUS(pps_idx);
1080 regs->pp_on = PP_ON_DELAYS(pps_idx);
1081 regs->pp_off = PP_OFF_DELAYS(pps_idx);
ab3517c1
JN
1082
1083 /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
c6c30b91 1084 if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
ab3517c1
JN
1085 regs->pp_div = INVALID_MMIO_REG;
1086 else
44cb734c 1087 regs->pp_div = PP_DIVISOR(pps_idx);
8e8232d5
ID
1088}
1089
f0f59a00
VS
1090static i915_reg_t
1091_pp_ctrl_reg(struct intel_dp *intel_dp)
bf13e81b 1092{
8e8232d5 1093 struct pps_registers regs;
bf13e81b 1094
46bd8383 1095 intel_pps_get_registers(intel_dp, &regs);
8e8232d5
ID
1096
1097 return regs.pp_ctrl;
bf13e81b
JN
1098}
1099
f0f59a00
VS
1100static i915_reg_t
1101_pp_stat_reg(struct intel_dp *intel_dp)
bf13e81b 1102{
8e8232d5 1103 struct pps_registers regs;
bf13e81b 1104
46bd8383 1105 intel_pps_get_registers(intel_dp, &regs);
8e8232d5
ID
1106
1107 return regs.pp_stat;
bf13e81b
JN
1108}
1109
01527b31
CT
1110/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
1111 This function only applicable when panel PM state is not to be tracked */
1112static int edp_notify_handler(struct notifier_block *this, unsigned long code,
1113 void *unused)
1114{
1115 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
1116 edp_notifier);
de25eb7f 1117 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
69d93820 1118 intel_wakeref_t wakeref;
01527b31 1119
1853a9da 1120 if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
01527b31
CT
1121 return 0;
1122
69d93820
CW
1123 with_pps_lock(intel_dp, wakeref) {
1124 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1125 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
1126 i915_reg_t pp_ctrl_reg, pp_div_reg;
1127 u32 pp_div;
1128
1129 pp_ctrl_reg = PP_CONTROL(pipe);
1130 pp_div_reg = PP_DIVISOR(pipe);
b4e33881 1131 pp_div = intel_de_read(dev_priv, pp_div_reg);
69d93820
CW
1132 pp_div &= PP_REFERENCE_DIVIDER_MASK;
1133
1134 /* 0x1F write to PP_DIV_REG sets max cycle delay */
b4e33881
JN
1135 intel_de_write(dev_priv, pp_div_reg, pp_div | 0x1F);
1136 intel_de_write(dev_priv, pp_ctrl_reg,
1137 PANEL_UNLOCK_REGS);
69d93820
CW
1138 msleep(intel_dp->panel_power_cycle_delay);
1139 }
01527b31
CT
1140 }
1141
1142 return 0;
1143}
1144
4be73780 1145static bool edp_have_panel_power(struct intel_dp *intel_dp)
ebf33b18 1146{
de25eb7f 1147 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
ebf33b18 1148
e39b999a
VS
1149 lockdep_assert_held(&dev_priv->pps_mutex);
1150
920a14b2 1151 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
9a42356b
VS
1152 intel_dp->pps_pipe == INVALID_PIPE)
1153 return false;
1154
b4e33881 1155 return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
ebf33b18
KP
1156}
1157
4be73780 1158static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
ebf33b18 1159{
de25eb7f 1160 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
ebf33b18 1161
e39b999a
VS
1162 lockdep_assert_held(&dev_priv->pps_mutex);
1163
920a14b2 1164 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
9a42356b
VS
1165 intel_dp->pps_pipe == INVALID_PIPE)
1166 return false;
1167
b4e33881 1168 return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
ebf33b18
KP
1169}
1170
9b984dae
KP
1171static void
1172intel_dp_check_edp(struct intel_dp *intel_dp)
1173{
de25eb7f 1174 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
ebf33b18 1175
1853a9da 1176 if (!intel_dp_is_edp(intel_dp))
9b984dae 1177 return;
453c5420 1178
4be73780 1179 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
eb020ca3
PB
1180 drm_WARN(&dev_priv->drm, 1,
1181 "eDP powered off while attempting aux channel communication.\n");
bdc6114e 1182 drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
b4e33881
JN
1183 intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
1184 intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
9b984dae
KP
1185 }
1186}
1187
830de422 1188static u32
8a29c778 1189intel_dp_aux_wait_done(struct intel_dp *intel_dp)
9ee32fea 1190{
5a31d30b 1191 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4904fa66 1192 i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
54516464 1193 const unsigned int timeout_ms = 10;
830de422 1194 u32 status;
9ee32fea
DV
1195 bool done;
1196
5a31d30b
TU
1197#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
1198 done = wait_event_timeout(i915->gmbus_wait_queue, C,
54516464 1199 msecs_to_jiffies_timeout(timeout_ms));
39806c3f
VS
1200
1201 /* just trace the final value */
1202 trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
1203
9ee32fea 1204 if (!done)
bdc6114e 1205 drm_err(&i915->drm,
264c0247 1206 "%s: did not complete or timeout within %ums (status 0x%08x)\n",
bdc6114e 1207 intel_dp->aux.name, timeout_ms, status);
9ee32fea
DV
1208#undef C
1209
1210 return status;
1211}
1212
830de422 1213static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
a4fc5ed6 1214{
de25eb7f 1215 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
9ee32fea 1216
a457f54b
VS
1217 if (index)
1218 return 0;
1219
ec5b01dd
DL
1220 /*
1221 * The clock divider is based off the hrawclk, and would like to run at
a457f54b 1222 * 2MHz. So, take the hrawclk value and divide by 2000 and use that
a4fc5ed6 1223 */
b04002f4 1224 return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
ec5b01dd
DL
1225}
1226
830de422 1227static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
ec5b01dd 1228{
de25eb7f 1229 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0 1230 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
b04002f4 1231 u32 freq;
ec5b01dd
DL
1232
1233 if (index)
1234 return 0;
1235
a457f54b
VS
1236 /*
1237 * The clock divider is based off the cdclk or PCH rawclk, and would
1238 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
1239 * divide by 2000 and use that
1240 */
563d22a0 1241 if (dig_port->aux_ch == AUX_CH_A)
b04002f4 1242 freq = dev_priv->cdclk.hw.cdclk;
e7dc33f3 1243 else
b04002f4
CW
1244 freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
1245 return DIV_ROUND_CLOSEST(freq, 2000);
ec5b01dd
DL
1246}
1247
830de422 1248static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
ec5b01dd 1249{
de25eb7f 1250 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0 1251 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
ec5b01dd 1252
563d22a0 1253 if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
2c55c336 1254 /* Workaround for non-ULT HSW */
bc86625a
CW
1255 switch (index) {
1256 case 0: return 63;
1257 case 1: return 72;
1258 default: return 0;
1259 }
2c55c336 1260 }
a457f54b
VS
1261
1262 return ilk_get_aux_clock_divider(intel_dp, index);
b84a1cf8
RV
1263}
1264
830de422 1265static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
b6b5e383
DL
1266{
1267 /*
1268 * SKL doesn't need us to program the AUX clock divider (Hardware will
1269 * derive the clock from CDCLK automatically). We still implement the
1270 * get_aux_clock_divider vfunc to plug-in into the existing code.
1271 */
1272 return index ? 0 : 1;
1273}
1274
830de422
JN
1275static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
1276 int send_bytes,
1277 u32 aux_clock_divider)
5ed12a19
DL
1278{
1279 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
8652744b
TU
1280 struct drm_i915_private *dev_priv =
1281 to_i915(intel_dig_port->base.base.dev);
830de422 1282 u32 precharge, timeout;
5ed12a19 1283
cf819eff 1284 if (IS_GEN(dev_priv, 6))
5ed12a19
DL
1285 precharge = 3;
1286 else
1287 precharge = 5;
1288
8f5f63d5 1289 if (IS_BROADWELL(dev_priv))
5ed12a19
DL
1290 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
1291 else
1292 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
1293
1294 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 1295 DP_AUX_CH_CTL_DONE |
8a29c778 1296 DP_AUX_CH_CTL_INTERRUPT |
788d4433 1297 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 1298 timeout |
788d4433 1299 DP_AUX_CH_CTL_RECEIVE_ERROR |
5ed12a19
DL
1300 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1301 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 1302 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
5ed12a19
DL
1303}
1304
830de422
JN
1305static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
1306 int send_bytes,
1307 u32 unused)
b9ca5fad 1308{
6f211ed4 1309 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
49748264
JRS
1310 struct drm_i915_private *i915 =
1311 to_i915(intel_dig_port->base.base.dev);
1312 enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
830de422 1313 u32 ret;
6f211ed4
AS
1314
1315 ret = DP_AUX_CH_CTL_SEND_BUSY |
1316 DP_AUX_CH_CTL_DONE |
1317 DP_AUX_CH_CTL_INTERRUPT |
1318 DP_AUX_CH_CTL_TIME_OUT_ERROR |
1319 DP_AUX_CH_CTL_TIME_OUT_MAX |
1320 DP_AUX_CH_CTL_RECEIVE_ERROR |
1321 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1322 DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
1323 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
1324
49748264
JRS
1325 if (intel_phy_is_tc(i915, phy) &&
1326 intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
6f211ed4
AS
1327 ret |= DP_AUX_CH_CTL_TBT_IO;
1328
1329 return ret;
b9ca5fad
DL
1330}
1331
b84a1cf8 1332static int
f7606265 1333intel_dp_aux_xfer(struct intel_dp *intel_dp,
830de422
JN
1334 const u8 *send, int send_bytes,
1335 u8 *recv, int recv_size,
8159c796 1336 u32 aux_send_ctl_flags)
b84a1cf8
RV
1337{
1338 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5a31d30b 1339 struct drm_i915_private *i915 =
0031fb96 1340 to_i915(intel_dig_port->base.base.dev);
5a31d30b 1341 struct intel_uncore *uncore = &i915->uncore;
d8fe2ab6
MR
1342 enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
1343 bool is_tc_port = intel_phy_is_tc(i915, phy);
4904fa66 1344 i915_reg_t ch_ctl, ch_data[5];
830de422 1345 u32 aux_clock_divider;
f39194a7
ID
1346 enum intel_display_power_domain aux_domain =
1347 intel_aux_power_domain(intel_dig_port);
1348 intel_wakeref_t aux_wakeref;
1349 intel_wakeref_t pps_wakeref;
b84a1cf8 1350 int i, ret, recv_bytes;
5ed12a19 1351 int try, clock = 0;
830de422 1352 u32 status;
884f19e9
JN
1353 bool vdd;
1354
4904fa66
VS
1355 ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
1356 for (i = 0; i < ARRAY_SIZE(ch_data); i++)
1357 ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
1358
8c10e226
ID
1359 if (is_tc_port)
1360 intel_tc_port_lock(intel_dig_port);
1361
5a31d30b 1362 aux_wakeref = intel_display_power_get(i915, aux_domain);
f39194a7 1363 pps_wakeref = pps_lock(intel_dp);
e39b999a 1364
72c3500a
VS
1365 /*
1366 * We will be called with VDD already enabled for dpcd/edid/oui reads.
1367 * In such cases we want to leave VDD enabled and it's up to upper layers
1368 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
1369 * ourselves.
1370 */
1e0560e0 1371 vdd = edp_panel_vdd_on(intel_dp);
b84a1cf8
RV
1372
1373 /* dp aux is extremely sensitive to irq latency, hence request the
1374 * lowest possible wakeup latency and so prevent the cpu from going into
1375 * deep sleep states.
1376 */
4d4dda48 1377 cpu_latency_qos_update_request(&i915->pm_qos, 0);
b84a1cf8
RV
1378
1379 intel_dp_check_edp(intel_dp);
5eb08b69 1380
11bee43e
JB
1381 /* Try to wait for any previous AUX channel activity */
1382 for (try = 0; try < 3; try++) {
5a31d30b 1383 status = intel_uncore_read_notrace(uncore, ch_ctl);
11bee43e
JB
1384 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
1385 break;
1386 msleep(1);
1387 }
39806c3f
VS
1388 /* just trace the final value */
1389 trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
11bee43e
JB
1390
1391 if (try == 3) {
5a31d30b 1392 const u32 status = intel_uncore_read(uncore, ch_ctl);
02196c77 1393
81cdeca4 1394 if (status != intel_dp->aux_busy_last_status) {
eb020ca3
PB
1395 drm_WARN(&i915->drm, 1,
1396 "%s: not started (status 0x%08x)\n",
1397 intel_dp->aux.name, status);
81cdeca4 1398 intel_dp->aux_busy_last_status = status;
02196c77
MK
1399 }
1400
9ee32fea
DV
1401 ret = -EBUSY;
1402 goto out;
4f7f7b7e
CW
1403 }
1404
46a5ae9f 1405 /* Only 5 data registers! */
eb020ca3 1406 if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
46a5ae9f
PZ
1407 ret = -E2BIG;
1408 goto out;
1409 }
1410
ec5b01dd 1411 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
8159c796 1412 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
8159c796
VS
1413 send_bytes,
1414 aux_clock_divider);
1415
1416 send_ctl |= aux_send_ctl_flags;
5ed12a19 1417
bc86625a
CW
1418 /* Must try at least 3 times according to DP spec */
1419 for (try = 0; try < 5; try++) {
1420 /* Load the send data into the aux channel data registers */
1421 for (i = 0; i < send_bytes; i += 4)
5a31d30b
TU
1422 intel_uncore_write(uncore,
1423 ch_data[i >> 2],
1424 intel_dp_pack_aux(send + i,
1425 send_bytes - i));
bc86625a
CW
1426
1427 /* Send the command and wait for it to complete */
5a31d30b 1428 intel_uncore_write(uncore, ch_ctl, send_ctl);
bc86625a 1429
8a29c778 1430 status = intel_dp_aux_wait_done(intel_dp);
bc86625a
CW
1431
1432 /* Clear done status and any errors */
5a31d30b
TU
1433 intel_uncore_write(uncore,
1434 ch_ctl,
1435 status |
1436 DP_AUX_CH_CTL_DONE |
1437 DP_AUX_CH_CTL_TIME_OUT_ERROR |
1438 DP_AUX_CH_CTL_RECEIVE_ERROR);
bc86625a 1439
74ebf294
TP
1440 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
1441 * 400us delay required for errors and timeouts
1442 * Timeout errors from the HW already meet this
1443 * requirement so skip to next iteration
1444 */
3975f0aa
DP
1445 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
1446 continue;
1447
74ebf294
TP
1448 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1449 usleep_range(400, 500);
bc86625a 1450 continue;
74ebf294 1451 }
bc86625a 1452 if (status & DP_AUX_CH_CTL_DONE)
e058c945 1453 goto done;
bc86625a 1454 }
a4fc5ed6
KP
1455 }
1456
a4fc5ed6 1457 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
264c0247
VS
1458 drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
1459 intel_dp->aux.name, status);
9ee32fea
DV
1460 ret = -EBUSY;
1461 goto out;
a4fc5ed6
KP
1462 }
1463
e058c945 1464done:
a4fc5ed6
KP
1465 /* Check for timeout or receive error.
1466 * Timeouts occur when the sink is not connected
1467 */
a5b3da54 1468 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
264c0247
VS
1469 drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
1470 intel_dp->aux.name, status);
9ee32fea
DV
1471 ret = -EIO;
1472 goto out;
a5b3da54 1473 }
1ae8c0a5
KP
1474
1475 /* Timeouts occur when the device isn't connected, so they're
1476 * "normal" -- don't fill the kernel log with these */
a5b3da54 1477 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
264c0247
VS
1478 drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
1479 intel_dp->aux.name, status);
9ee32fea
DV
1480 ret = -ETIMEDOUT;
1481 goto out;
a4fc5ed6
KP
1482 }
1483
1484 /* Unload any bytes sent back from the other side */
1485 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
1486 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
14e01889
RV
1487
1488 /*
1489 * By BSpec: "Message sizes of 0 or >20 are not allowed."
1490 * We have no idea of what happened so we return -EBUSY so
1491 * drm layer takes care for the necessary retries.
1492 */
1493 if (recv_bytes == 0 || recv_bytes > 20) {
bdc6114e 1494 drm_dbg_kms(&i915->drm,
264c0247
VS
1495 "%s: Forbidden recv_bytes = %d on aux transaction\n",
1496 intel_dp->aux.name, recv_bytes);
14e01889
RV
1497 ret = -EBUSY;
1498 goto out;
1499 }
1500
a4fc5ed6
KP
1501 if (recv_bytes > recv_size)
1502 recv_bytes = recv_size;
0206e353 1503
4f7f7b7e 1504 for (i = 0; i < recv_bytes; i += 4)
5a31d30b 1505 intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
a4f1289e 1506 recv + i, recv_bytes - i);
a4fc5ed6 1507
9ee32fea
DV
1508 ret = recv_bytes;
1509out:
4d4dda48 1510 cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);
9ee32fea 1511
884f19e9
JN
1512 if (vdd)
1513 edp_panel_vdd_off(intel_dp, false);
1514
f39194a7 1515 pps_unlock(intel_dp, pps_wakeref);
5a31d30b 1516 intel_display_power_put_async(i915, aux_domain, aux_wakeref);
e39b999a 1517
8c10e226
ID
1518 if (is_tc_port)
1519 intel_tc_port_unlock(intel_dig_port);
1520
9ee32fea 1521 return ret;
a4fc5ed6
KP
1522}
1523
a6c8aff0
JN
1524#define BARE_ADDRESS_SIZE 3
1525#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
32078b72
VS
1526
1527static void
1528intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
1529 const struct drm_dp_aux_msg *msg)
1530{
1531 txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
1532 txbuf[1] = (msg->address >> 8) & 0xff;
1533 txbuf[2] = msg->address & 0xff;
1534 txbuf[3] = msg->size - 1;
1535}
1536
9d1a1031
JN
1537static ssize_t
1538intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
a4fc5ed6 1539{
9d1a1031 1540 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
830de422 1541 u8 txbuf[20], rxbuf[20];
9d1a1031 1542 size_t txsize, rxsize;
a4fc5ed6 1543 int ret;
a4fc5ed6 1544
32078b72 1545 intel_dp_aux_header(txbuf, msg);
46a5ae9f 1546
9d1a1031
JN
1547 switch (msg->request & ~DP_AUX_I2C_MOT) {
1548 case DP_AUX_NATIVE_WRITE:
1549 case DP_AUX_I2C_WRITE:
c1e74122 1550 case DP_AUX_I2C_WRITE_STATUS_UPDATE:
a6c8aff0 1551 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
a1ddefd8 1552 rxsize = 2; /* 0 or 1 data bytes */
f51a44b9 1553
9d1a1031
JN
1554 if (WARN_ON(txsize > 20))
1555 return -E2BIG;
a4fc5ed6 1556
dd788090
VS
1557 WARN_ON(!msg->buffer != !msg->size);
1558
d81a67cc
ID
1559 if (msg->buffer)
1560 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
a4fc5ed6 1561
f7606265 1562 ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
8159c796 1563 rxbuf, rxsize, 0);
9d1a1031
JN
1564 if (ret > 0) {
1565 msg->reply = rxbuf[0] >> 4;
a4fc5ed6 1566
a1ddefd8
JN
1567 if (ret > 1) {
1568 /* Number of bytes written in a short write. */
1569 ret = clamp_t(int, rxbuf[1], 0, msg->size);
1570 } else {
1571 /* Return payload size. */
1572 ret = msg->size;
1573 }
9d1a1031
JN
1574 }
1575 break;
46a5ae9f 1576
9d1a1031
JN
1577 case DP_AUX_NATIVE_READ:
1578 case DP_AUX_I2C_READ:
a6c8aff0 1579 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
9d1a1031 1580 rxsize = msg->size + 1;
a4fc5ed6 1581
9d1a1031
JN
1582 if (WARN_ON(rxsize > 20))
1583 return -E2BIG;
a4fc5ed6 1584
f7606265 1585 ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
8159c796 1586 rxbuf, rxsize, 0);
9d1a1031
JN
1587 if (ret > 0) {
1588 msg->reply = rxbuf[0] >> 4;
1589 /*
1590 * Assume happy day, and copy the data. The caller is
1591 * expected to check msg->reply before touching it.
1592 *
1593 * Return payload size.
1594 */
1595 ret--;
1596 memcpy(msg->buffer, rxbuf + 1, ret);
a4fc5ed6 1597 }
9d1a1031
JN
1598 break;
1599
1600 default:
1601 ret = -EINVAL;
1602 break;
a4fc5ed6 1603 }
f51a44b9 1604
9d1a1031 1605 return ret;
a4fc5ed6
KP
1606}
1607
8f7ce038 1608
4904fa66 1609static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
da00bdcf 1610{
de25eb7f 1611 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0
ID
1612 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1613 enum aux_ch aux_ch = dig_port->aux_ch;
4904fa66 1614
bdabdb63
VS
1615 switch (aux_ch) {
1616 case AUX_CH_B:
1617 case AUX_CH_C:
1618 case AUX_CH_D:
1619 return DP_AUX_CH_CTL(aux_ch);
da00bdcf 1620 default:
bdabdb63
VS
1621 MISSING_CASE(aux_ch);
1622 return DP_AUX_CH_CTL(AUX_CH_B);
da00bdcf
VS
1623 }
1624}
1625
4904fa66 1626static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
330e20ec 1627{
de25eb7f 1628 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0
ID
1629 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1630 enum aux_ch aux_ch = dig_port->aux_ch;
4904fa66 1631
bdabdb63
VS
1632 switch (aux_ch) {
1633 case AUX_CH_B:
1634 case AUX_CH_C:
1635 case AUX_CH_D:
1636 return DP_AUX_CH_DATA(aux_ch, index);
330e20ec 1637 default:
bdabdb63
VS
1638 MISSING_CASE(aux_ch);
1639 return DP_AUX_CH_DATA(AUX_CH_B, index);
330e20ec
VS
1640 }
1641}
1642
4904fa66 1643static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
bdabdb63 1644{
de25eb7f 1645 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0
ID
1646 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1647 enum aux_ch aux_ch = dig_port->aux_ch;
4904fa66 1648
bdabdb63
VS
1649 switch (aux_ch) {
1650 case AUX_CH_A:
1651 return DP_AUX_CH_CTL(aux_ch);
1652 case AUX_CH_B:
1653 case AUX_CH_C:
1654 case AUX_CH_D:
1655 return PCH_DP_AUX_CH_CTL(aux_ch);
da00bdcf 1656 default:
bdabdb63
VS
1657 MISSING_CASE(aux_ch);
1658 return DP_AUX_CH_CTL(AUX_CH_A);
da00bdcf
VS
1659 }
1660}
1661
4904fa66 1662static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
bdabdb63 1663{
de25eb7f 1664 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0
ID
1665 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1666 enum aux_ch aux_ch = dig_port->aux_ch;
4904fa66 1667
bdabdb63
VS
1668 switch (aux_ch) {
1669 case AUX_CH_A:
1670 return DP_AUX_CH_DATA(aux_ch, index);
1671 case AUX_CH_B:
1672 case AUX_CH_C:
1673 case AUX_CH_D:
1674 return PCH_DP_AUX_CH_DATA(aux_ch, index);
330e20ec 1675 default:
bdabdb63
VS
1676 MISSING_CASE(aux_ch);
1677 return DP_AUX_CH_DATA(AUX_CH_A, index);
330e20ec
VS
1678 }
1679}
1680
4904fa66 1681static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
bdabdb63 1682{
de25eb7f 1683 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0
ID
1684 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1685 enum aux_ch aux_ch = dig_port->aux_ch;
4904fa66 1686
bdabdb63
VS
1687 switch (aux_ch) {
1688 case AUX_CH_A:
1689 case AUX_CH_B:
1690 case AUX_CH_C:
1691 case AUX_CH_D:
bb187e93 1692 case AUX_CH_E:
bdabdb63 1693 case AUX_CH_F:
eb8de23c 1694 case AUX_CH_G:
bdabdb63 1695 return DP_AUX_CH_CTL(aux_ch);
da00bdcf 1696 default:
bdabdb63
VS
1697 MISSING_CASE(aux_ch);
1698 return DP_AUX_CH_CTL(AUX_CH_A);
da00bdcf
VS
1699 }
1700}
1701
4904fa66 1702static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
bdabdb63 1703{
de25eb7f 1704 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0
ID
1705 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1706 enum aux_ch aux_ch = dig_port->aux_ch;
4904fa66 1707
bdabdb63
VS
1708 switch (aux_ch) {
1709 case AUX_CH_A:
1710 case AUX_CH_B:
1711 case AUX_CH_C:
1712 case AUX_CH_D:
bb187e93 1713 case AUX_CH_E:
bdabdb63 1714 case AUX_CH_F:
eb8de23c 1715 case AUX_CH_G:
bdabdb63 1716 return DP_AUX_CH_DATA(aux_ch, index);
330e20ec 1717 default:
bdabdb63
VS
1718 MISSING_CASE(aux_ch);
1719 return DP_AUX_CH_DATA(AUX_CH_A, index);
330e20ec
VS
1720 }
1721}
1722
91e939ae
VS
1723static void
1724intel_dp_aux_fini(struct intel_dp *intel_dp)
1725{
1726 kfree(intel_dp->aux.name);
1727}
1728
1729static void
1730intel_dp_aux_init(struct intel_dp *intel_dp)
330e20ec 1731{
de25eb7f 1732 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0
ID
1733 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1734 struct intel_encoder *encoder = &dig_port->base;
91e939ae 1735
4904fa66
VS
1736 if (INTEL_GEN(dev_priv) >= 9) {
1737 intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
1738 intel_dp->aux_ch_data_reg = skl_aux_data_reg;
1739 } else if (HAS_PCH_SPLIT(dev_priv)) {
1740 intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
1741 intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
1742 } else {
1743 intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
1744 intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
1745 }
330e20ec 1746
91e939ae
VS
1747 if (INTEL_GEN(dev_priv) >= 9)
1748 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
1749 else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
1750 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
1751 else if (HAS_PCH_SPLIT(dev_priv))
1752 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
1753 else
1754 intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
bdabdb63 1755
91e939ae
VS
1756 if (INTEL_GEN(dev_priv) >= 9)
1757 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
1758 else
1759 intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
ab2c0672 1760
7a418e34 1761 drm_dp_aux_init(&intel_dp->aux);
8316f337 1762
7a418e34 1763 /* Failure to allocate our preferred name is not critical */
a87e692a
VS
1764 intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/port %c",
1765 aux_ch_name(dig_port->aux_ch),
bdabdb63 1766 port_name(encoder->port));
9d1a1031 1767 intel_dp->aux.transfer = intel_dp_aux_transfer;
a4fc5ed6
KP
1768}
1769
e588fa18 1770bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
ed63baaf 1771{
fc603ca7 1772 int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
e588fa18 1773
fc603ca7 1774 return max_rate >= 540000;
ed63baaf
TS
1775}
1776
2edd5327
MN
1777bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
1778{
1779 int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1780
1781 return max_rate >= 810000;
1782}
1783
c6bb3538
DV
/*
 * For non-DDI platforms, pick the precomputed DPLL divisor matching the
 * requested port clock from the per-platform table (g4x/pch/chv/vlv) and
 * store it in pipe_config.  If no table entry matches, pipe_config is left
 * untouched (clock_set stays false).
 */
static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	/* Select the divisor table for this platform. */
	if (IS_G4X(dev_priv)) {
		divisor = g4x_dpll;
		count = ARRAY_SIZE(g4x_dpll);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}
1816
0336400e
VS
/*
 * Format up to @nelem integers from @array into @str as a comma-separated
 * list ("1, 2, 3"), stopping silently once the @len-byte buffer is full.
 * The buffer is always NUL-terminated for len > 0 (snprintf guarantees
 * this, and str[0] is cleared up front for the nelem == 0 case).
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	/* Nothing we can safely write into a zero-sized buffer. */
	if (len == 0)
		return;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		/*
		 * Stop on output error (r < 0) or truncation (r >= len).
		 * The old "r >= len" check relied on the implicit
		 * int -> size_t conversion to also catch negative returns;
		 * make both conditions explicit.
		 */
		if (r < 0 || (size_t)r >= len)
			return;
		str += r;
		len -= r;
	}
}
1832
/*
 * Dump the source, sink and common link-rate arrays to the KMS debug log.
 * Early-outs when KMS debugging is disabled to avoid the formatting work.
 */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	char str[128]; /* FIXME: too big for stack? */

	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}
1852
50fec21a
VS
/*
 * Return the highest common (source ∩ sink) link rate not exceeding
 * intel_dp->max_link_rate.  Falls back to RBR (162000) with a WARN if no
 * common rate is available, which should not happen.
 */
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int len;

	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
	if (WARN_ON(len <= 0))
		return 162000;

	/* common_rates[] entries up to len-1 are usable; take the last one. */
	return intel_dp->common_rates[len - 1];
}
1864
ed4e9c1d
VS
1865int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1866{
8001b754
JN
1867 int i = intel_dp_rate_index(intel_dp->sink_rates,
1868 intel_dp->num_sink_rates, rate);
b5c72b20
JN
1869
1870 if (WARN_ON(i < 0))
1871 i = 0;
1872
1873 return i;
ed4e9c1d
VS
1874}
1875
94223d04 1876void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
830de422 1877 u8 *link_bw, u8 *rate_select)
04a60f9f 1878{
68f357cb
JN
1879 /* eDP 1.4 rate select method. */
1880 if (intel_dp->use_rate_select) {
04a60f9f
VS
1881 *link_bw = 0;
1882 *rate_select =
1883 intel_dp_rate_select(intel_dp, port_clock);
1884 } else {
1885 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1886 *rate_select = 0;
1887 }
1888}
1889
240999cf 1890static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
a4a15777
MN
1891 const struct intel_crtc_state *pipe_config)
1892{
1893 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1894
9770f220
MTP
1895 /* On TGL, FEC is supported on all Pipes */
1896 if (INTEL_GEN(dev_priv) >= 12)
1897 return true;
1898
1899 if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
1900 return true;
1901
1902 return false;
240999cf
AS
1903}
1904
1905static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
1906 const struct intel_crtc_state *pipe_config)
1907{
1908 return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
1909 drm_dp_sink_supports_fec(intel_dp->fec_capable);
1910}
1911
a4a15777 1912static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
deaaff49 1913 const struct intel_crtc_state *crtc_state)
a4a15777 1914{
deaaff49
JN
1915 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
1916
1917 if (!intel_dp_is_edp(intel_dp) && !crtc_state->fec_enable)
240999cf
AS
1918 return false;
1919
deaaff49 1920 return intel_dsc_source_support(encoder, crtc_state) &&
a4a15777
MN
1921 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
1922}
1923
f580bea9
JN
/*
 * Compute the pipe bpp to use: start from the requested pipe_bpp, clamp by
 * the DPCD downstream port's max bpc, and for eDP panels without EDID bpc
 * info, further clamp to the VBT-provided eDP bpp.
 */
static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpp = pipe_config->pipe_bpp;
	bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);

	/* bpc == 0 means the downstream port reports no limit. */
	if (bpc > 0)
		bpp = min(bpp, 3*bpc);

	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from vbt only for panels that dont have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
			drm_dbg_kms(&dev_priv->drm,
				    "clamping bpp for eDP panel to BIOS-provided %i\n",
				    dev_priv->vbt.edp.bpp);
			bpp = dev_priv->vbt.edp.bpp;
		}
	}

	return bpp;
}
1950
a4971453 1951/* Adjust link config limits based on compliance test requests. */
f1477219 1952void
a4971453
JN
1953intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
1954 struct intel_crtc_state *pipe_config,
1955 struct link_config_limits *limits)
1956{
1957 /* For DP Compliance we override the computed bpp for the pipe */
1958 if (intel_dp->compliance.test_data.bpc != 0) {
1959 int bpp = 3 * intel_dp->compliance.test_data.bpc;
1960
1961 limits->min_bpp = limits->max_bpp = bpp;
1962 pipe_config->dither_force_disable = bpp == 6 * 3;
1963
1964 DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp);
1965 }
1966
1967 /* Use values requested by Compliance Test Request */
1968 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
1969 int index;
1970
1971 /* Validate the compliance test data since max values
1972 * might have changed due to link train fallback.
1973 */
1974 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
1975 intel_dp->compliance.test_lane_count)) {
1976 index = intel_dp_rate_index(intel_dp->common_rates,
1977 intel_dp->num_common_rates,
1978 intel_dp->compliance.test_link_rate);
1979 if (index >= 0)
1980 limits->min_clock = limits->max_clock = index;
1981 limits->min_lane_count = limits->max_lane_count =
1982 intel_dp->compliance.test_lane_count;
1983 }
1984 }
1985}
1986
16668f48
GM
1987static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp)
1988{
1989 /*
1990 * bpp value was assumed to RGB format. And YCbCr 4:2:0 output
1991 * format of the number of bytes per pixel will be half the number
1992 * of bytes of RGB pixel.
1993 */
1994 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
1995 bpp /= 2;
1996
1997 return bpp;
1998}
1999
3acd115d 2000/* Optimize link config in order: max bpp, min clock, min lanes */
204474a6 2001static int
3acd115d
JN
2002intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
2003 struct intel_crtc_state *pipe_config,
2004 const struct link_config_limits *limits)
2005{
1326a92c 2006 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
3acd115d
JN
2007 int bpp, clock, lane_count;
2008 int mode_rate, link_clock, link_avail;
2009
2010 for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
ddb3d12a
VS
2011 int output_bpp = intel_dp_output_bpp(pipe_config, bpp);
2012
3acd115d 2013 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
ddb3d12a 2014 output_bpp);
3acd115d
JN
2015
2016 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
2017 for (lane_count = limits->min_lane_count;
2018 lane_count <= limits->max_lane_count;
2019 lane_count <<= 1) {
2020 link_clock = intel_dp->common_rates[clock];
2021 link_avail = intel_dp_max_data_rate(link_clock,
2022 lane_count);
2023
2024 if (mode_rate <= link_avail) {
2025 pipe_config->lane_count = lane_count;
2026 pipe_config->pipe_bpp = bpp;
2027 pipe_config->port_clock = link_clock;
2028
204474a6 2029 return 0;
3acd115d
JN
2030 }
2031 }
2032 }
2033 }
2034
204474a6 2035 return -EINVAL;
3acd115d
JN
2036}
2037
a4a15777
MN
2038static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
2039{
2040 int i, num_bpc;
2041 u8 dsc_bpc[3] = {0};
2042
2043 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
2044 dsc_bpc);
2045 for (i = 0; i < num_bpc; i++) {
2046 if (dsc_max_bpc >= dsc_bpc[i])
2047 return dsc_bpc[i] * 3;
2048 }
2049
2050 return 0;
2051}
2052
7a7b5be9
JN
/* Highest DSC minor version this driver will negotiate (1.1). */
#define DSC_SUPPORTED_VERSION_MIN		1

/*
 * Fill the DSC config (crtc_state->dsc.config) from the sink's DSC DPCD:
 * slice height, DSC version, RGB conversion capability, line buffer depth
 * and block prediction, then compute the rate-control parameters.
 * Returns 0 on success or a negative errno.
 */
static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
				       struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
	u8 line_buf_depth;
	int ret;

	ret = intel_dsc_compute_params(encoder, crtc_state);
	if (ret)
		return ret;

	/*
	 * Slice Height of 8 works for all currently available panels. So start
	 * with that if pic_height is an integral multiple of 8. Eventually add
	 * logic to try multiple slice heights.
	 */
	if (vdsc_cfg->pic_height % 8 == 0)
		vdsc_cfg->slice_height = 8;
	else if (vdsc_cfg->pic_height % 4 == 0)
		vdsc_cfg->slice_height = 4;
	else
		vdsc_cfg->slice_height = 2;

	/* DSC version from the sink's DP_DSC_REV register. */
	vdsc_cfg->dsc_version_major =
		(intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
	/* Clamp the minor version to what the driver supports. */
	vdsc_cfg->dsc_version_minor =
		min(DSC_SUPPORTED_VERSION_MIN,
		    (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		     DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);

	vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
		DP_DSC_RGB;

	line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
	if (!line_buf_depth) {
		DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n");
		return -EINVAL;
	}

	/* DSC 1.2 and 1.1 encode the maximum line buffer depth differently. */
	if (vdsc_cfg->dsc_version_minor == 2)
		vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
	else
		vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;

	vdsc_cfg->block_pred_enable =
		intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
		DP_DSC_BLK_PREDICTION_IS_SUPPORTED;

	return drm_dsc_compute_rc_parameters(vdsc_cfg);
}
2109
204474a6
LP
/*
 * Try to enable DSC for a mode that does not fit the uncompressed link
 * bandwidth.  Picks max link rate/lane count, computes the compressed bpp
 * and slice count (eDP from DPCD directly, DP via the helpers), decides
 * whether two VDSC engines are needed, and computes the DSC parameters.
 * Returns 0 on success or a negative errno if DSC cannot be used.
 */
static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
				       struct intel_crtc_state *pipe_config,
				       struct drm_connector_state *conn_state,
				       struct link_config_limits *limits)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	u8 dsc_max_bpc;
	int pipe_bpp;
	int ret;

	/* FEC is required for DSC on external DP, never used on eDP. */
	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
		intel_dp_supports_fec(intel_dp, pipe_config);

	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
		return -EINVAL;

	/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
	if (INTEL_GEN(dev_priv) >= 12)
		dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
	else
		dsc_max_bpc = min_t(u8, 10,
				    conn_state->max_requested_bpc);

	pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);

	/* Min Input BPC for ICL+ is 8 */
	if (pipe_bpp < 8 * 3) {
		drm_dbg_kms(&dev_priv->drm,
			    "No DSC support for less than 8bpc\n");
		return -EINVAL;
	}

	/*
	 * For now enable DSC for max bpp, max link rate, max lane count.
	 * Optimize this later for the minimum possible link rate/lane count
	 * with DSC enabled for the requested mode.
	 */
	pipe_config->pipe_bpp = pipe_bpp;
	pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
	pipe_config->lane_count = limits->max_lane_count;

	if (intel_dp_is_edp(intel_dp)) {
		/* eDP: read compressed bpp and slice count from the DSC DPCD. */
		pipe_config->dsc.compressed_bpp =
			min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
			      pipe_config->pipe_bpp);
		pipe_config->dsc.slice_count =
			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
							true);
	} else {
		u16 dsc_max_output_bpp;
		u8 dsc_dp_slice_count;

		/* DP: derive compressed bpp/slice count from link and mode. */
		dsc_max_output_bpp =
			intel_dp_dsc_get_output_bpp(dev_priv,
						    pipe_config->port_clock,
						    pipe_config->lane_count,
						    adjusted_mode->crtc_clock,
						    adjusted_mode->crtc_hdisplay);
		dsc_dp_slice_count =
			intel_dp_dsc_get_slice_count(intel_dp,
						     adjusted_mode->crtc_clock,
						     adjusted_mode->crtc_hdisplay);
		if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
			drm_dbg_kms(&dev_priv->drm,
				    "Compressed BPP/Slice Count not supported\n");
			return -EINVAL;
		}
		pipe_config->dsc.compressed_bpp = min_t(u16,
							dsc_max_output_bpp >> 4,
							pipe_config->pipe_bpp);
		pipe_config->dsc.slice_count = dsc_dp_slice_count;
	}
	/*
	 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
	 * is greater than the maximum Cdclock and if slice count is even
	 * then we need to use 2 VDSC instances.
	 */
	if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
		if (pipe_config->dsc.slice_count > 1) {
			pipe_config->dsc.dsc_split = true;
		} else {
			drm_dbg_kms(&dev_priv->drm,
				    "Cannot split stream to use 2 VDSC instances\n");
			return -EINVAL;
		}
	}

	ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
	if (ret < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Cannot compute valid DSC parameters for Input Bpp = %d "
			    "Compressed BPP = %d\n",
			    pipe_config->pipe_bpp,
			    pipe_config->dsc.compressed_bpp);
		return ret;
	}

	pipe_config->dsc.compression_enable = true;
	drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
		    "Compressed Bpp = %d Slice Count = %d\n",
		    pipe_config->pipe_bpp,
		    pipe_config->dsc.compressed_bpp,
		    pipe_config->dsc.slice_count);

	return 0;
}
2218
4e2056e0
VS
2219int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state)
2220{
2221 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB)
2222 return 6 * 3;
2223 else
2224 return 8 * 3;
2225}
2226
204474a6 2227static int
981a63eb 2228intel_dp_compute_link_config(struct intel_encoder *encoder,
a4a15777
MN
2229 struct intel_crtc_state *pipe_config,
2230 struct drm_connector_state *conn_state)
a4fc5ed6 2231{
1326a92c 2232 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
b7d02c3a 2233 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
7c2781e4 2234 struct link_config_limits limits;
94ca719e 2235 int common_len;
204474a6 2236 int ret;
7c2781e4 2237
975ee5fc 2238 common_len = intel_dp_common_len_rate_limit(intel_dp,
e6c0c64a 2239 intel_dp->max_link_rate);
a8f3ef61
SJ
2240
2241 /* No common link rates between source and sink */
3a47ae20 2242 drm_WARN_ON(encoder->base.dev, common_len <= 0);
a8f3ef61 2243
7c2781e4
JN
2244 limits.min_clock = 0;
2245 limits.max_clock = common_len - 1;
2246
2247 limits.min_lane_count = 1;
2248 limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
2249
4e2056e0 2250 limits.min_bpp = intel_dp_min_bpp(pipe_config);
7c2781e4 2251 limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
a4fc5ed6 2252
f11cb1c1 2253 if (intel_dp_is_edp(intel_dp)) {
344c5bbc
JN
2254 /*
2255 * Use the maximum clock and number of lanes the eDP panel
f11cb1c1
JN
2256 * advertizes being capable of. The panels are generally
2257 * designed to support only a single clock and lane
2258 * configuration, and typically these values correspond to the
2259 * native resolution of the panel.
344c5bbc 2260 */
7c2781e4
JN
2261 limits.min_lane_count = limits.max_lane_count;
2262 limits.min_clock = limits.max_clock;
7984211e 2263 }
657445fe 2264
a4971453
JN
2265 intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
2266
7c2781e4
JN
2267 DRM_DEBUG_KMS("DP link computation with max lane count %i "
2268 "max rate %d max bpp %d pixel clock %iKHz\n",
2269 limits.max_lane_count,
2270 intel_dp->common_rates[limits.max_clock],
2271 limits.max_bpp, adjusted_mode->crtc_clock);
2272
f11cb1c1
JN
2273 /*
2274 * Optimize for slow and wide. This is the place to add alternative
2275 * optimization policy.
2276 */
2277 ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
a4a15777
MN
2278
2279 /* enable compression if the mode doesn't fit available BW */
e845f099 2280 DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
204474a6
LP
2281 if (ret || intel_dp->force_dsc_en) {
2282 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
2283 conn_state, &limits);
2284 if (ret < 0)
2285 return ret;
7769db58 2286 }
981a63eb 2287
010663a6 2288 if (pipe_config->dsc.compression_enable) {
a4a15777
MN
2289 DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
2290 pipe_config->lane_count, pipe_config->port_clock,
2291 pipe_config->pipe_bpp,
010663a6 2292 pipe_config->dsc.compressed_bpp);
a4a15777
MN
2293
2294 DRM_DEBUG_KMS("DP link rate required %i available %i\n",
2295 intel_dp_link_required(adjusted_mode->crtc_clock,
010663a6 2296 pipe_config->dsc.compressed_bpp),
a4a15777
MN
2297 intel_dp_max_data_rate(pipe_config->port_clock,
2298 pipe_config->lane_count));
2299 } else {
2300 DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
2301 pipe_config->lane_count, pipe_config->port_clock,
2302 pipe_config->pipe_bpp);
2303
2304 DRM_DEBUG_KMS("DP link rate required %i available %i\n",
2305 intel_dp_link_required(adjusted_mode->crtc_clock,
2306 pipe_config->pipe_bpp),
2307 intel_dp_max_data_rate(pipe_config->port_clock,
2308 pipe_config->lane_count));
2309 }
204474a6 2310 return 0;
981a63eb
JN
2311}
2312
8e9d645c
GM
/*
 * Switch the crtc state to YCbCr 4:2:0 output when the mode is 420-only
 * and both the sink (colorimetry DPCD) and connector allow it; otherwise
 * leave the state untouched and return 0.  Allocates the scaler needed
 * for the 4:2:0 conversion and sets up fullscreen panel fitting.
 */
static int
intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
			 struct drm_connector *connector,
			 struct intel_crtc_state *crtc_state)
{
	const struct drm_display_info *info = &connector->display_info;
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	int ret;

	if (!drm_mode_is_420_only(info, adjusted_mode) ||
	    !intel_dp_get_colorimetry_status(intel_dp) ||
	    !connector->ycbcr_420_allowed)
		return 0;

	crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;

	/* YCBCR 420 output conversion needs a scaler */
	ret = skl_update_scaler_crtc(crtc_state);
	if (ret) {
		DRM_DEBUG_KMS("Scaler allocation for output failed\n");
		return ret;
	}

	intel_pch_panel_fitting(crtc, crtc_state, DRM_MODE_SCALE_FULLSCREEN);

	return 0;
}
2342
37aa52bf
VS
/*
 * Decide whether RGB output should use the limited (16-235) quantization
 * range, honoring the connector's broadcast_rgb property; in AUTO mode the
 * CEA-861-E default for the current mode is used (except at 18 bpp).
 */
bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state)
{
	const struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/*
	 * Our YCbCr output is always limited range.
	 * crtc_state->limited_color_range only applies to RGB,
	 * and it must never be set for YCbCr or we risk setting
	 * some conflicting bits in PIPECONF which will mess up
	 * the colors on the monitor.
	 */
	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		return false;

	if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		return crtc_state->pipe_bpp != 18 &&
			drm_default_rgb_quant_range(adjusted_mode) ==
			HDMI_QUANTIZATION_RANGE_LIMITED;
	} else {
		return intel_conn_state->broadcast_rgb ==
			INTEL_BROADCAST_RGB_LIMITED;
	}
}
2375
07130981
KV
2376static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
2377 enum port port)
2378{
2379 if (IS_G4X(dev_priv))
2380 return false;
2381 if (INTEL_GEN(dev_priv) < 12 && port == PORT_A)
2382 return false;
2383
2384 return true;
2385}
2386
204474a6 2387int
981a63eb
JN
2388intel_dp_compute_config(struct intel_encoder *encoder,
2389 struct intel_crtc_state *pipe_config,
2390 struct drm_connector_state *conn_state)
2391{
2392 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1326a92c 2393 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
b7d02c3a
VS
2394 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2395 struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
981a63eb 2396 enum port port = encoder->port;
2225f3c6 2397 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
981a63eb
JN
2398 struct intel_connector *intel_connector = intel_dp->attached_connector;
2399 struct intel_digital_connector_state *intel_conn_state =
2400 to_intel_digital_connector_state(conn_state);
0883ce81 2401 bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
53ca2edc 2402 DP_DPCD_QUIRK_CONSTANT_N);
8e9d645c 2403 int ret = 0, output_bpp;
981a63eb
JN
2404
2405 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
2406 pipe_config->has_pch_encoder = true;
2407
d9facae6 2408 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
0c06fa15 2409
668b6c17
SS
2410 if (lspcon->active)
2411 lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
8e9d645c
GM
2412 else
2413 ret = intel_dp_ycbcr420_config(intel_dp, &intel_connector->base,
2414 pipe_config);
2415
2416 if (ret)
2417 return ret;
668b6c17 2418
981a63eb 2419 pipe_config->has_drrs = false;
07130981 2420 if (!intel_dp_port_has_audio(dev_priv, port))
981a63eb
JN
2421 pipe_config->has_audio = false;
2422 else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
2423 pipe_config->has_audio = intel_dp->has_audio;
2424 else
2425 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
2426
2427 if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
d93fa1b4
JN
2428 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
2429 adjusted_mode);
981a63eb
JN
2430
2431 if (INTEL_GEN(dev_priv) >= 9) {
981a63eb
JN
2432 ret = skl_update_scaler_crtc(pipe_config);
2433 if (ret)
2434 return ret;
2435 }
2436
b2ae318a 2437 if (HAS_GMCH(dev_priv))
981a63eb
JN
2438 intel_gmch_panel_fitting(intel_crtc, pipe_config,
2439 conn_state->scaling_mode);
2440 else
2441 intel_pch_panel_fitting(intel_crtc, pipe_config,
2442 conn_state->scaling_mode);
2443 }
2444
e4dd27aa 2445 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
204474a6 2446 return -EINVAL;
e4dd27aa 2447
b2ae318a 2448 if (HAS_GMCH(dev_priv) &&
981a63eb 2449 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
204474a6 2450 return -EINVAL;
981a63eb
JN
2451
2452 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
204474a6 2453 return -EINVAL;
981a63eb 2454
98c93394
VS
2455 if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
2456 return -EINVAL;
2457
204474a6
LP
2458 ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
2459 if (ret < 0)
2460 return ret;
981a63eb 2461
37aa52bf
VS
2462 pipe_config->limited_color_range =
2463 intel_dp_limited_color_range(pipe_config, conn_state);
55bc60db 2464
010663a6
JN
2465 if (pipe_config->dsc.compression_enable)
2466 output_bpp = pipe_config->dsc.compressed_bpp;
a4a15777 2467 else
16668f48 2468 output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp);
aefa95ba
VS
2469
2470 intel_link_compute_m_n(output_bpp,
2471 pipe_config->lane_count,
2472 adjusted_mode->crtc_clock,
2473 pipe_config->port_clock,
2474 &pipe_config->dp_m_n,
ed06efb8 2475 constant_n, pipe_config->fec_enable);
9d1a455b 2476
439d7ac0 2477 if (intel_connector->panel.downclock_mode != NULL &&
96178eeb 2478 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
f769cd24 2479 pipe_config->has_drrs = true;
aefa95ba 2480 intel_link_compute_m_n(output_bpp,
981a63eb
JN
2481 pipe_config->lane_count,
2482 intel_connector->panel.downclock_mode->clock,
2483 pipe_config->port_clock,
2484 &pipe_config->dp_m2_n2,
ed06efb8 2485 constant_n, pipe_config->fec_enable);
439d7ac0
PB
2486 }
2487
4f8036a2 2488 if (!HAS_DDI(dev_priv))
840b32b7 2489 intel_dp_set_clock(encoder, pipe_config);
c6bb3538 2490
4d90f2d5
VS
2491 intel_psr_compute_config(intel_dp, pipe_config);
2492
204474a6 2493 return 0;
a4fc5ed6
KP
2494}
2495
901c2daf 2496void intel_dp_set_link_params(struct intel_dp *intel_dp,
830de422 2497 int link_rate, u8 lane_count,
dfa10480 2498 bool link_mst)
901c2daf 2499{
edb2e530 2500 intel_dp->link_trained = false;
dfa10480
ACO
2501 intel_dp->link_rate = link_rate;
2502 intel_dp->lane_count = lane_count;
2503 intel_dp->link_mst = link_mst;
901c2daf
VS
2504}
2505
85cb48a1 2506static void intel_dp_prepare(struct intel_encoder *encoder,
5f88a9c6 2507 const struct intel_crtc_state *pipe_config)
a4fc5ed6 2508{
2f773477 2509 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
b7d02c3a 2510 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
8f4f2797 2511 enum port port = encoder->port;
2225f3c6 2512 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
1326a92c 2513 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
a4fc5ed6 2514
dfa10480
ACO
2515 intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
2516 pipe_config->lane_count,
2517 intel_crtc_has_type(pipe_config,
2518 INTEL_OUTPUT_DP_MST));
901c2daf 2519
417e822d 2520 /*
1a2eb460 2521 * There are four kinds of DP registers:
417e822d
KP
2522 *
2523 * IBX PCH
1a2eb460
KP
2524 * SNB CPU
2525 * IVB CPU
417e822d
KP
2526 * CPT PCH
2527 *
2528 * IBX PCH and CPU are the same for almost everything,
2529 * except that the CPU DP PLL is configured in this
2530 * register
2531 *
2532 * CPT PCH is quite different, having many bits moved
2533 * to the TRANS_DP_CTL register instead. That
9eae5e27 2534 * configuration happens (oddly) in ilk_pch_enable
417e822d 2535 */
9c9e7927 2536
417e822d
KP
2537 /* Preserve the BIOS-computed detected bit. This is
2538 * supposed to be read-only.
2539 */
b4e33881 2540 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
a4fc5ed6 2541
417e822d 2542 /* Handle DP bits in common between all three register formats */
417e822d 2543 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
85cb48a1 2544 intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);
a4fc5ed6 2545
417e822d 2546 /* Split out the IBX/CPU vs CPT settings */
32f9d658 2547
b752e995 2548 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
1a2eb460
KP
2549 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2550 intel_dp->DP |= DP_SYNC_HS_HIGH;
2551 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2552 intel_dp->DP |= DP_SYNC_VS_HIGH;
2553 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
2554
6aba5b6c 2555 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1a2eb460
KP
2556 intel_dp->DP |= DP_ENHANCED_FRAMING;
2557
59b74c49 2558 intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
6e266956 2559 } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
e3ef4479
VS
2560 u32 trans_dp;
2561
39e5fa88 2562 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
e3ef4479 2563
b4e33881 2564 trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe));
e3ef4479
VS
2565 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2566 trans_dp |= TRANS_DP_ENH_FRAMING;
2567 else
2568 trans_dp &= ~TRANS_DP_ENH_FRAMING;
b4e33881 2569 intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp);
39e5fa88 2570 } else {
c99f53f7 2571 if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
0f2a2a75 2572 intel_dp->DP |= DP_COLOR_RANGE_16_235;
417e822d
KP
2573
2574 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2575 intel_dp->DP |= DP_SYNC_HS_HIGH;
2576 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2577 intel_dp->DP |= DP_SYNC_VS_HIGH;
2578 intel_dp->DP |= DP_LINK_TRAIN_OFF;
2579
6aba5b6c 2580 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
417e822d
KP
2581 intel_dp->DP |= DP_ENHANCED_FRAMING;
2582
920a14b2 2583 if (IS_CHERRYVIEW(dev_priv))
59b74c49
VS
2584 intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
2585 else
2586 intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
32f9d658 2587 }
a4fc5ed6
KP
2588}
2589
ffd6749d
PZ
/*
 * Panel power sequencer (PPS) status mask/value pairs used with
 * wait_panel_status() to wait until the panel reaches a given state.
 */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

static void intel_pps_verify_state(struct intel_dp *intel_dp);
de9c1b6b 2600
4be73780 2601static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
2602 u32 mask,
2603 u32 value)
bd943159 2604{
de25eb7f 2605 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
f0f59a00 2606 i915_reg_t pp_stat_reg, pp_ctrl_reg;
453c5420 2607
e39b999a
VS
2608 lockdep_assert_held(&dev_priv->pps_mutex);
2609
46bd8383 2610 intel_pps_verify_state(intel_dp);
de9c1b6b 2611
bf13e81b
JN
2612 pp_stat_reg = _pp_stat_reg(intel_dp);
2613 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 2614
bdc6114e
WK
2615 drm_dbg_kms(&dev_priv->drm,
2616 "mask %08x value %08x status %08x control %08x\n",
2617 mask, value,
b4e33881
JN
2618 intel_de_read(dev_priv, pp_stat_reg),
2619 intel_de_read(dev_priv, pp_ctrl_reg));
32ce697c 2620
4cb3b44d
DCS
2621 if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
2622 mask, value, 5000))
bdc6114e
WK
2623 drm_err(&dev_priv->drm,
2624 "Panel status timeout: status %08x control %08x\n",
b4e33881
JN
2625 intel_de_read(dev_priv, pp_stat_reg),
2626 intel_de_read(dev_priv, pp_ctrl_reg));
54c136d4 2627
bdc6114e 2628 drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
99ea7127 2629}
32ce697c 2630
4be73780 2631static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
2632{
2633 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 2634 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
2635}
2636
4be73780 2637static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
2638{
2639 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 2640 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
2641}
2642
4be73780 2643static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127 2644{
d28d4731
AK
2645 ktime_t panel_power_on_time;
2646 s64 panel_power_off_duration;
2647
99ea7127 2648 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c 2649
d28d4731
AK
2650 /* take the difference of currrent time and panel power off time
2651 * and then make panel wait for t11_t12 if needed. */
2652 panel_power_on_time = ktime_get_boottime();
2653 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
2654
dce56b3c
PZ
2655 /* When we disable the VDD override bit last we have to do the manual
2656 * wait. */
d28d4731
AK
2657 if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
2658 wait_remaining_ms_from_jiffies(jiffies,
2659 intel_dp->panel_power_cycle_delay - panel_power_off_duration);
dce56b3c 2660
4be73780 2661 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
2662}
2663
4be73780 2664static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
2665{
2666 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
2667 intel_dp->backlight_on_delay);
2668}
2669
4be73780 2670static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
2671{
2672 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
2673 intel_dp->backlight_off_delay);
2674}
99ea7127 2675
832dd3c1
KP
/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
	/* On non-DDI platforms the register must carry the unlock key;
	 * warn and patch it in if the hardware value was locked. */
	if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
			(control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}
2695
951468f3
VS
/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * Returns true if this call newly requested VDD (i.e. the caller is
 * responsible for the matching disable), false otherwise.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return false;

	/* Keep any pending delayed VDD-off from racing with this request. */
	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	/* VDD already up: nothing to program, just report ownership. */
	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	intel_display_power_get(dev_priv,
				intel_aux_power_domain(intel_dig_port));

	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
		    intel_dig_port->base.base.base.id,
		    intel_dig_port->base.base.name);

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ilk_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);
	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		    intel_de_read(dev_priv, pp_stat_reg),
		    intel_de_read(dev_priv, pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] panel power wasn't enabled\n",
			    intel_dig_port->base.base.base.id,
			    intel_dig_port->base.base.name);
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
2754
951468f3
VS
/*
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;
	bool vdd;

	if (!intel_dp_is_edp(intel_dp))
		return;

	vdd = false;
	with_pps_lock(intel_dp, wakeref)
		vdd = edp_panel_vdd_on(intel_dp);
	/* edp_panel_vdd_on() returning false means VDD was already
	 * requested, which this non-nestable API treats as a bug. */
	I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
			dp_to_dig_port(intel_dp)->base.base.base.id,
			dp_to_dig_port(intel_dp)->base.base.name);
}
2777
/*
 * Synchronously drop the VDD force bit and release the associated AUX
 * power reference. Caller must hold pps_mutex and must not still want VDD.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
		    intel_dig_port->base.base.base.id,
		    intel_dig_port->base.base.name);

	pp = ilk_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		    intel_de_read(dev_priv, pp_stat_reg),
		    intel_de_read(dev_priv, pp_ctrl_reg));

	/* Dropping VDD with panel power already off starts the t11_t12
	 * power cycle window; record its start time. */
	if ((pp & PANEL_POWER_ON) == 0)
		intel_dp->panel_power_off_time = ktime_get_boottime();

	intel_display_power_put_unchecked(dev_priv,
					  intel_aux_power_domain(intel_dig_port));
}
5d613501 2817
/*
 * Delayed-work callback that turns VDD off, unless someone re-requested
 * VDD in the meantime (want_panel_vdd set again).
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp =
		container_of(to_delayed_work(__work),
			     struct intel_dp, panel_vdd_work);
	intel_wakeref_t wakeref;

	with_pps_lock(intel_dp, wakeref) {
		if (!intel_dp->want_panel_vdd)
			edp_panel_vdd_off_sync(intel_dp);
	}
}
2830
aba86890
ID
/* Schedule the deferred VDD-off work. */
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	unsigned long delay;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}
2843
951468f3
VS
/*
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * @sync: true to drop VDD immediately, false to defer via delayed work.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
			dp_to_dig_port(intel_dp)->base.base.base.id,
			dp_to_dig_port(intel_dp)->base.base.name);

	intel_dp->want_panel_vdd = false;

	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}
2869
/*
 * Turn eDP panel power on via the PPS. Caller must hold pps_mutex.
 * Records last_power_on for subsequent backlight-on delay accounting.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
		    dp_to_dig_port(intel_dp)->base.base.base.id,
		    dp_to_dig_port(intel_dp)->base.base.name);

	if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
		     "[ENCODER:%d:%s] panel power already on\n",
		     dp_to_dig_port(intel_dp)->base.base.base.id,
		     dp_to_dig_port(intel_dp)->base.base.name))
		return;

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ilk_get_pp_control(intel_dp);
	if (IS_GEN(dev_priv, 5)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}

	pp |= PANEL_POWER_ON;
	if (!IS_GEN(dev_priv, 5))
		pp |= PANEL_POWER_RESET;

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	wait_panel_on(intel_dp);
	intel_dp->last_power_on = jiffies;

	if (IS_GEN(dev_priv, 5)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}
}
e39b999a 2918
9f0fb5be
VS
/* Public wrapper: turn eDP panel power on under the PPS lock. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref)
		edp_panel_on(intel_dp);
}
2929
9f0fb5be
VS
2930
/*
 * Turn eDP panel power off via the PPS. Caller must hold pps_mutex and
 * must have VDD forced on (the sequence drops VDD together with power).
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
		    dig_port->base.base.base.id, dig_port->base.base.name);

	drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd,
		 "Need [ENCODER:%d:%s] VDD to turn off panel\n",
		 dig_port->base.base.base.id, dig_port->base.base.name);

	pp = ilk_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	wait_panel_off(intel_dp);
	intel_dp->panel_power_off_time = ktime_get_boottime();

	/* We got a reference when we enabled the VDD. */
	intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
}
e39b999a 2969
9f0fb5be
VS
/* Public wrapper: turn eDP panel power off under the PPS lock. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref)
		edp_panel_off(intel_dp);
}
2980
1250d107
JN
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	with_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		pp = ilk_get_pp_control(intel_dp);
		pp |= EDP_BLC_ENABLE;

		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}
}
3006
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));

	if (!intel_dp_is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* PWM first, then the PPS backlight-enable bit. */
	intel_panel_enable_backlight(crtc_state, conn_state);
	_intel_edp_backlight_on(intel_dp);
}
3021
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		pp = ilk_get_pp_control(intel_dp);
		pp &= ~EDP_BLC_ENABLE;

		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}

	/* Record the off time and wait out the backlight-off delay (T9). */
	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}
f7d2323c 3045
/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));

	if (!intel_dp_is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* Reverse order of enable: PPS backlight bit first, then PWM. */
	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(old_conn_state);
}
a4fc5ed6 3059
73580fb7
JN
/*
 * Hook for controlling the panel power control backlight through the bl_power
 * sysfs attribute. Take care to handle multiple calls.
 */
static void intel_edp_backlight_power(struct intel_connector *connector,
				      bool enable)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	intel_wakeref_t wakeref;
	bool is_enabled;

	is_enabled = false;
	with_pps_lock(intel_dp, wakeref)
		is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
	/* No-op if the hardware already matches the requested state. */
	if (is_enabled == enable)
		return;

	DRM_DEBUG_KMS("panel power control backlight %s\n",
		      enable ? "enable" : "disable");

	if (enable)
		_intel_edp_backlight_on(intel_dp);
	else
		_intel_edp_backlight_off(intel_dp);
}
3085
64e1077a
VS
/* State assertion: warn if the DP port enable bit differs from @state. */
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN;

	I915_STATE_WARN(cur_state != state,
			"[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
			dig_port->base.base.base.id, dig_port->base.base.name,
			onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
3098
/* State assertion: warn if the eDP PLL enable bit (DP_A) differs from @state. */
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
	bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE;

	I915_STATE_WARN(cur_state != state,
			"eDP PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
3109
9eae5e27
LDM
/*
 * Enable the eDP PLL on port A (ILK-era hardware), programming the
 * frequency select before the enable bit. Pipe and port must be disabled.
 */
static void ilk_edp_pll_on(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_disabled(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n",
		    pipe_config->port_clock);

	intel_dp->DP &= ~DP_PLL_FREQ_MASK;

	if (pipe_config->port_clock == 162000)
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	intel_de_write(dev_priv, DP_A, intel_dp->DP);
	intel_de_posting_read(dev_priv, DP_A);
	udelay(500);

	/*
	 * [DevILK] Work around required when enabling DP PLL
	 * while a pipe is enabled going to FDI:
	 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
	 * 2. Program DP PLL enable
	 */
	if (IS_GEN(dev_priv, 5))
		intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);

	intel_dp->DP |= DP_PLL_ENABLE;

	intel_de_write(dev_priv, DP_A, intel_dp->DP);
	intel_de_posting_read(dev_priv, DP_A);
	udelay(200);
}
3149
9eae5e27
LDM
/* Disable the eDP PLL on port A. Pipe must be disabled, PLL must be on. */
static void ilk_edp_pll_off(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_enabled(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n");

	intel_dp->DP &= ~DP_PLL_ENABLE;

	intel_de_write(dev_priv, DP_A, intel_dp->DP);
	intel_de_posting_read(dev_priv, DP_A);
	udelay(200);
}
3168
857c416e
VS
/*
 * Report whether the sink's downstream port needs the source kept in D0
 * to signal hotplug (DPCD 1.1 branch device with an HPD-capable DS port).
 */
static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
{
	/*
	 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
	 * be capable of signalling downstream hpd with a long pulse.
	 * Whether or not that means D3 is safe to use is not clear,
	 * but let's assume so until proven otherwise.
	 *
	 * FIXME should really check all downstream ports...
	 */
	return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
		drm_dp_is_branch(intel_dp->dpcd) &&
		intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
}
3183
2279298d
GS
/*
 * Enable/disable DSC decompression in the sink via DPCD, if the crtc
 * state has compression enabled; logs (but does not propagate) failures.
 */
void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state,
					   bool enable)
{
	int ret;

	if (!crtc_state->dsc.compression_enable)
		return;

	ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
				 enable ? DP_DECOMPRESSION_EN : 0);
	if (ret < 0)
		DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
			      enable ? "enable" : "disable");
}
3199
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		/* Keep the sink in D0 if downstream HPD depends on it. */
		if (downstream_hpd_needs_d0(intel_dp))
			return;

		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
					 DP_SET_POWER_D3);
	} else {
		struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
						 DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}

		if (ret == 1 && lspcon->active)
			lspcon_wait_pcon_mode(lspcon);
	}

	if (ret != 1)
		DRM_DEBUG_KMS("failed to %s sink power state\n",
			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
3238
59b74c49
VS
/*
 * On CPT PCH, find which pipe's transcoder has @port selected.
 * Returns false (and *pipe = PIPE_A for the asserts) if none does.
 */
static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
				 enum port port, enum pipe *pipe)
{
	enum pipe p;

	for_each_pipe(dev_priv, p) {
		u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p));

		if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
			*pipe = p;
			return true;
		}
	}

	drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n",
		    port_name(port));

	/* must initialize pipe to something for the asserts */
	*pipe = PIPE_A;

	return false;
}
3261
/*
 * Read whether the DP port at @dp_reg is enabled, and report the pipe
 * it is (or would be) driving via the platform-specific select bits.
 */
bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
			   i915_reg_t dp_reg, enum port port,
			   enum pipe *pipe)
{
	bool ret;
	u32 val;

	val = intel_de_read(dev_priv, dp_reg);

	ret = val & DP_PORT_EN;

	/* asserts want to know the pipe even if the port is disabled */
	if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
		*pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
	else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
		ret &= cpt_dp_port_selected(dev_priv, port, pipe);
	else if (IS_CHERRYVIEW(dev_priv))
		*pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
	else
		*pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;

	return ret;
}
3285
19d8fe15
DV
/*
 * Encoder ->get_hw_state hook: report whether the DP port is enabled and
 * on which pipe, taking a power reference only if the domain is already up.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	intel_wakeref_t wakeref;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     encoder->power_domain);
	if (!wakeref)
		return false;

	ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
				    encoder->port, pipe);

	intel_display_power_put(dev_priv, encoder->power_domain, wakeref);

	return ret;
}
d240f20f 3306
/*
 * Encoder ->get_config hook: read back the DP port/transcoder registers
 * into @pipe_config (sync flags, audio, lane count, port clock, m/n).
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	u32 tmp, flags = 0;
	enum port port = encoder->port;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);

	if (encoder->type == INTEL_OUTPUT_EDP)
		pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
	else
		pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);

	tmp = intel_de_read(dev_priv, intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	/* On CPT the sync polarity lives in the transcoder register,
	 * elsewhere in the port register itself. */
	if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		u32 trans_dp = intel_de_read(dev_priv,
					     TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->hw.adjusted_mode.flags |= flags;

	if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	/* Port A link rate comes from the eDP PLL frequency select. */
	if (port == PORT_A) {
		if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	pipe_config->hw.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(pipe_config->port_clock,
					 &pipe_config->dp_m_n);

	if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			    pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
	}
}
3392
/*
 * Common DP disable: stop audio, then take down backlight, sink power
 * state and panel power in order, holding VDD across the sequence.
 */
static void intel_disable_dp(struct intel_encoder *encoder,
			     const struct intel_crtc_state *old_crtc_state,
			     const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	intel_dp->link_trained = false;

	if (old_crtc_state->has_audio)
		intel_audio_codec_disable(encoder,
					  old_crtc_state, old_conn_state);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(old_conn_state);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);
}
3412
/* g4x encoder ->disable hook: just the common DP disable sequence. */
static void g4x_disable_dp(struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
3419
/* VLV encoder ->disable hook: just the common DP disable sequence. */
static void vlv_disable_dp(struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
3426
/* g4x ->post_disable hook: take the link down, then the eDP PLL on port A. */
static void g4x_post_disable_dp(struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;

	/*
	 * Bspec does not list a specific disable sequence for g4x DP.
	 * Follow the ilk+ sequence (disable pipe before the port) for
	 * g4x DP as it does not suffer from underruns like the normal
	 * g4x modeset sequence (disable pipe after the port).
	 */
	intel_dp_link_down(encoder, old_crtc_state);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ilk_edp_pll_off(intel_dp, old_crtc_state);
}
3446
/* VLV ->post_disable hook: just take the link down. */
static void vlv_post_disable_dp(struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	intel_dp_link_down(encoder, old_crtc_state);
}
3453
/* CHV ->post_disable hook: link down, then soft-reset the PHY data lanes. */
static void chv_post_disable_dp(struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	intel_dp_link_down(encoder, old_crtc_state);

	vlv_dpio_get(dev_priv);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, old_crtc_state, true);

	vlv_dpio_put(dev_priv);
}
3469
7b13b58a
VS
/*
 * Program the source-side link training pattern into either DP_TP_CTL
 * (DDI platforms) or the port register value *DP (pre-DDI), using the
 * platform-appropriate bit encoding.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 u32 *DP,
			 u8 dp_train_pat)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;
	/* Mask of training pattern bits the sink's DPCD revision supports. */
	u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);

	if (dp_train_pat & train_pat_mask)
		drm_dbg_kms(&dev_priv->drm,
			    "Using DP training pattern TPS%d\n",
			    dp_train_pat & train_pat_mask);

	if (HAS_DDI(dev_priv)) {
		u32 temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & train_pat_mask) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		case DP_TRAINING_PATTERN_4:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
			break;
		}
		intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, temp);

	} else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			drm_dbg_kms(&dev_priv->drm,
				    "TPS3 not supported, using TPS2 instead\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			drm_dbg_kms(&dev_priv->drm,
				    "TPS3 not supported, using TPS2 instead\n");
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		}
	}
}
3556
85cb48a1 3557static void intel_dp_enable_port(struct intel_dp *intel_dp,
5f88a9c6 3558 const struct intel_crtc_state *old_crtc_state)
7b13b58a 3559{
de25eb7f 3560 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7b13b58a 3561
7b13b58a 3562 /* enable with pattern 1 (as per spec) */
7b13b58a 3563
8b0878a0 3564 intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);
7b713f50
VS
3565
3566 /*
3567 * Magic for VLV/CHV. We _must_ first set up the register
3568 * without actually enabling the port, and then do another
3569 * write to enable the port. Otherwise link training will
3570 * fail when the power sequencer is freshly used for this port.
3571 */
3572 intel_dp->DP |= DP_PORT_EN;
85cb48a1 3573 if (old_crtc_state->has_audio)
6fec7662 3574 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
7b713f50 3575
b4e33881
JN
3576 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
3577 intel_de_posting_read(dev_priv, intel_dp->output_reg);
580d3811
VS
3578}
3579
85cb48a1 3580static void intel_enable_dp(struct intel_encoder *encoder,
5f88a9c6
VS
3581 const struct intel_crtc_state *pipe_config,
3582 const struct drm_connector_state *conn_state)
d240f20f 3583{
2f773477 3584 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
b7d02c3a 3585 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2225f3c6 3586 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
b4e33881 3587 u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg);
d6fbdd15 3588 enum pipe pipe = crtc->pipe;
69d93820 3589 intel_wakeref_t wakeref;
5d613501 3590
eb020ca3 3591 if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
0c33d8d7 3592 return;
5d613501 3593
69d93820
CW
3594 with_pps_lock(intel_dp, wakeref) {
3595 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3596 vlv_init_panel_power_sequencer(encoder, pipe_config);
093e3f13 3597
69d93820 3598 intel_dp_enable_port(intel_dp, pipe_config);
093e3f13 3599
69d93820
CW
3600 edp_panel_vdd_on(intel_dp);
3601 edp_panel_on(intel_dp);
3602 edp_panel_vdd_off(intel_dp, true);
3603 }
093e3f13 3604
920a14b2 3605 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
e0fce78f
VS
3606 unsigned int lane_mask = 0x0;
3607
920a14b2 3608 if (IS_CHERRYVIEW(dev_priv))
85cb48a1 3609 lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
e0fce78f 3610
9b6de0a1
VS
3611 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
3612 lane_mask);
e0fce78f 3613 }
61234fa5 3614
f01eca2e 3615 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 3616 intel_dp_start_link_train(intel_dp);
3ab9c637 3617 intel_dp_stop_link_train(intel_dp);
c1dec79a 3618
85cb48a1 3619 if (pipe_config->has_audio) {
bdc6114e
WK
3620 drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n",
3621 pipe_name(pipe));
bbf35e9d 3622 intel_audio_codec_enable(encoder, pipe_config, conn_state);
c1dec79a 3623 }
ab1f90f9 3624}
89b667f8 3625
fd6bbda9 3626static void g4x_enable_dp(struct intel_encoder *encoder,
5f88a9c6
VS
3627 const struct intel_crtc_state *pipe_config,
3628 const struct drm_connector_state *conn_state)
ecff4f3b 3629{
bbf35e9d 3630 intel_enable_dp(encoder, pipe_config, conn_state);
b037d58f 3631 intel_edp_backlight_on(pipe_config, conn_state);
ab1f90f9 3632}
89b667f8 3633
fd6bbda9 3634static void vlv_enable_dp(struct intel_encoder *encoder,
5f88a9c6
VS
3635 const struct intel_crtc_state *pipe_config,
3636 const struct drm_connector_state *conn_state)
ab1f90f9 3637{
b037d58f 3638 intel_edp_backlight_on(pipe_config, conn_state);
d240f20f
JB
3639}
3640
fd6bbda9 3641static void g4x_pre_enable_dp(struct intel_encoder *encoder,
5f88a9c6
VS
3642 const struct intel_crtc_state *pipe_config,
3643 const struct drm_connector_state *conn_state)
ab1f90f9 3644{
b7d02c3a 3645 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
8f4f2797 3646 enum port port = encoder->port;
ab1f90f9 3647
85cb48a1 3648 intel_dp_prepare(encoder, pipe_config);
8ac33ed3 3649
d41f1efb 3650 /* Only ilk+ has port A */
abfce949 3651 if (port == PORT_A)
9eae5e27 3652 ilk_edp_pll_on(intel_dp, pipe_config);
ab1f90f9
JN
3653}
3654
/*
 * Logically disconnect the power sequencer currently assigned to this DP
 * port: sync off VDD, clear the PPS port select and mark pps_pipe invalid.
 * Must only be called for pipes A/B with the port inactive.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
	enum pipe pipe = intel_dp->pps_pipe;
	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);

	if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
		return;

	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	drm_dbg_kms(&dev_priv->drm,
		    "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
		    pipe_name(pipe), intel_dig_port->base.base.base.id,
		    intel_dig_port->base.base.name);
	intel_de_write(dev_priv, pp_on_reg, 0);
	intel_de_posting_read(dev_priv, pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
3687
46bd8383 3688static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
a4a5d2f8
VS
3689 enum pipe pipe)
3690{
a4a5d2f8
VS
3691 struct intel_encoder *encoder;
3692
3693 lockdep_assert_held(&dev_priv->pps_mutex);
3694
14aa521c 3695 for_each_intel_dp(&dev_priv->drm, encoder) {
b7d02c3a 3696 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
a4a5d2f8 3697
eb020ca3
PB
3698 drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe,
3699 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
3700 pipe_name(pipe), encoder->base.base.id,
3701 encoder->base.name);
9f2bdb00 3702
a4a5d2f8
VS
3703 if (intel_dp->pps_pipe != pipe)
3704 continue;
3705
bdc6114e
WK
3706 drm_dbg_kms(&dev_priv->drm,
3707 "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
3708 pipe_name(pipe), encoder->base.base.id,
3709 encoder->base.name);
a4a5d2f8
VS
3710
3711 /* make sure vdd is off before we steal it */
83b84597 3712 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
3713 }
3714}
3715
/*
 * Assign and initialize the panel power sequencer of this encoder's pipe.
 * Detaches any previously-used sequencer, steals the pipe's sequencer from
 * other ports if needed, and (for eDP only) programs the PPS registers.
 * Caller must hold pps_mutex.
 */
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);

	if (intel_dp->pps_pipe != INVALID_PIPE &&
	    intel_dp->pps_pipe != crtc->pipe) {
		/*
		 * If another power sequencer was being used on this
		 * port previously make sure to turn off vdd there while
		 * we still have control of it.
		 */
		vlv_detach_power_sequencer(intel_dp);
	}

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev_priv, crtc->pipe);

	intel_dp->active_pipe = crtc->pipe;

	/* Only eDP needs an actual power sequencer. */
	if (!intel_dp_is_edp(intel_dp))
		return;

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
		    encoder->base.name);

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
}
3760
fd6bbda9 3761static void vlv_pre_enable_dp(struct intel_encoder *encoder,
5f88a9c6
VS
3762 const struct intel_crtc_state *pipe_config,
3763 const struct drm_connector_state *conn_state)
a4fc5ed6 3764{
2e1029c6 3765 vlv_phy_pre_encoder_enable(encoder, pipe_config);
ab1f90f9 3766
bbf35e9d 3767 intel_enable_dp(encoder, pipe_config, conn_state);
89b667f8
JB
3768}
3769
fd6bbda9 3770static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
5f88a9c6
VS
3771 const struct intel_crtc_state *pipe_config,
3772 const struct drm_connector_state *conn_state)
89b667f8 3773{
85cb48a1 3774 intel_dp_prepare(encoder, pipe_config);
8ac33ed3 3775
2e1029c6 3776 vlv_phy_pre_pll_enable(encoder, pipe_config);
a4fc5ed6
KP
3777}
3778
fd6bbda9 3779static void chv_pre_enable_dp(struct intel_encoder *encoder,
5f88a9c6
VS
3780 const struct intel_crtc_state *pipe_config,
3781 const struct drm_connector_state *conn_state)
e4a1d846 3782{
2e1029c6 3783 chv_phy_pre_encoder_enable(encoder, pipe_config);
e4a1d846 3784
bbf35e9d 3785 intel_enable_dp(encoder, pipe_config, conn_state);
b0b33846
VS
3786
3787 /* Second common lane will stay alive on its own now */
e7d2a717 3788 chv_phy_release_cl2_override(encoder);
e4a1d846
CML
3789}
3790
fd6bbda9 3791static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
5f88a9c6
VS
3792 const struct intel_crtc_state *pipe_config,
3793 const struct drm_connector_state *conn_state)
9197c88b 3794{
85cb48a1 3795 intel_dp_prepare(encoder, pipe_config);
625695f8 3796
2e1029c6 3797 chv_phy_pre_pll_enable(encoder, pipe_config);
9197c88b
VS
3798}
3799
fd6bbda9 3800static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
2e1029c6
VS
3801 const struct intel_crtc_state *old_crtc_state,
3802 const struct drm_connector_state *old_conn_state)
d6db995f 3803{
2e1029c6 3804 chv_phy_post_pll_disable(encoder, old_crtc_state);
d6db995f
VS
3805}
3806
a4fc5ed6
KP
3807/*
3808 * Fetch AUX CH registers 0x202 - 0x207 which contain
3809 * link status information
3810 */
94223d04 3811bool
830de422 3812intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 3813{
9f085ebb
L
3814 return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
3815 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
3816}
3817
1100244e 3818/* These are source-specific values. */
830de422 3819u8
1a2eb460 3820intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 3821{
de25eb7f 3822 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
a393e964
VS
3823 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3824 enum port port = encoder->port;
1a2eb460 3825
a393e964 3826 if (HAS_DDI(dev_priv))
ffe5111e 3827 return intel_ddi_dp_voltage_max(encoder);
a393e964 3828 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
bd60018a 3829 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
b752e995 3830 else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
bd60018a 3831 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
6e266956 3832 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
bd60018a 3833 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 3834 else
bd60018a 3835 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
3836}
3837
/*
 * Return the maximum pre-emphasis level the source supports for the given
 * voltage swing, per port/platform.
 */
u8
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum port port = encoder->port;

	if (HAS_DDI(dev_priv)) {
		return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
3883
830de422 3884static u32 vlv_signal_levels(struct intel_dp *intel_dp)
e2fa6fba 3885{
53d98725 3886 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
e2fa6fba
P
3887 unsigned long demph_reg_value, preemph_reg_value,
3888 uniqtranscale_reg_value;
830de422 3889 u8 train_set = intel_dp->train_set[0];
e2fa6fba
P
3890
3891 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3892 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
3893 preemph_reg_value = 0x0004000;
3894 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3895 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3896 demph_reg_value = 0x2B405555;
3897 uniqtranscale_reg_value = 0x552AB83A;
3898 break;
bd60018a 3899 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3900 demph_reg_value = 0x2B404040;
3901 uniqtranscale_reg_value = 0x5548B83A;
3902 break;
bd60018a 3903 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3904 demph_reg_value = 0x2B245555;
3905 uniqtranscale_reg_value = 0x5560B83A;
3906 break;
bd60018a 3907 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
3908 demph_reg_value = 0x2B405555;
3909 uniqtranscale_reg_value = 0x5598DA3A;
3910 break;
3911 default:
3912 return 0;
3913 }
3914 break;
bd60018a 3915 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
3916 preemph_reg_value = 0x0002000;
3917 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3918 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3919 demph_reg_value = 0x2B404040;
3920 uniqtranscale_reg_value = 0x5552B83A;
3921 break;
bd60018a 3922 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3923 demph_reg_value = 0x2B404848;
3924 uniqtranscale_reg_value = 0x5580B83A;
3925 break;
bd60018a 3926 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3927 demph_reg_value = 0x2B404040;
3928 uniqtranscale_reg_value = 0x55ADDA3A;
3929 break;
3930 default:
3931 return 0;
3932 }
3933 break;
bd60018a 3934 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
3935 preemph_reg_value = 0x0000000;
3936 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3937 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3938 demph_reg_value = 0x2B305555;
3939 uniqtranscale_reg_value = 0x5570B83A;
3940 break;
bd60018a 3941 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3942 demph_reg_value = 0x2B2B4040;
3943 uniqtranscale_reg_value = 0x55ADDA3A;
3944 break;
3945 default:
3946 return 0;
3947 }
3948 break;
bd60018a 3949 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
3950 preemph_reg_value = 0x0006000;
3951 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3952 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3953 demph_reg_value = 0x1B405555;
3954 uniqtranscale_reg_value = 0x55ADDA3A;
3955 break;
3956 default:
3957 return 0;
3958 }
3959 break;
3960 default:
3961 return 0;
3962 }
3963
53d98725
ACO
3964 vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
3965 uniqtranscale_reg_value, 0);
e2fa6fba
P
3966
3967 return 0;
3968}
3969
830de422 3970static u32 chv_signal_levels(struct intel_dp *intel_dp)
e4a1d846 3971{
b7fa22d8
ACO
3972 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3973 u32 deemph_reg_value, margin_reg_value;
3974 bool uniq_trans_scale = false;
830de422 3975 u8 train_set = intel_dp->train_set[0];
e4a1d846
CML
3976
3977 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3978 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3979 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3980 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3981 deemph_reg_value = 128;
3982 margin_reg_value = 52;
3983 break;
bd60018a 3984 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3985 deemph_reg_value = 128;
3986 margin_reg_value = 77;
3987 break;
bd60018a 3988 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3989 deemph_reg_value = 128;
3990 margin_reg_value = 102;
3991 break;
bd60018a 3992 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3993 deemph_reg_value = 128;
3994 margin_reg_value = 154;
b7fa22d8 3995 uniq_trans_scale = true;
e4a1d846
CML
3996 break;
3997 default:
3998 return 0;
3999 }
4000 break;
bd60018a 4001 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 4002 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 4003 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
4004 deemph_reg_value = 85;
4005 margin_reg_value = 78;
4006 break;
bd60018a 4007 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
4008 deemph_reg_value = 85;
4009 margin_reg_value = 116;
4010 break;
bd60018a 4011 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
4012 deemph_reg_value = 85;
4013 margin_reg_value = 154;
4014 break;
4015 default:
4016 return 0;
4017 }
4018 break;
bd60018a 4019 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 4020 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 4021 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
4022 deemph_reg_value = 64;
4023 margin_reg_value = 104;
4024 break;
bd60018a 4025 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
4026 deemph_reg_value = 64;
4027 margin_reg_value = 154;
4028 break;
4029 default:
4030 return 0;
4031 }
4032 break;
bd60018a 4033 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 4034 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 4035 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
4036 deemph_reg_value = 43;
4037 margin_reg_value = 154;
4038 break;
4039 default:
4040 return 0;
4041 }
4042 break;
4043 default:
4044 return 0;
4045 }
4046
b7fa22d8
ACO
4047 chv_set_phy_signal_level(encoder, deemph_reg_value,
4048 margin_reg_value, uniq_trans_scale);
e4a1d846
CML
4049
4050 return 0;
4051}
4052
830de422
JN
4053static u32
4054g4x_signal_levels(u8 train_set)
a4fc5ed6 4055{
830de422 4056 u32 signal_levels = 0;
a4fc5ed6 4057
3cf2efb1 4058 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 4059 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
4060 default:
4061 signal_levels |= DP_VOLTAGE_0_4;
4062 break;
bd60018a 4063 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
4064 signal_levels |= DP_VOLTAGE_0_6;
4065 break;
bd60018a 4066 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
4067 signal_levels |= DP_VOLTAGE_0_8;
4068 break;
bd60018a 4069 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
4070 signal_levels |= DP_VOLTAGE_1_2;
4071 break;
4072 }
3cf2efb1 4073 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 4074 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
4075 default:
4076 signal_levels |= DP_PRE_EMPHASIS_0;
4077 break;
bd60018a 4078 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
4079 signal_levels |= DP_PRE_EMPHASIS_3_5;
4080 break;
bd60018a 4081 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
4082 signal_levels |= DP_PRE_EMPHASIS_6;
4083 break;
bd60018a 4084 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
4085 signal_levels |= DP_PRE_EMPHASIS_9_5;
4086 break;
4087 }
4088 return signal_levels;
4089}
4090
4d82c2b5 4091/* SNB CPU eDP voltage swing and pre-emphasis control */
830de422
JN
4092static u32
4093snb_cpu_edp_signal_levels(u8 train_set)
e3421a18 4094{
3c5a62b5
YL
4095 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
4096 DP_TRAIN_PRE_EMPHASIS_MASK);
4097 switch (signal_levels) {
bd60018a
SJ
4098 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
4099 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 4100 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 4101 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 4102 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
4103 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
4104 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 4105 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
4106 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
4107 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 4108 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
4109 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
4110 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 4111 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 4112 default:
3c5a62b5
YL
4113 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
4114 "0x%x\n", signal_levels);
4115 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
4116 }
4117}
4118
4d82c2b5 4119/* IVB CPU eDP voltage swing and pre-emphasis control */
830de422
JN
4120static u32
4121ivb_cpu_edp_signal_levels(u8 train_set)
1a2eb460
KP
4122{
4123 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
4124 DP_TRAIN_PRE_EMPHASIS_MASK);
4125 switch (signal_levels) {
bd60018a 4126 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 4127 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 4128 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 4129 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 4130 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
4131 return EDP_LINK_TRAIN_400MV_6DB_IVB;
4132
bd60018a 4133 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 4134 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 4135 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
4136 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
4137
bd60018a 4138 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 4139 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 4140 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
4141 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
4142
4143 default:
4144 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
4145 "0x%x\n", signal_levels);
4146 return EDP_LINK_TRAIN_500MV_0DB_IVB;
4147 }
4148}
4149
94223d04 4150void
f4eb692e 4151intel_dp_set_signal_levels(struct intel_dp *intel_dp)
f0a3424e 4152{
de25eb7f 4153 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
f0a3424e 4154 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
8f4f2797 4155 enum port port = intel_dig_port->base.port;
830de422
JN
4156 u32 signal_levels, mask = 0;
4157 u8 train_set = intel_dp->train_set[0];
f0a3424e 4158
61cdfb9e 4159 if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
d509af6c
RV
4160 signal_levels = bxt_signal_levels(intel_dp);
4161 } else if (HAS_DDI(dev_priv)) {
f8896f5d 4162 signal_levels = ddi_signal_levels(intel_dp);
d509af6c 4163 mask = DDI_BUF_EMP_MASK;
920a14b2 4164 } else if (IS_CHERRYVIEW(dev_priv)) {
5829975c 4165 signal_levels = chv_signal_levels(intel_dp);
11a914c2 4166 } else if (IS_VALLEYVIEW(dev_priv)) {
5829975c 4167 signal_levels = vlv_signal_levels(intel_dp);
b752e995 4168 } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
4d82c2b5 4169 signal_levels = ivb_cpu_edp_signal_levels(train_set);
f0a3424e 4170 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
cf819eff 4171 } else if (IS_GEN(dev_priv, 6) && port == PORT_A) {
4d82c2b5 4172 signal_levels = snb_cpu_edp_signal_levels(train_set);
f0a3424e
PZ
4173 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
4174 } else {
45101e93 4175 signal_levels = g4x_signal_levels(train_set);
f0a3424e
PZ
4176 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
4177 }
4178
96fb9f9b 4179 if (mask)
bdc6114e
WK
4180 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
4181 signal_levels);
4182
4183 drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
4184 train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
4185 train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "");
4186 drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n",
4187 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
4188 DP_TRAIN_PRE_EMPHASIS_SHIFT,
4189 train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
4190 " (max)" : "");
f0a3424e 4191
f4eb692e 4192 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
b905a915 4193
b4e33881
JN
4194 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
4195 intel_de_posting_read(dev_priv, intel_dp->output_reg);
f0a3424e
PZ
4196}
4197
94223d04 4198void
e9c176d5 4199intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
830de422 4200 u8 dp_train_pat)
a4fc5ed6 4201{
174edf1f 4202 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
90a6b7b0
VS
4203 struct drm_i915_private *dev_priv =
4204 to_i915(intel_dig_port->base.base.dev);
a4fc5ed6 4205
f4eb692e 4206 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
47ea7542 4207
b4e33881
JN
4208 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
4209 intel_de_posting_read(dev_priv, intel_dp->output_reg);
e9c176d5
ACO
4210}
4211
94223d04 4212void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3ab9c637 4213{
de25eb7f 4214 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3ab9c637 4215 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
8f4f2797 4216 enum port port = intel_dig_port->base.port;
830de422 4217 u32 val;
3ab9c637 4218
4f8036a2 4219 if (!HAS_DDI(dev_priv))
3ab9c637
ID
4220 return;
4221
b4e33881 4222 val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
3ab9c637
ID
4223 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
4224 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
b4e33881 4225 intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);
3ab9c637
ID
4226
4227 /*
99389390
JRS
4228 * Until TGL on PORT_A we can have only eDP in SST mode. There the only
4229 * reason we need to set idle transmission mode is to work around a HW
4230 * issue where we enable the pipe while not in idle link-training mode.
3ab9c637
ID
4231 * In this case there is requirement to wait for a minimum number of
4232 * idle patterns to be sent.
4233 */
99389390 4234 if (port == PORT_A && INTEL_GEN(dev_priv) < 12)
3ab9c637
ID
4235 return;
4236
4444df6e 4237 if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
4cb3b44d 4238 DP_TP_STATUS_IDLE_DONE, 1))
bdc6114e
WK
4239 drm_err(&dev_priv->drm,
4240 "Timed out waiting for DP idle patterns\n");
3ab9c637
ID
4241}
4242
a4fc5ed6 4243static void
adc10304
VS
4244intel_dp_link_down(struct intel_encoder *encoder,
4245 const struct intel_crtc_state *old_crtc_state)
a4fc5ed6 4246{
adc10304 4247 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
b7d02c3a 4248 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2225f3c6 4249 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
adc10304 4250 enum port port = encoder->port;
830de422 4251 u32 DP = intel_dp->DP;
a4fc5ed6 4252
eb020ca3
PB
4253 if (drm_WARN_ON(&dev_priv->drm,
4254 (intel_de_read(dev_priv, intel_dp->output_reg) &
4255 DP_PORT_EN) == 0))
1b39d6f3
CW
4256 return;
4257
bdc6114e 4258 drm_dbg_kms(&dev_priv->drm, "\n");
32f9d658 4259
b752e995 4260 if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
6e266956 4261 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
e3421a18 4262 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1612c8bd 4263 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
e3421a18 4264 } else {
3b358cda 4265 DP &= ~DP_LINK_TRAIN_MASK;
1612c8bd 4266 DP |= DP_LINK_TRAIN_PAT_IDLE;
e3421a18 4267 }
b4e33881
JN
4268 intel_de_write(dev_priv, intel_dp->output_reg, DP);
4269 intel_de_posting_read(dev_priv, intel_dp->output_reg);
5eb08b69 4270
1612c8bd 4271 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
b4e33881
JN
4272 intel_de_write(dev_priv, intel_dp->output_reg, DP);
4273 intel_de_posting_read(dev_priv, intel_dp->output_reg);
1612c8bd
VS
4274
4275 /*
4276 * HW workaround for IBX, we need to move the port
4277 * to transcoder A after disabling it to allow the
4278 * matching HDMI port to be enabled on transcoder A.
4279 */
6e266956 4280 if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
0c241d5b
VS
4281 /*
4282 * We get CPU/PCH FIFO underruns on the other pipe when
4283 * doing the workaround. Sweep them under the rug.
4284 */
4285 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
4286 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
4287
1612c8bd 4288 /* always enable with pattern 1 (as per spec) */
59b74c49
VS
4289 DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
4290 DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
4291 DP_LINK_TRAIN_PAT_1;
b4e33881
JN
4292 intel_de_write(dev_priv, intel_dp->output_reg, DP);
4293 intel_de_posting_read(dev_priv, intel_dp->output_reg);
1612c8bd
VS
4294
4295 DP &= ~DP_PORT_EN;
b4e33881
JN
4296 intel_de_write(dev_priv, intel_dp->output_reg, DP);
4297 intel_de_posting_read(dev_priv, intel_dp->output_reg);
0c241d5b 4298
0f0f74bc 4299 intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
0c241d5b
VS
4300 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
4301 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
5bddd17f
EA
4302 }
4303
f01eca2e 4304 msleep(intel_dp->panel_power_down_delay);
6fec7662
VS
4305
4306 intel_dp->DP = DP;
9f2bdb00
VS
4307
4308 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
69d93820
CW
4309 intel_wakeref_t wakeref;
4310
4311 with_pps_lock(intel_dp, wakeref)
4312 intel_dp->active_pipe = INVALID_PIPE;
9f2bdb00 4313 }
a4fc5ed6
KP
4314}
4315
a1d92652
MA
4316static void
4317intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
4318{
4319 u8 dpcd_ext[6];
4320
4321 /*
4322 * Prior to DP1.3 the bit represented by
4323 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
4324 * if it is set DP_DPCD_REV at 0000h could be at a value less than
4325 * the true capability of the panel. The only way to check is to
4326 * then compare 0000h and 2200h.
4327 */
4328 if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
4329 DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
4330 return;
4331
4332 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
4333 &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
4334 DRM_ERROR("DPCD failed read at extended capabilities\n");
4335 return;
4336 }
4337
4338 if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
4339 DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n");
4340 return;
4341 }
4342
4343 if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
4344 return;
4345
4346 DRM_DEBUG_KMS("Base DPCD: %*ph\n",
4347 (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
4348
4349 memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
4350}
4351
/*
 * Read the base receiver capability field (DPCD 0000h) into the cache,
 * then fix it up from the extended field if present.
 *
 * Returns true if the read succeeded and the sink reported a non-zero
 * DPCD revision (i.e. something DP-like is actually there).
 */
bool
intel_dp_read_dpcd(struct intel_dp *intel_dp)
{
	if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
			     sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	intel_dp_extended_receiver_capabilities(intel_dp);

	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

	return intel_dp->dpcd[DP_DPCD_REV] != 0;
}
edb39244 4365
8e9d645c
GM
4366bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
4367{
4368 u8 dprx = 0;
4369
4370 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
4371 &dprx) != 1)
4372 return false;
4373 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
4374}
4375
93ac092f
MN
/*
 * Refresh the cached DSC and FEC capabilities of the sink. Caches are
 * cleared first so sinks without DSC/FEC never expose stale data.
 */
static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
{
	/*
	 * Clear the cached register set to avoid using stale values
	 * for the sinks that do not support DSC.
	 */
	memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

	/* Clear fec_capable to avoid using stale values */
	intel_dp->fec_capable = 0;

	/* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
	    intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
				     intel_dp->dsc_dpcd,
				     sizeof(intel_dp->dsc_dpcd)) < 0)
			DRM_ERROR("Failed to read DPCD register 0x%x\n",
				  DP_DSC_SUPPORT);

		DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
			      (int)sizeof(intel_dp->dsc_dpcd),
			      intel_dp->dsc_dpcd);

		/* FEC is supported only on DP 1.4 */
		if (!intel_dp_is_edp(intel_dp) &&
		    drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
				      &intel_dp->fec_capable) < 0)
			DRM_ERROR("Failed to read FEC DPCD register\n");

		DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
	}
}
4409
fe5a66f9
VS
/*
 * One-time DPCD initialization for eDP panels: reads the base DPCD, the
 * eDP display control registers, PSR capabilities and the eDP 1.4+ link
 * rate table, then derives the source/sink common rates.
 *
 * Returns false if the panel's DPCD could not be read.
 */
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	/*
	 * Read the eDP display control registers.
	 *
	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
	 * method). The display control registers should read zero if they're
	 * not supported anyway.
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
	    sizeof(intel_dp->edp_dpcd))
		drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
			    (int)sizeof(intel_dp->edp_dpcd),
			    intel_dp->edp_dpcd);

	/*
	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
	 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
	 */
	intel_psr_init_dpcd(intel_dp);

	/* Read the eDP 1.4+ supported link rates. */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				sink_rates, sizeof(sink_rates));

		/* Table is zero-terminated; stop at the first empty entry. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);

	intel_dp_set_common_rates(intel_dp);

	/* Read the eDP DSC DPCD registers */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		intel_dp_get_dsc_sink_cap(intel_dp);

	return true;
}
4489
4490
/*
 * Re-read the sink's DPCD on (re)detection or short HPD pulse. For
 * external DP the link rates and branch descriptor are refreshed; for
 * eDP the cached values from intel_edp_init_dpcd() are preserved.
 *
 * Returns false when the sink should be treated as disconnected (read
 * failure, zero sink count, or failed downstream port fetch).
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	/*
	 * Don't clobber cached eDP rates. Also skip re-reading
	 * the OUI/ID since we know it won't change.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
				 drm_dp_is_branch(intel_dp->dpcd));

		intel_dp_set_sink_rates(intel_dp);
		intel_dp_set_common_rates(intel_dp);
	}

	/*
	 * Some eDP panels do not set a valid value for sink count, which is
	 * why we don't bother reading it here or in intel_edp_init_dpcd().
	 */
	if (!intel_dp_is_edp(intel_dp) &&
	    !drm_dp_has_quirk(&intel_dp->desc, 0,
			      DP_DPCD_QUIRK_NO_SINK_COUNT)) {
		u8 count;
		ssize_t r;

		r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
		if (r < 1)
			return false;

		/*
		 * Sink count can change between short pulse hpd hence
		 * a member variable in intel_dp will track any changes
		 * between short pulse interrupts.
		 */
		intel_dp->sink_count = DP_GET_SINK_COUNT(count);

		/*
		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
		 * a dongle is present but no display. Unless we require to know
		 * if a dongle is present or not, we don't need to update
		 * downstream port information. So, an early return here saves
		 * time from performing other operations which are not required.
		 */
		if (!intel_dp->sink_count)
			return false;
	}

	if (!drm_dp_is_branch(intel_dp->dpcd))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
			     intel_dp->downstream_ports,
			     DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
4554
0e32b39c 4555static bool
9dbf5a4e 4556intel_dp_sink_can_mst(struct intel_dp *intel_dp)
0e32b39c 4557{
010b9b39 4558 u8 mstm_cap;
0e32b39c 4559
0e32b39c
DA
4560 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4561 return false;
4562
010b9b39 4563 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
c4e3170a 4564 return false;
0e32b39c 4565
010b9b39 4566 return mstm_cap & DP_MST_CAP;
c4e3170a
VS
4567}
4568
9dbf5a4e
VS
4569static bool
4570intel_dp_can_mst(struct intel_dp *intel_dp)
4571{
4572 return i915_modparams.enable_dp_mst &&
4573 intel_dp->can_mst &&
4574 intel_dp_sink_can_mst(intel_dp);
4575}
4576
c4e3170a
VS
/*
 * Decide whether this port should run in MST mode (port capability,
 * sink capability and modparam must all agree) and program the MST
 * topology manager accordingly.
 */
static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder =
		&dp_to_dig_port(intel_dp)->base;
	bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);

	DRM_DEBUG_KMS("[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
		      encoder->base.base.id, encoder->base.name,
		      yesno(intel_dp->can_mst), yesno(sink_can_mst),
		      yesno(i915_modparams.enable_dp_mst));

	/* Port doesn't support MST at all; leave the topology manager alone. */
	if (!intel_dp->can_mst)
		return;

	intel_dp->is_mst = sink_can_mst &&
		i915_modparams.enable_dp_mst;

	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
					intel_dp->is_mst);
}
4598
0e32b39c
DA
4599static bool
4600intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4601{
e8b2577c
PD
4602 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
4603 sink_irq_vector, DP_DPRX_ESI_LEN) ==
4604 DP_DPRX_ESI_LEN;
0e32b39c
DA
4605}
4606
0c06fa15
GM
4607bool
4608intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
4609 const struct drm_connector_state *conn_state)
4610{
4611 /*
4612 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
4613 * of Color Encoding Format and Content Color Gamut], in order to
4614 * sending YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP.
4615 */
4616 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
4617 return true;
4618
4619 switch (conn_state->colorspace) {
4620 case DRM_MODE_COLORIMETRY_SYCC_601:
4621 case DRM_MODE_COLORIMETRY_OPYCC_601:
4622 case DRM_MODE_COLORIMETRY_BT2020_YCC:
4623 case DRM_MODE_COLORIMETRY_BT2020_RGB:
4624 case DRM_MODE_COLORIMETRY_BT2020_CYCC:
4625 return true;
4626 default:
4627 break;
4628 }
4629
4630 return false;
4631}
4632
/*
 * Build and transmit a VSC SDP (DP 1.4a, Table 2-119/2-120) describing
 * pixel encoding, colorimetry, bit depth, dynamic range and content type
 * for the current mode.
 */
static void
intel_dp_setup_vsc_sdp(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *crtc_state,
		       const struct drm_connector_state *conn_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct dp_sdp vsc_sdp = {};

	/* Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 */
	vsc_sdp.sdp_header.HB0 = 0;
	vsc_sdp.sdp_header.HB1 = 0x7;

	/*
	 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
	 * Colorimetry Format indication.
	 */
	vsc_sdp.sdp_header.HB2 = 0x5;

	/*
	 * VSC SDP supporting 3D stereo, + PSR2, + Pixel Encoding/
	 * Colorimetry Format indication (HB2 = 05h).
	 */
	vsc_sdp.sdp_header.HB3 = 0x13;

	/* DP 1.4a spec, Table 2-120 */
	switch (crtc_state->output_format) {
	case INTEL_OUTPUT_FORMAT_YCBCR444:
		vsc_sdp.db[16] = 0x1 << 4; /* YCbCr 444 : DB16[7:4] = 1h */
		break;
	case INTEL_OUTPUT_FORMAT_YCBCR420:
		vsc_sdp.db[16] = 0x3 << 4; /* YCbCr 420 : DB16[7:4] = 3h */
		break;
	case INTEL_OUTPUT_FORMAT_RGB:
	default:
		/* RGB: DB16[7:4] = 0h */
		break;
	}

	/* Colorimetry format goes in the low nibble of DB16. */
	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_BT709_YCC:
		vsc_sdp.db[16] |= 0x1;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_601:
		vsc_sdp.db[16] |= 0x2;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_709:
		vsc_sdp.db[16] |= 0x3;
		break;
	case DRM_MODE_COLORIMETRY_SYCC_601:
		vsc_sdp.db[16] |= 0x4;
		break;
	case DRM_MODE_COLORIMETRY_OPYCC_601:
		vsc_sdp.db[16] |= 0x5;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
		vsc_sdp.db[16] |= 0x6;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
		vsc_sdp.db[16] |= 0x7;
		break;
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
		vsc_sdp.db[16] |= 0x4; /* DCI-P3 (SMPTE RP 431-2) */
		break;
	default:
		/* sRGB (IEC 61966-2-1) / ITU-R BT.601: DB16[0:3] = 0h */

		/* RGB->YCBCR color conversion uses the BT.709 color space. */
		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
			vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */
		break;
	}

	/*
	 * For pixel encoding formats YCbCr444, YCbCr422, YCbCr420, and Y Only,
	 * the following Component Bit Depth values are defined:
	 * 001b = 8bpc.
	 * 010b = 10bpc.
	 * 011b = 12bpc.
	 * 100b = 16bpc.
	 */
	switch (crtc_state->pipe_bpp) {
	case 24: /* 8bpc */
		vsc_sdp.db[17] = 0x1;
		break;
	case 30: /* 10bpc */
		vsc_sdp.db[17] = 0x2;
		break;
	case 36: /* 12bpc */
		vsc_sdp.db[17] = 0x3;
		break;
	case 48: /* 16bpc */
		vsc_sdp.db[17] = 0x4;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	/*
	 * Dynamic Range (Bit 7)
	 * 0 = VESA range, 1 = CTA range.
	 * all YCbCr are always limited range
	 */
	vsc_sdp.db[17] |= 0x80;

	/*
	 * Content Type (Bits 2:0)
	 * 000b = Not defined.
	 * 001b = Graphics.
	 * 010b = Photo.
	 * 011b = Video.
	 * 100b = Game
	 * All other values are RESERVED.
	 * Note: See CTA-861-G for the definition and expected
	 * processing by a stream sink for the above contect types.
	 */
	vsc_sdp.db[18] = 0;

	intel_dig_port->write_infoframe(&intel_dig_port->base,
			crtc_state, DP_SDP_VSC, &vsc_sdp, sizeof(vsc_sdp));
}
4756
b246cf21
GM
/*
 * Pack the connector's HDR output metadata into a DRM infoframe, wrap it
 * in a DP SDP (DP 1.4a, Table 2-100/2-101) and hand it to the port's
 * infoframe writer. Bails out silently if the metadata cannot be packed.
 */
static void
intel_dp_setup_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
					  const struct intel_crtc_state *crtc_state,
					  const struct drm_connector_state *conn_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct dp_sdp infoframe_sdp = {};
	struct hdmi_drm_infoframe drm_infoframe = {};
	const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
	unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
	ssize_t len;
	int ret;

	ret = drm_hdmi_infoframe_set_hdr_metadata(&drm_infoframe, conn_state);
	if (ret) {
		DRM_DEBUG_KMS("couldn't set HDR metadata in infoframe\n");
		return;
	}

	len = hdmi_drm_infoframe_pack_only(&drm_infoframe, buf, sizeof(buf));
	if (len < 0) {
		DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
		return;
	}

	if (len != infoframe_size) {
		DRM_DEBUG_KMS("wrong static hdr metadata size\n");
		return;
	}

	/*
	 * Set up the infoframe sdp packet for HDR static metadata.
	 * Prepare VSC Header for SU as per DP 1.4a spec,
	 * Table 2-100 and Table 2-101
	 */

	/* Packet ID, 00h for non-Audio INFOFRAME */
	infoframe_sdp.sdp_header.HB0 = 0;
	/*
	 * Packet Type 80h + Non-audio INFOFRAME Type value
	 * HDMI_INFOFRAME_TYPE_DRM: 0x87,
	 */
	infoframe_sdp.sdp_header.HB1 = drm_infoframe.type;
	/*
	 * Least Significant Eight Bits of (Data Byte Count – 1)
	 * infoframe_size - 1,
	 */
	infoframe_sdp.sdp_header.HB2 = 0x1D;
	/* INFOFRAME SDP Version Number */
	infoframe_sdp.sdp_header.HB3 = (0x13 << 2);
	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	infoframe_sdp.db[0] = drm_infoframe.version;
	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	infoframe_sdp.db[1] = drm_infoframe.length;
	/*
	 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
	 * HDMI_INFOFRAME_HEADER_SIZE
	 */
	BUILD_BUG_ON(sizeof(infoframe_sdp.db) < HDMI_DRM_INFOFRAME_SIZE + 2);
	memcpy(&infoframe_sdp.db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
	       HDMI_DRM_INFOFRAME_SIZE);

	/*
	 * Size of DP infoframe sdp packet for HDR static metadata is consist of
	 * - DP SDP Header(struct dp_sdp_header): 4 bytes
	 * - Two Data Blocks: 2 bytes
	 *    CTA Header Byte2 (INFOFRAME Version Number)
	 *    CTA Header Byte3 (Length of INFOFRAME)
	 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
	 *
	 * Prior to GEN11's GMP register size is identical to DP HDR static metadata
	 * infoframe size. But GEN11+ has larger than that size, write_infoframe
	 * will pad rest of the size.
	 */
	intel_dig_port->write_infoframe(&intel_dig_port->base, crtc_state,
					HDMI_PACKET_TYPE_GAMUT_METADATA,
					&infoframe_sdp,
					sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE);
}
4836
bb71fb00
GM
/* Send the VSC SDP, but only when the current mode actually requires one. */
void intel_dp_vsc_enable(struct intel_dp *intel_dp,
			 const struct intel_crtc_state *crtc_state,
			 const struct drm_connector_state *conn_state)
{
	if (intel_dp_needs_vsc_sdp(crtc_state, conn_state))
		intel_dp_setup_vsc_sdp(intel_dp, crtc_state, conn_state);
}
4846
b246cf21
GM
4847void intel_dp_hdr_metadata_enable(struct intel_dp *intel_dp,
4848 const struct intel_crtc_state *crtc_state,
4849 const struct drm_connector_state *conn_state)
4850{
4851 if (!conn_state->hdr_output_metadata)
4852 return;
4853
4854 intel_dp_setup_hdr_metadata_infoframe_sdp(intel_dp,
4855 crtc_state,
4856 conn_state);
4857}
4858
/*
 * Handle a DP compliance link-training test request (DP CTS 1.2,
 * 4.3.1.11): read and validate the requested lane count and link rate,
 * stash them for the subsequent modeset, and ACK/NAK the request.
 */
static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	int status = 0;
	int test_link_rate;
	u8 test_lane_count, test_link_bw;
	/* (DP CTS 1.2)
	 * 4.3.1.11
	 */
	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);

	if (status <= 0) {
		DRM_DEBUG_KMS("Lane count read failed\n");
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
				   &test_link_bw);
	if (status <= 0) {
		DRM_DEBUG_KMS("Link Rate read failed\n");
		return DP_TEST_NAK;
	}
	test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);

	/* Validate the requested link rate and lane count */
	if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
					test_lane_count))
		return DP_TEST_NAK;

	intel_dp->compliance.test_lane_count = test_lane_count;
	intel_dp->compliance.test_link_rate = test_link_rate;

	return DP_TEST_ACK;
}
4895
/*
 * Handle a DP compliance video-pattern test request: read the requested
 * pattern, geometry and pixel format from the sink's test registers and
 * cache them in intel_dp->compliance for the test modeset. Only the RGB
 * color-ramp pattern at 6/8 bpc is supported; anything else is NAKed.
 */
static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	u8 test_pattern;
	u8 test_misc;
	__be16 h_width, v_height;
	int status = 0;

	/* Read the TEST_PATTERN (DP CTS 3.1.5) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
				   &test_pattern);
	if (status <= 0) {
		DRM_DEBUG_KMS("Test pattern read failed\n");
		return DP_TEST_NAK;
	}
	if (test_pattern != DP_COLOR_RAMP)
		return DP_TEST_NAK;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
				  &h_width, 2);
	if (status <= 0) {
		DRM_DEBUG_KMS("H Width read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
				  &v_height, 2);
	if (status <= 0) {
		DRM_DEBUG_KMS("V Height read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
				   &test_misc);
	if (status <= 0) {
		DRM_DEBUG_KMS("TEST MISC read failed\n");
		return DP_TEST_NAK;
	}
	if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
		return DP_TEST_NAK;
	if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
		return DP_TEST_NAK;
	switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
	case DP_TEST_BIT_DEPTH_6:
		intel_dp->compliance.test_data.bpc = 6;
		break;
	case DP_TEST_BIT_DEPTH_8:
		intel_dp->compliance.test_data.bpc = 8;
		break;
	default:
		return DP_TEST_NAK;
	}

	intel_dp->compliance.test_data.video_pattern = test_pattern;
	/* Geometry registers are big-endian 16-bit values. */
	intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
	intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return DP_TEST_ACK;
}
4956
/*
 * Handle a DP compliance EDID-read test request: if the previous EDID
 * read failed or was corrupt, request the failsafe resolution; otherwise
 * write back the checksum of the last EDID block and request the
 * preferred resolution.
 */
static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	u8 test_result = DP_TEST_ACK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
			intel_dp->aux.i2c_defer_count > 0)
			DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
				      intel_dp->aux.i2c_nack_count,
				      intel_dp->aux.i2c_defer_count);
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		struct edid *block = intel_connector->detect_edid;

		/* We have to write the checksum
		 * of the last block read
		 */
		block += intel_connector->detect_edid->extensions;

		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
				       block->checksum) <= 0)
			DRM_DEBUG_KMS("Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return test_result;
}
5000
830de422 5001static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
a60f0e38 5002{
830de422 5003 u8 test_result = DP_TEST_NAK;
c5d5ab7a
TP
5004 return test_result;
5005}
5006
/*
 * Dispatch a sink-initiated compliance test request (DP_TEST_REQUEST)
 * to the matching autotest handler, record the accepted test type, and
 * write the ACK/NAK back to DP_TEST_RESPONSE.
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	u8 response = DP_TEST_NAK;
	u8 request = 0;
	int status;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
	if (status <= 0) {
		DRM_DEBUG_KMS("Could not read test request from sink\n");
		goto update_status;
	}

	switch (request) {
	case DP_TEST_LINK_TRAINING:
		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		DRM_DEBUG_KMS("EDID test requested\n");
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
		break;
	}

	/* Remember the test type only when the handler accepted it. */
	if (response & DP_TEST_ACK)
		intel_dp->compliance.test_type = request;

update_status:
	status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
	if (status <= 0)
		DRM_DEBUG_KMS("Could not write test response to sink\n");
}
5049
0e32b39c
DA
/*
 * Service MST sink IRQs: read the ESI block, retrain the link if channel
 * EQ failed, forward the event to the MST topology manager and ack the
 * handled bits back to the sink, looping while new events arrive.
 *
 * Returns the topology manager's result, 0 if nothing was handled, or
 * -EINVAL if the port is not in MST mode / the ESI read failed (in the
 * latter case MST mode is torn down).
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[DP_DPRX_ESI_LEN] = { 0 };
		int ret = 0;
		int retry;
		bool handled;

		WARN_ON_ONCE(intel_dp->active_mst_links < 0);
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links > 0 &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced event bits; retry the write a few times. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More events may have arrived while we serviced these. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}
	}
	return -EINVAL;
}
5106
c85d200e
VS
/*
 * Decide whether the trained link has degraded and needs retraining.
 * Returns false when the link was never trained, when PSR owns the
 * main link, when the status read fails, or when the cached link
 * parameters are no longer valid for this sink.
 */
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp->link_trained)
		return false;

	/*
	 * While PSR source HW is enabled, it will control main-link sending
	 * frames, enabling and disabling it so trying to do a retrain will fail
	 * as the link would or not be on or it could mix training patterns
	 * and frame data at the same time causing retrain to fail.
	 * Also when exiting PSR, HW will retrain the link anyways fixing
	 * any link status error.
	 */
	if (intel_psr_enabled(intel_dp))
		return false;

	if (!intel_dp_get_link_status(intel_dp, link_status))
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 */
	if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
					intel_dp->lane_count))
		return false;

	/* Retrain if Channel EQ or CR not ok */
	return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}
5140
c85d200e
VS
/*
 * Retrain the DP link on an active CRTC if it has degraded. Takes the
 * connection and CRTC locks via @ctx (caller handles -EDEADLK backoff),
 * and suppresses FIFO underrun reporting around the retrain since
 * retraining is expected to cause transient underruns.
 *
 * Returns 0 on success or when no retrain was needed, or a negative
 * errno from the locking calls.
 */
int intel_dp_retrain_link(struct intel_encoder *encoder,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_connector_state *conn_state;
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int ret;

	/* FIXME handle the MST connectors as well */

	if (!connector || connector->base.status != connector_status_connected)
		return 0;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	conn_state = connector->base.state;

	crtc = to_intel_crtc(conn_state->crtc);
	if (!crtc)
		return 0;

	ret = drm_modeset_lock(&crtc->base.mutex, ctx);
	if (ret)
		return ret;

	crtc_state = to_intel_crtc_state(crtc->base.state);

	drm_WARN_ON(&dev_priv->drm, !intel_crtc_has_dp_encoder(crtc_state));

	if (!crtc_state->hw.active)
		return 0;

	/* Don't race a commit that is still being applied to the hardware. */
	if (conn_state->commit &&
	    !try_wait_for_completion(&conn_state->commit->hw_done))
		return 0;

	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	/* Suppress underruns caused by re-training */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
	if (crtc_state->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv,
						      intel_crtc_pch_transcoder(crtc), false);

	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	/* Keep underrun reporting disabled until things are stable */
	intel_wait_for_vblank(dev_priv, crtc->pipe);

	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
	if (crtc_state->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv,
						      intel_crtc_pch_transcoder(crtc), true);

	return 0;
}
5205
c85d200e
VS
/*
 * If display is now connected check links status,
 * there has been known issues of link loss triggering
 * long pulse.
 *
 * Some sinks (eg. ASUS PB287Q) seem to perform some
 * weird HPD ping pong during modesets. So we can apparently
 * end up with HPD going low during a modeset, and then
 * going back up soon after. And once that happens we must
 * retrain the link to get a picture. That's in case no
 * userspace component reacted to intermittent HPD dip.
 */
static enum intel_hotplug_state
intel_dp_hotplug(struct intel_encoder *encoder,
		 struct intel_connector *connector,
		 bool irq_received)
{
	struct drm_modeset_acquire_ctx ctx;
	enum intel_hotplug_state state;
	int ret;

	state = intel_encoder_hotplug(encoder, connector, irq_received);

	drm_modeset_acquire_init(&ctx, 0);

	/* Retry the retrain until the lock acquisition stops deadlocking. */
	for (;;) {
		ret = intel_dp_retrain_link(encoder, &ctx);

		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx);
			continue;
		}

		break;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_WARN(encoder->base.dev, ret,
		 "Acquiring modeset locks failed with %i\n", ret);

	/*
	 * Keeping it consistent with intel_ddi_hotplug() and
	 * intel_hdmi_hotplug().
	 */
	if (state == INTEL_HOTPLUG_UNCHANGED && irq_received)
		state = INTEL_HOTPLUG_RETRY;

	return state;
}
5256
9844bc87
DP
/*
 * Service the sink's DEVICE_SERVICE_IRQ_VECTOR (DPCD 1.1+): ack the
 * pending bits, then dispatch automated-test and content-protection
 * interrupts to their handlers.
 */
static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
{
	u8 val;

	/* The IRQ vector register only exists from DPCD 1.1 onwards. */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
		return;

	/* Writing the read value back acknowledges the serviced IRQs. */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);

	if (val & DP_AUTOMATED_TEST_REQUEST)
		intel_dp_handle_test_request(intel_dp);

	if (val & DP_CP_IRQ)
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);

	if (val & DP_SINK_SPECIFIC_IRQ)
		DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
}
5279
a4fc5ed6
KP
/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * intel_dp_short_pulse  -  handles short pulse interrupts
 * when full detection is not required.
 * Returns %true if short pulse is handled and full detection
 * is NOT required and %false otherwise.
 */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

	/*
	 * Now read the DPCD to see if it's actually running
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier or dpcd read failed
	 * we need to do full detection
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	intel_dp_check_service_irq(intel_dp);

	/* Handle CEC interrupts, if any */
	drm_dp_cec_irq(&intel_dp->aux);

	/* defer to the hotplug work for link retraining if needed */
	if (intel_dp_needs_link_retrain(intel_dp))
		return false;

	intel_psr_short_pulse(intel_dp);

	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		drm_dbg_kms(&dev_priv->drm,
			    "Link Training Compliance Test requested\n");
		/* Send a Hotplug Uevent to userspace to start modeset */
		drm_kms_helper_hotplug_event(&dev_priv->drm);
	}

	return true;
}
a4fc5ed6 5339
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Determine connector status by probing the sink's DPCD, falling back
 * through increasingly indirect checks (SINK_COUNT, MST, DDC probe)
 * for branch devices.  Must not be called for eDP.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
	u8 *dpcd = intel_dp->dpcd;
	u8 type;

	/* eDP panels are always reported connected; see edp_detect(). */
	if (WARN_ON(intel_dp_is_edp(intel_dp)))
		return connector_status_connected;

	/* Wake the LSPCON adapter before touching DPCD behind it. */
	if (lspcon->active)
		lspcon_resume(lspcon);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {

		return intel_dp->sink_count ?
		connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp_can_mst(intel_dp))
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DPCD < 1.1: fall back to the coarse DOWNSTREAMPORT field. */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
5394
d410b56d
CW
/*
 * eDP panels are integrated and cannot be unplugged, so detection
 * trivially reports connected.
 */
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
	return connector_status_connected;
}
5400
/*
 * Live-state checks for IBX-era PCHs: read the hotplug live-status bit
 * for the encoder's HPD pin from the south display engine ISR.
 */
static bool ibx_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = SDE_PORTB_HOTPLUG;
		break;
	case HPD_PORT_C:
		bit = SDE_PORTC_HOTPLUG;
		break;
	case HPD_PORT_D:
		bit = SDE_PORTD_HOTPLUG;
		break;
	default:
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return intel_de_read(dev_priv, SDEISR) & bit;
}

/* Same as ibx, but CPT PCHs use different SDEISR bit positions. */
static bool cpt_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = SDE_PORTB_HOTPLUG_CPT;
		break;
	case HPD_PORT_C:
		bit = SDE_PORTC_HOTPLUG_CPT;
		break;
	case HPD_PORT_D:
		bit = SDE_PORTD_HOTPLUG_CPT;
		break;
	default:
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return intel_de_read(dev_priv, SDEISR) & bit;
}

/*
 * SPT adds live-status bits for ports A and E; everything else is
 * handled like CPT.
 */
static bool spt_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_A:
		bit = SDE_PORTA_HOTPLUG_SPT;
		break;
	case HPD_PORT_E:
		bit = SDE_PORTE_HOTPLUG_SPT;
		break;
	default:
		return cpt_digital_port_connected(encoder);
	}

	return intel_de_read(dev_priv, SDEISR) & bit;
}
5465
/*
 * GMCH platforms expose hotplug live status in PORT_HOTPLUG_STAT
 * rather than the south display ISR.  G4X and GM45 use different
 * bit definitions for the same register.
 */
static bool g4x_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
		break;
	case HPD_PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
		break;
	case HPD_PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
		break;
	default:
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
}

/* GM45 variant of the above with GM45-specific live-status bits. */
static bool gm45_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
		break;
	case HPD_PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
		break;
	case HPD_PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
		break;
	default:
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
}
5511
/*
 * On ILK/SNB/IVB/BDW, DP port A hangs off the north display engine
 * (DEISR / GEN8_DE_PORT_ISR) while the remaining ports go through the
 * PCH helpers above.  Only the port-A register/bit differs per gen.
 */
static bool ilk_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	if (encoder->hpd_pin == HPD_PORT_A)
		return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG;
	else
		return ibx_digital_port_connected(encoder);
}

static bool snb_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	if (encoder->hpd_pin == HPD_PORT_A)
		return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG;
	else
		return cpt_digital_port_connected(encoder);
}

static bool ivb_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	if (encoder->hpd_pin == HPD_PORT_A)
		return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG_IVB;
	else
		return cpt_digital_port_connected(encoder);
}

static bool bdw_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	if (encoder->hpd_pin == HPD_PORT_A)
		return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
	else
		return cpt_digital_port_connected(encoder);
}

/* Broxton: all DDI hotplug live status lives in GEN8_DE_PORT_ISR. */
static bool bxt_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_A:
		bit = BXT_DE_PORT_HP_DDIA;
		break;
	case HPD_PORT_B:
		bit = BXT_DE_PORT_HP_DDIB;
		break;
	case HPD_PORT_C:
		bit = BXT_DE_PORT_HP_DDIC;
		break;
	default:
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit;
}
5574
3d1e388d
MR
/*
 * Combo-PHY live status on ICP+ PCHs.  Mule Creek Canyon (MCC) routes
 * PHY C through the first TC hotplug bit instead of the DDI one.
 */
static bool intel_combo_phy_connected(struct drm_i915_private *dev_priv,
				      enum phy phy)
{
	if (HAS_PCH_MCC(dev_priv) && phy == PHY_C)
		return intel_de_read(dev_priv, SDEISR) & SDE_TC_HOTPLUG_ICP(PORT_TC1);

	return intel_de_read(dev_priv, SDEISR) & SDE_DDI_HOTPLUG_ICP(phy);
}

/*
 * ICP+ dispatch: combo PHYs are checked via SDEISR, Type-C PHYs via
 * the TC subsystem (which also accounts for ownership).
 */
static bool icp_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

	if (intel_phy_is_combo(dev_priv, phy))
		return intel_combo_phy_connected(dev_priv, phy);
	else if (intel_phy_is_tc(dev_priv, phy))
		return intel_tc_port_connected(dig_port);
	else
		MISSING_CASE(encoder->hpd_pin);

	return false;
}
5599
7e66bcf2
JN
/*
 * intel_digital_port_connected - is the specified port connected?
 * @encoder: intel_encoder
 *
 * In cases where there's a connector physically connected but it can't be used
 * by our hardware we also return false, since the rest of the driver should
 * pretty much treat the port as disconnected. This is relevant for type-C
 * (starting on ICL) where there's ownership involved.
 *
 * Return %true if port is connected, %false otherwise.
 */
static bool __intel_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	/* GMCH platforms first: they predate the PCH split below. */
	if (HAS_GMCH(dev_priv)) {
		if (IS_GM45(dev_priv))
			return gm45_digital_port_connected(encoder);
		else
			return g4x_digital_port_connected(encoder);
	}

	/* PCH type takes precedence over GPU generation. */
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		return icp_digital_port_connected(encoder);
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
		return spt_digital_port_connected(encoder);
	else if (IS_GEN9_LP(dev_priv))
		return bxt_digital_port_connected(encoder);
	else if (IS_GEN(dev_priv, 8))
		return bdw_digital_port_connected(encoder);
	else if (IS_GEN(dev_priv, 7))
		return ivb_digital_port_connected(encoder);
	else if (IS_GEN(dev_priv, 6))
		return snb_digital_port_connected(encoder);
	else if (IS_GEN(dev_priv, 5))
		return ilk_digital_port_connected(encoder);

	MISSING_CASE(INTEL_GEN(dev_priv));
	return false;
}
5640
6cfe7ec0
ID
/*
 * Public wrapper for __intel_digital_port_connected() that holds a
 * display-core power reference for the duration of the register reads.
 */
bool intel_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	bool is_connected = false;
	intel_wakeref_t wakeref;

	with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		is_connected = __intel_digital_port_connected(encoder);

	return is_connected;
}
5652
8c241fef 5653static struct edid *
beb60608 5654intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 5655{
beb60608 5656 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 5657
9cd300e0
JN
5658 /* use cached edid if we have one */
5659 if (intel_connector->edid) {
9cd300e0
JN
5660 /* invalid edid */
5661 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
5662 return NULL;
5663
55e9edeb 5664 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
5665 } else
5666 return drm_get_edid(&intel_connector->base,
5667 &intel_dp->aux.ddc);
5668}
8c241fef 5669
beb60608
CW
/*
 * Fetch the sink's EDID and cache it on the connector, updating the
 * audio, CEC and quirk state derived from it.  Any previous EDID is
 * dropped first so the helpers below always see fresh data.
 */
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct edid *edid;

	/* Drop stale EDID/CEC state before installing the new one. */
	intel_dp_unset_edid(intel_dp);
	edid = intel_dp_get_edid(intel_dp);
	intel_connector->detect_edid = edid;

	/* The drm helpers accept a NULL edid and treat it as "none". */
	intel_dp->has_audio = drm_detect_monitor_audio(edid);
	drm_dp_cec_set_edid(&intel_dp->aux, edid);
	intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
}
5684
beb60608
CW
/*
 * Free the cached detect-time EDID and reset all state derived from it
 * (CEC registration, audio capability, quirks).
 */
static void
intel_dp_unset_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	drm_dp_cec_unset_edid(&intel_dp->aux);
	kfree(intel_connector->detect_edid);
	intel_connector->detect_edid = NULL;

	intel_dp->has_audio = false;
	intel_dp->edid_quirks = 0;
}
d6f24d0f 5697
/*
 * Connector ->detect() hook for DP.  Determines connection status,
 * refreshes link parameters, DSC caps, MST state and the cached EDID.
 * Requires the connection_mutex to be held (asserted below) since it
 * may retrain the link.  Returns a drm_connector_status value, or a
 * negative error code from link retraining.
 */
static int
intel_dp_detect(struct drm_connector *connector,
		struct drm_modeset_acquire_ctx *ctx,
		bool force)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum drm_connector_status status;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	drm_WARN_ON(&dev_priv->drm,
		    !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

	/* Can't disconnect eDP */
	if (intel_dp_is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(encoder))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		/* Forget compliance and DSC state from the departed sink. */
		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
		memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

		if (intel_dp->is_mst) {
			drm_dbg_kms(&dev_priv->drm,
				    "MST device may have disappeared %d vs %d\n",
				    intel_dp->is_mst,
				    intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}

		goto out;
	}

	/* Re-derive max lane count/rate only after a (re)connect. */
	if (intel_dp->reset_link_params) {
		/* Initial max link lane count */
		intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);

		/* Initial max link rate */
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);

		intel_dp->reset_link_params = false;
	}

	intel_dp_print_rates(intel_dp);

	/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
	if (INTEL_GEN(dev_priv) >= 11)
		intel_dp_get_dsc_sink_cap(intel_dp);

	intel_dp_configure_mst(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Some external monitors do not signal loss of link synchronization
	 * with an IRQ_HPD, so force a link status check.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		int ret;

		ret = intel_dp_retrain_link(encoder, ctx);
		if (ret)
			return ret;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	if (intel_dp_is_edp(intel_dp) ||
	    to_intel_connector(connector)->detect_edid)
		status = connector_status_connected;

	intel_dp_check_service_irq(intel_dp);

out:
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	/*
	 * Make sure the refs for power wells enabled during detect are
	 * dropped to avoid a new detect cycle triggered by HPD polling.
	 */
	intel_display_power_flush_work(dev_priv);

	return status;
}
5806
beb60608
CW
/*
 * Connector ->force() hook: re-read the EDID for a connector that
 * userspace forced to connected.  Holds the AUX power domain across
 * the DDC access since ->force() bypasses the usual detect path.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	enum intel_display_power_domain aux_domain =
		intel_aux_power_domain(dig_port);
	intel_wakeref_t wakeref;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	wakeref = intel_display_power_get(dev_priv, aux_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, aux_domain, wakeref);
}
5831
5832static int intel_dp_get_modes(struct drm_connector *connector)
5833{
5834 struct intel_connector *intel_connector = to_intel_connector(connector);
5835 struct edid *edid;
5836
5837 edid = intel_connector->detect_edid;
5838 if (edid) {
5839 int ret = intel_connector_update_modes(connector, edid);
5840 if (ret)
5841 return ret;
5842 }
32f9d658 5843
f8779fda 5844 /* if eDP has no EDID, fall back to fixed mode */
43a6d19c 5845 if (intel_dp_is_edp(intel_attached_dp(to_intel_connector(connector))) &&
beb60608 5846 intel_connector->panel.fixed_mode) {
f8779fda 5847 struct drm_display_mode *mode;
beb60608
CW
5848
5849 mode = drm_mode_duplicate(connector->dev,
dd06f90e 5850 intel_connector->panel.fixed_mode);
f8779fda 5851 if (mode) {
32f9d658
ZW
5852 drm_mode_probed_add(connector, mode);
5853 return 1;
5854 }
5855 }
beb60608 5856
32f9d658 5857 return 0;
a4fc5ed6
KP
5858}
5859
7a418e34
CW
/*
 * Late connector registration: expose debugfs, then register the AUX
 * channel and CEC adapter against the connector's sysfs device.
 * Returns 0 on success or the error from connector/AUX registration.
 */
static int
intel_dp_connector_register(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	int ret;

	ret = intel_connector_register(connector);
	if (ret)
		return ret;

	intel_connector_debugfs_add(connector);

	DRM_DEBUG_KMS("registering %s bus for %s\n",
		      intel_dp->aux.name, connector->kdev->kobj.name);

	/* AUX dev node must parent to the connector's kernel device. */
	intel_dp->aux.dev = connector->kdev;
	ret = drm_dp_aux_register(&intel_dp->aux);
	/* CEC is best-effort: only hooked up when AUX registered fine. */
	if (!ret)
		drm_dp_cec_register_connector(&intel_dp->aux, connector);
	return ret;
}
5881
c191eca1
CW
/*
 * Tear down in the reverse order of intel_dp_connector_register():
 * CEC, then AUX, then the connector itself.
 */
static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));

	drm_dp_cec_unregister_connector(&intel_dp->aux);
	drm_dp_aux_unregister(&intel_dp->aux);
	intel_connector_unregister(connector);
}
5891
/*
 * Flush all pending work for this encoder before it is destroyed:
 * MST teardown, delayed eDP panel-VDD work, reboot notifier, and
 * finally the AUX channel resources.
 */
void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(to_intel_encoder(encoder));
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (intel_dp_is_edp(intel_dp)) {
		intel_wakeref_t wakeref;

		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		with_pps_lock(intel_dp, wakeref)
			edp_panel_vdd_off_sync(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}

	intel_dp_aux_fini(intel_dp);
}
5917
/*
 * Encoder ->destroy() hook: flush outstanding work, release DRM
 * encoder state, then free the enclosing digital port allocation.
 */
static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	intel_dp_encoder_flush_work(encoder);

	drm_encoder_cleanup(encoder);
	kfree(enc_to_dig_port(to_intel_encoder(encoder)));
}
5925
/*
 * System-suspend hook: only eDP needs work here, to make sure the
 * panel VDD rail is really off before the machine sleeps.
 */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	with_pps_lock(intel_dp, wakeref)
		edp_panel_vdd_off_sync(intel_dp);
}
5942
cf9cb35f
R
5943static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
5944{
5945 long ret;
5946
5947#define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
5948 ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
5949 msecs_to_jiffies(timeout));
5950
5951 if (!ret)
5952 DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n");
5953}
5954
20f24d77
SP
/*
 * Write the HDCP An value and trigger the hardware-assisted Aksv write.
 * Aksv itself never passes through software: we emit only the AUX
 * header and let the AUX_AKSV_SELECT hardware path supply the payload.
 * Returns 0 on success or a negative error code.
 */
static
int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
				u8 *an)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&intel_dig_port->base.base));
	static const struct drm_dp_aux_msg msg = {
		.request = DP_AUX_NATIVE_WRITE,
		.address = DP_AUX_HDCP_AKSV,
		.size = DRM_HDCP_KSV_LEN,
	};
	u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
	ssize_t dpcd_ret;
	int ret;

	/* Output An first, that's easy */
	dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
				     an, DRM_HDCP_AN_LEN);
	if (dpcd_ret != DRM_HDCP_AN_LEN) {
		DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
			      dpcd_ret);
		return dpcd_ret >= 0 ? -EIO : dpcd_ret;
	}

	/*
	 * Since Aksv is Oh-So-Secret, we can't access it in software. So in
	 * order to get it on the wire, we need to create the AUX header as if
	 * we were writing the data, and then tickle the hardware to output the
	 * data once the header is sent out.
	 */
	intel_dp_aux_header(txbuf, &msg);

	ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size,
				rxbuf, sizeof(rxbuf),
				DP_AUX_CH_CTL_AUX_AKSV_SELECT);
	if (ret < 0) {
		DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret);
		return ret;
	} else if (ret == 0) {
		DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
		return -EIO;
	}

	/* The sink must ACK the transfer for the write to count. */
	reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
	if (reply != DP_AUX_NATIVE_REPLY_ACK) {
		DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
			      reply);
		return -EIO;
	}
	return 0;
}
6005
/* Read the sink's Bksv (receiver KSV) over AUX.  0 or -errno. */
static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
				   u8 *bksv)
{
	ssize_t ret;
	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
			       DRM_HDCP_KSV_LEN);
	if (ret != DRM_HDCP_KSV_LEN) {
		DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret);
		return ret >= 0 ? -EIO : ret;
	}
	return 0;
}

/* Read the repeater topology info (BINFO) into @bstatus.  0 or -errno. */
static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
				      u8 *bstatus)
{
	ssize_t ret;
	/*
	 * For some reason the HDMI and DP HDCP specs call this register
	 * definition by different names. In the HDMI spec, it's called BSTATUS,
	 * but in DP it's called BINFO.
	 */
	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
			       bstatus, DRM_HDCP_BSTATUS_LEN);
	if (ret != DRM_HDCP_BSTATUS_LEN) {
		DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
		return ret >= 0 ? -EIO : ret;
	}
	return 0;
}
6036
/* Read the single BCAPS byte from the sink.  0 or -errno. */
static
int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
			     u8 *bcaps)
{
	ssize_t ret;

	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
			       bcaps, 1);
	if (ret != 1) {
		DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret);
		return ret >= 0 ? -EIO : ret;
	}

	return 0;
}

/* Report whether the attached sink is an HDCP repeater (BCAPS bit). */
static
int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
				   bool *repeater_present)
{
	ssize_t ret;
	u8 bcaps;

	ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
	if (ret)
		return ret;

	*repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT;
	return 0;
}
6067
/* Read Ri' (link verification response) from the sink.  0 or -errno. */
static
int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
				u8 *ri_prime)
{
	ssize_t ret;
	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
			       ri_prime, DRM_HDCP_RI_LEN);
	if (ret != DRM_HDCP_RI_LEN) {
		DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret);
		return ret >= 0 ? -EIO : ret;
	}
	return 0;
}

/* Check BSTATUS READY bit: repeater's KSV list is ready to be read. */
static
int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
				 bool *ksv_ready)
{
	ssize_t ret;
	u8 bstatus;
	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
			       &bstatus, 1);
	if (ret != 1) {
		DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
		return ret >= 0 ? -EIO : ret;
	}
	*ksv_ready = bstatus & DP_BSTATUS_READY;
	return 0;
}
6097
/*
 * Drain the repeater's KSV FIFO into @ksv_fifo.  The FIFO exposes at
 * most 3 KSVs (15 bytes) per AUX read, hence the stride of 3 below.
 */
static
int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
				int num_downstream, u8 *ksv_fifo)
{
	ssize_t ret;
	int i;

	/* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
	for (i = 0; i < num_downstream; i += 3) {
		size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
		ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
				       DP_AUX_HDCP_KSV_FIFO,
				       ksv_fifo + i * DRM_HDCP_KSV_LEN,
				       len);
		if (ret != len) {
			DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
				      i, ret);
			return ret >= 0 ? -EIO : ret;
		}
	}
	return 0;
}

/* Read one 4-byte part (V'H0..V'H4) of the SHA-1 verification value. */
static
int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
				    int i, u32 *part)
{
	ssize_t ret;

	if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
		return -EINVAL;

	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
			       DP_AUX_HDCP_V_PRIME(i), part,
			       DRM_HDCP_V_PRIME_PART_LEN);
	if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
		DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
		return ret >= 0 ? -EIO : ret;
	}
	return 0;
}
6139
/*
 * HDCP signalling toggle is a no-op for SST DisplayPort: the stream is
 * encrypted as a whole, unlike HDMI/MST which need explicit gating.
 */
static
int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
				    bool enable)
{
	/* Not used for single stream DisplayPort setups */
	return 0;
}

/*
 * Poll BSTATUS: the link is considered good unless the sink reports
 * a link integrity failure or requests reauthentication.
 */
static
bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
{
	ssize_t ret;
	u8 bstatus;

	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
			       &bstatus, 1);
	if (ret != 1) {
		DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
		/* AUX failure is treated as a broken link. */
		return false;
	}

	return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ));
}

/* Report whether the sink advertises HDCP 1.x capability in BCAPS. */
static
int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
			  bool *hdcp_capable)
{
	ssize_t ret;
	u8 bcaps;

	ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
	if (ret)
		return ret;

	*hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
	return 0;
}
6178
238d3a9e
R
/* On-the-wire layout for the DP HDCP 2.2 errata stream-type write. */
struct hdcp2_dp_errata_stream_type {
	u8 msg_id;
	u8 stream_type;
} __packed;

/*
 * Per-message metadata for the HDCP 2.2 protocol over DP:
 * DPCD offset, whether arrival is detectable via RXSTATUS, and the
 * spec-mandated wait timeouts.
 */
struct hdcp2_dp_msg_data {
	u8 msg_id;
	u32 offset;
	bool msg_detectable;
	u32 timeout;
	u32 timeout2; /* Added for non_paired situation */
};

static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
	{ HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 },
	{ HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
	  false, HDCP_2_2_CERT_TIMEOUT_MS, 0 },
	{ HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
	  false, 0, 0 },
	{ HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
	  false, 0, 0 },
	{ HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
	  true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
	  HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS },
	{ HDCP_2_2_AKE_SEND_PAIRING_INFO,
	  DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
	  HDCP_2_2_PAIRING_TIMEOUT_MS, 0 },
	{ HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 },
	{ HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
	  false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 },
	{ HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
	  0, 0 },
	{ HDCP_2_2_REP_SEND_RECVID_LIST,
	  DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
	  HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 },
	{ HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
	  0, 0 },
	{ HDCP_2_2_REP_STREAM_MANAGE,
	  DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
	  0, 0 },
	{ HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
	  false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 },
/* local define to shovel this through the write_2_2 interface */
#define HDCP_2_2_ERRATA_DP_STREAM_TYPE	50
	{ HDCP_2_2_ERRATA_DP_STREAM_TYPE,
	  DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
	  0, 0 },
};
238d3a9e
R
6227
6228static inline
6229int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
6230 u8 *rx_status)
6231{
6232 ssize_t ret;
6233
6234 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6235 DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
6236 HDCP_2_2_DP_RXSTATUS_LEN);
6237 if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
6238 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
6239 return ret >= 0 ? -EIO : ret;
6240 }
6241
6242 return 0;
6243}
6244
6245static
6246int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
6247 u8 msg_id, bool *msg_ready)
6248{
6249 u8 rx_status;
6250 int ret;
6251
6252 *msg_ready = false;
6253 ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
6254 if (ret < 0)
6255 return ret;
6256
6257 switch (msg_id) {
6258 case HDCP_2_2_AKE_SEND_HPRIME:
6259 if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
6260 *msg_ready = true;
6261 break;
6262 case HDCP_2_2_AKE_SEND_PAIRING_INFO:
6263 if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
6264 *msg_ready = true;
6265 break;
6266 case HDCP_2_2_REP_SEND_RECVID_LIST:
6267 if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
6268 *msg_ready = true;
6269 break;
6270 default:
6271 DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
6272 return -EINVAL;
6273 }
6274
6275 return 0;
6276}
6277
/*
 * Wait until the given HDCP 2.2 message should be readable at the sink.
 *
 * For detectable messages we wait for a CP_IRQ (up to the table timeout)
 * and then poll RxStatus once; if the message still isn't flagged ready
 * the wait is reported as -ETIMEDOUT. For non-detectable messages (CERT,
 * LPRIME, STREAM_READY) there is no readiness bit, so we simply delay
 * for the full timeout and return 0.
 *
 * NOTE(review): mdelay() busy-waits; for multi-ms protocol timeouts
 * msleep() would normally be preferred — confirm whether this path may
 * run in atomic context before changing it.
 */
static ssize_t
intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
			    const struct hdcp2_dp_msg_data *hdcp2_msg_data)
{
	struct intel_dp *dp = &intel_dig_port->dp;
	struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
	u8 msg_id = hdcp2_msg_data->msg_id;
	int ret, timeout;
	bool msg_ready = false;

	/* H' has a longer timeout when the receiver is not yet paired. */
	if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired)
		timeout = hdcp2_msg_data->timeout2;
	else
		timeout = hdcp2_msg_data->timeout;

	/*
	 * There is no way to detect the CERT, LPRIME and STREAM_READY
	 * availability. So Wait for timeout and read the msg.
	 */
	if (!hdcp2_msg_data->msg_detectable) {
		mdelay(timeout);
		ret = 0;
	} else {
		/*
		 * As we want to check the msg availability at timeout, Ignoring
		 * the timeout at wait for CP_IRQ.
		 */
		intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
		ret = hdcp2_detect_msg_availability(intel_dig_port,
						    msg_id, &msg_ready);
		if (!msg_ready)
			ret = -ETIMEDOUT;
	}

	if (ret)
		DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n",
			      hdcp2_msg_data->msg_id, ret, timeout);

	return ret;
}
6318
e8465e1c 6319static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
238d3a9e
R
6320{
6321 int i;
6322
3be3a877
JN
6323 for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++)
6324 if (hdcp2_dp_msg_data[i].msg_id == msg_id)
6325 return &hdcp2_dp_msg_data[i];
238d3a9e
R
6326
6327 return NULL;
6328}
6329
/*
 * Write an HDCP 2.2 message to the sink over DP AUX.
 *
 * buf starts with the protocol msg_id byte, which selects the DPCD
 * offset from the message table and is itself NOT transmitted (DP
 * adaptation messages carry no msg_id on the wire). The payload is
 * chunked into DP_AUX_MAX_PAYLOAD_BYTES transfers. The CP_IRQ count is
 * snapshotted before writing so a later wait can detect new interrupts.
 *
 * Returns the original @size on success, or a negative AUX error.
 */
static
int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
			     void *buf, size_t size)
{
	struct intel_dp *dp = &intel_dig_port->dp;
	struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
	unsigned int offset;
	u8 *byte = buf;
	ssize_t ret, bytes_to_write, len;
	const struct hdcp2_dp_msg_data *hdcp2_msg_data;

	hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
	if (!hdcp2_msg_data)
		return -EINVAL;

	offset = hdcp2_msg_data->offset;

	/* No msg_id in DP HDCP2.2 msgs */
	bytes_to_write = size - 1;
	byte++;

	/* Remember the CP_IRQ count so a subsequent wait sees only new IRQs. */
	hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);

	while (bytes_to_write) {
		len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
				DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;

		ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux,
					offset, (void *)byte, len);
		if (ret < 0)
			return ret;

		bytes_to_write -= ret;
		byte += ret;
		offset += ret;
	}

	return size;
}
6369
/*
 * Compute the size in bytes of the RepeaterAuth_Send_ReceiverID_List
 * message by reading RxInfo and extracting the downstream device count.
 *
 * The count is clamped to HDCP_2_2_MAX_DEVICE_COUNT and the fixed-size
 * part of struct hdcp2_rep_send_receiverid_list is adjusted for the
 * actual number of receiver ids. Returns the size, -EIO on a short
 * RxInfo read, or a negative AUX error.
 */
static
ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
{
	u8 rx_info[HDCP_2_2_RXINFO_LEN];
	u32 dev_cnt;
	ssize_t ret;

	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
			       DP_HDCP_2_2_REG_RXINFO_OFFSET,
			       (void *)rx_info, HDCP_2_2_RXINFO_LEN);
	if (ret != HDCP_2_2_RXINFO_LEN)
		return ret >= 0 ? -EIO : ret;

	/* Device count is split across the two RxInfo bytes. */
	dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		   HDCP_2_2_DEV_COUNT_LO(rx_info[1]));

	if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
		dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;

	ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
		HDCP_2_2_RECEIVER_IDS_MAX_LEN +
		(dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);

	return ret;
}
6395
/*
 * Read an HDCP 2.2 message from the sink over DP AUX.
 *
 * Waits until the message should be available (per the message table),
 * then reads it in DP_AUX_MAX_PAYLOAD_BYTES chunks into buf. For the
 * receiver-id list the real size is computed from RxInfo first, since
 * its length is variable. The wire format carries no msg_id, so the
 * first byte of buf is filled in with @msg_id before returning.
 *
 * Returns the message size on success or a negative error.
 */
static
int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
			    u8 msg_id, void *buf, size_t size)
{
	unsigned int offset;
	u8 *byte = buf;
	ssize_t ret, bytes_to_recv, len;
	const struct hdcp2_dp_msg_data *hdcp2_msg_data;

	hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
	if (!hdcp2_msg_data)
		return -EINVAL;
	offset = hdcp2_msg_data->offset;

	ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data);
	if (ret < 0)
		return ret;

	if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
		ret = get_receiver_id_list_size(intel_dig_port);
		if (ret < 0)
			return ret;

		size = ret;
	}
	bytes_to_recv = size - 1;

	/* DP adaptation msgs has no msg_id */
	byte++;

	while (bytes_to_recv) {
		len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
		      DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;

		ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
				       (void *)byte, len);
		if (ret < 0) {
			DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret);
			return ret;
		}

		bytes_to_recv -= ret;
		byte += ret;
		offset += ret;
	}
	/* Reinstate the msg_id byte the wire format omits. */
	byte = buf;
	*byte = msg_id;

	return size;
}
6446
6447static
6448int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
6449 bool is_repeater, u8 content_type)
6450{
391615d9 6451 int ret;
238d3a9e
R
6452 struct hdcp2_dp_errata_stream_type stream_type_msg;
6453
6454 if (is_repeater)
6455 return 0;
6456
6457 /*
6458 * Errata for DP: As Stream type is used for encryption, Receiver
6459 * should be communicated with stream type for the decryption of the
6460 * content.
6461 * Repeater will be communicated with stream type as a part of it's
6462 * auth later in time.
6463 */
6464 stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
6465 stream_type_msg.stream_type = content_type;
6466
391615d9 6467 ret = intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
238d3a9e 6468 sizeof(stream_type_msg));
391615d9
AG
6469
6470 return ret < 0 ? ret : 0;
6471
238d3a9e
R
6472}
6473
/*
 * Poll RxStatus and translate it into an HDCP 2.2 link verdict.
 *
 * Returns HDCP_REAUTH_REQUEST, HDCP_LINK_INTEGRITY_FAILURE or
 * HDCP_TOPOLOGY_CHANGE when the corresponding bit is set (checked in
 * that priority order), 0 when the link is healthy, or the negative
 * error from the RxStatus read.
 */
static
int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
{
	u8 rx_status;
	int ret;

	ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
	if (ret)
		return ret;

	if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
		ret = HDCP_REAUTH_REQUEST;
	else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
		ret = HDCP_LINK_INTEGRITY_FAILURE;
	else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
		ret = HDCP_TOPOLOGY_CHANGE;

	return ret;
}
6493
/*
 * Report whether the sink supports HDCP 2.2 over DP.
 *
 * Reads the 3-byte RxCaps DPCD block; the sink is 2.2-capable when the
 * version byte matches and the HDCP_CAPABLE bit is set. *capable is
 * false on any failure. Returns 0 or a negative read error.
 */
static
int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port,
			   bool *capable)
{
	u8 rx_caps[3];
	int ret;

	*capable = false;
	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
			       DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
			       rx_caps, HDCP_2_2_RXCAPS_LEN);
	if (ret != HDCP_2_2_RXCAPS_LEN)
		return ret >= 0 ? -EIO : ret;

	if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
	    HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
		*capable = true;

	return 0;
}
6514
20f24d77
SP
/*
 * HDCP shim wiring the generic intel_hdcp state machine to the DP AUX
 * transport: HDCP 1.x ops first, then the 2.2 message ops above.
 */
static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
	.write_an_aksv = intel_dp_hdcp_write_an_aksv,
	.read_bksv = intel_dp_hdcp_read_bksv,
	.read_bstatus = intel_dp_hdcp_read_bstatus,
	.repeater_present = intel_dp_hdcp_repeater_present,
	.read_ri_prime = intel_dp_hdcp_read_ri_prime,
	.read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
	.read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
	.read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
	.toggle_signalling = intel_dp_hdcp_toggle_signalling,
	.check_link = intel_dp_hdcp_check_link,
	.hdcp_capable = intel_dp_hdcp_capable,
	.write_2_2_msg = intel_dp_hdcp2_write_msg,
	.read_2_2_msg = intel_dp_hdcp2_read_msg,
	.config_stream_type = intel_dp_hdcp2_config_stream_type,
	.check_2_2_link = intel_dp_hdcp2_check_link,
	.hdcp_2_2_capable = intel_dp_hdcp2_capable,
	.protocol = HDCP_PROTOCOL_DP,
};
6534
49e6bc51
VS
/*
 * Adopt a VDD that BIOS left enabled at boot/resume so our power-domain
 * tracking stays balanced. Caller must hold pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	drm_dbg_kms(&dev_priv->drm,
		    "VDD left on by BIOS, adjusting state tracking\n");
	intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));

	edp_panel_vdd_schedule_off(intel_dp);
}
6557
9f2bdb00
VS
/*
 * Return the pipe currently driving this DP port on VLV/CHV, or
 * INVALID_PIPE when the port is disabled (per the output register).
 */
static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum pipe pipe;

	if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
				  encoder->port, &pipe))
		return pipe;

	return INVALID_PIPE;
}
6570
bf93ba67 6571void intel_dp_encoder_reset(struct drm_encoder *encoder)
6d93c0c4 6572{
64989ca4 6573 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
b7d02c3a 6574 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
dd75f6dd 6575 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
69d93820 6576 intel_wakeref_t wakeref;
64989ca4
VS
6577
6578 if (!HAS_DDI(dev_priv))
b4e33881 6579 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
49e6bc51 6580
dd75f6dd 6581 if (lspcon->active)
910530c0
SS
6582 lspcon_resume(lspcon);
6583
d7e8ef02
MN
6584 intel_dp->reset_link_params = true;
6585
b4c7ea63
ID
6586 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
6587 !intel_dp_is_edp(intel_dp))
6588 return;
6589
69d93820
CW
6590 with_pps_lock(intel_dp, wakeref) {
6591 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6592 intel_dp->active_pipe = vlv_active_pipe(intel_dp);
49e6bc51 6593
69d93820
CW
6594 if (intel_dp_is_edp(intel_dp)) {
6595 /*
6596 * Reinit the power sequencer, in case BIOS did
6597 * something nasty with it.
6598 */
6599 intel_dp_pps_init(intel_dp);
6600 intel_edp_panel_vdd_sanitize(intel_dp);
6601 }
9f2bdb00 6602 }
6d93c0c4
ID
6603}
6604
e24bcd34
MN
/*
 * Pull every connector of a tile group into the atomic state and mark
 * their CRTCs for a full modeset, so all tiles of a tiled display are
 * reprogrammed together. Returns 0 or the first error encountered.
 */
static int intel_modeset_tile_group(struct intel_atomic_state *state,
				    int tile_group_id)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	int ret = 0;

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!connector->has_tile ||
		    connector->tile_group->id != tile_group_id)
			continue;

		conn_state = drm_atomic_get_connector_state(&state->base,
							    connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			break;
		}

		crtc = to_intel_crtc(conn_state->crtc);

		/* Connector not currently bound to a CRTC: nothing to modeset. */
		if (!crtc)
			continue;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
6646
/*
 * For each enabled CRTC whose cpu_transcoder is in the @transcoders
 * bitmask, add it (plus its connectors and planes) to the atomic state
 * and flag a modeset. Bits are cleared as CRTCs are found; all bits are
 * expected to be consumed by the end (WARN otherwise). Returns 0 or a
 * negative error.
 */
static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	if (transcoders == 0)
		return 0;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->hw.enable)
			continue;

		if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
			continue;

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			return ret;

		transcoders &= ~BIT(crtc_state->cpu_transcoder);
	}

	drm_WARN_ON(&dev_priv->drm, transcoders != 0);

	return 0;
}
6686
/*
 * When @connector's old CRTC was part of a port-sync group, gather the
 * slave transcoders (and the master, if this CRTC was a slave) and
 * force a modeset on all of them so the whole group stays in sync.
 */
static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
				      struct drm_connector *connector)
{
	const struct drm_connector_state *old_conn_state =
		drm_atomic_get_old_connector_state(&state->base, connector);
	const struct intel_crtc_state *old_crtc_state;
	struct intel_crtc *crtc;
	u8 transcoders;

	crtc = to_intel_crtc(old_conn_state->crtc);
	if (!crtc)
		return 0;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

	if (!old_crtc_state->hw.active)
		return 0;

	transcoders = old_crtc_state->sync_mode_slaves_mask;
	if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
		transcoders |= BIT(old_crtc_state->master_transcoder);

	return intel_modeset_affected_transcoders(state,
						  transcoders);
}
6712
/*
 * Connector .atomic_check: run the common digital-connector check, then
 * (gen11+) expand the state for tiled displays and port-sync groups
 * whenever this connector needs a modeset.
 */
static int intel_dp_connector_atomic_check(struct drm_connector *conn,
					   struct drm_atomic_state *_state)
{
	struct drm_i915_private *dev_priv = to_i915(conn->dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	int ret;

	ret = intel_digital_connector_atomic_check(conn, &state->base);
	if (ret)
		return ret;

	/* Tile-group/port-sync handling only applies on gen11+. */
	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	if (!intel_connector_needs_modeset(state, conn))
		return 0;

	if (conn->has_tile) {
		ret = intel_modeset_tile_group(state, conn->tile_group->id);
		if (ret)
			return ret;
	}

	return intel_modeset_synced_crtcs(state, conn);
}
6738
a4fc5ed6 6739static const struct drm_connector_funcs intel_dp_connector_funcs = {
beb60608 6740 .force = intel_dp_force,
a4fc5ed6 6741 .fill_modes = drm_helper_probe_single_connector_modes,
8f647a01
ML
6742 .atomic_get_property = intel_digital_connector_atomic_get_property,
6743 .atomic_set_property = intel_digital_connector_atomic_set_property,
7a418e34 6744 .late_register = intel_dp_connector_register,
c191eca1 6745 .early_unregister = intel_dp_connector_unregister,
d4b26e4f 6746 .destroy = intel_connector_destroy,
c6f95f27 6747 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
8f647a01 6748 .atomic_duplicate_state = intel_digital_connector_duplicate_state,
a4fc5ed6
KP
6749};
6750
/* Probe/modeset helper hooks for DP connectors. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_dp_connector_atomic_check,
};
6757
a4fc5ed6 6758static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 6759 .reset = intel_dp_encoder_reset,
24d05927 6760 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
6761};
6762
b2c5c181 6763enum irqreturn
13cf5504
DA
6764intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
6765{
6766 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 6767
7a7f84cc
VS
6768 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
6769 /*
6770 * vdd off can generate a long pulse on eDP which
6771 * would require vdd on to handle it, and thus we
6772 * would end up in an endless cycle of
6773 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
6774 */
66a990dd
VS
6775 DRM_DEBUG_KMS("ignoring long hpd on eDP [ENCODER:%d:%s]\n",
6776 intel_dig_port->base.base.base.id,
6777 intel_dig_port->base.base.name);
a8b3d52f 6778 return IRQ_HANDLED;
7a7f84cc
VS
6779 }
6780
66a990dd
VS
6781 DRM_DEBUG_KMS("got hpd irq on [ENCODER:%d:%s] - %s\n",
6782 intel_dig_port->base.base.base.id,
6783 intel_dig_port->base.base.name,
0e32b39c 6784 long_hpd ? "long" : "short");
13cf5504 6785
27d4efc5 6786 if (long_hpd) {
d7e8ef02 6787 intel_dp->reset_link_params = true;
27d4efc5
VS
6788 return IRQ_NONE;
6789 }
6790
27d4efc5
VS
6791 if (intel_dp->is_mst) {
6792 if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
6793 /*
6794 * If we were in MST mode, and device is not
6795 * there, get out of MST mode
6796 */
6797 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
6798 intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
6799 intel_dp->is_mst = false;
6800 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
6801 intel_dp->is_mst);
6f08ebe7
ID
6802
6803 return IRQ_NONE;
0e32b39c 6804 }
27d4efc5 6805 }
0e32b39c 6806
27d4efc5 6807 if (!intel_dp->is_mst) {
c85d200e 6808 bool handled;
42e5e657
DV
6809
6810 handled = intel_dp_short_pulse(intel_dp);
6811
cbfa8ac8 6812 if (!handled)
6f08ebe7 6813 return IRQ_NONE;
0e32b39c 6814 }
b2c5c181 6815
6f08ebe7 6816 return IRQ_HANDLED;
13cf5504
DA
6817}
6818
477ec328 6819/* check the VBT to see whether the eDP is on another port */
7b91bf7f 6820bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
36e83a18 6821{
53ce81a7
VS
6822 /*
6823 * eDP not supported on g4x. so bail out early just
6824 * for a bit extra safety in case the VBT is bonkers.
6825 */
dd11bc10 6826 if (INTEL_GEN(dev_priv) < 5)
53ce81a7
VS
6827 return false;
6828
a98d9c1d 6829 if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
3b32a35b
VS
6830 return true;
6831
951d9efe 6832 return intel_bios_is_port_edp(dev_priv, port);
36e83a18
ZY
6833}
6834
200819ab 6835static void
f684960e
CW
6836intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
6837{
8b45330a 6838 struct drm_i915_private *dev_priv = to_i915(connector->dev);
68ec0736
VS
6839 enum port port = dp_to_dig_port(intel_dp)->base.port;
6840
6841 if (!IS_G4X(dev_priv) && port != PORT_A)
6842 intel_attach_force_audio_property(connector);
8b45330a 6843
e953fd7b 6844 intel_attach_broadcast_rgb_property(connector);
b2ae318a 6845 if (HAS_GMCH(dev_priv))
f1a12172
RS
6846 drm_connector_attach_max_bpc_property(connector, 6, 10);
6847 else if (INTEL_GEN(dev_priv) >= 5)
6848 drm_connector_attach_max_bpc_property(connector, 6, 12);
53b41837 6849
9d1bb6f0
GM
6850 intel_attach_colorspace_property(connector);
6851
0299dfa7
GM
6852 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11)
6853 drm_object_attach_property(&connector->base,
6854 connector->dev->mode_config.hdr_output_metadata_property,
6855 0);
6856
1853a9da 6857 if (intel_dp_is_edp(intel_dp)) {
8b45330a
ML
6858 u32 allowed_scalers;
6859
6860 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
b2ae318a 6861 if (!HAS_GMCH(dev_priv))
8b45330a
ML
6862 allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
6863
6864 drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
6865
eead06df 6866 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
8b45330a 6867
53b41837 6868 }
f684960e
CW
6869}
6870
dada1a9f
ID
6871static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
6872{
d28d4731 6873 intel_dp->panel_power_off_time = ktime_get_boottime();
dada1a9f
ID
6874 intel_dp->last_power_on = jiffies;
6875 intel_dp->last_backlight_off = jiffies;
6876}
6877
67a54566 6878static void
46bd8383 6879intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
67a54566 6880{
de25eb7f 6881 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
ab3517c1 6882 u32 pp_on, pp_off, pp_ctl;
8e8232d5 6883 struct pps_registers regs;
453c5420 6884
46bd8383 6885 intel_pps_get_registers(intel_dp, &regs);
67a54566 6886
9eae5e27 6887 pp_ctl = ilk_get_pp_control(intel_dp);
67a54566 6888
1b61c4a3
JN
6889 /* Ensure PPS is unlocked */
6890 if (!HAS_DDI(dev_priv))
b4e33881 6891 intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
1b61c4a3 6892
b4e33881
JN
6893 pp_on = intel_de_read(dev_priv, regs.pp_on);
6894 pp_off = intel_de_read(dev_priv, regs.pp_off);
67a54566
DV
6895
6896 /* Pull timing values out of registers */
78b36b10
JN
6897 seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
6898 seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
6899 seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
6900 seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
67a54566 6901
ab3517c1
JN
6902 if (i915_mmio_reg_valid(regs.pp_div)) {
6903 u32 pp_div;
6904
b4e33881 6905 pp_div = intel_de_read(dev_priv, regs.pp_div);
ab3517c1 6906
78b36b10 6907 seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
ab3517c1 6908 } else {
78b36b10 6909 seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
b0a08bec 6910 }
54648618
ID
6911}
6912
de9c1b6b
ID
6913static void
6914intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
6915{
6916 DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
6917 state_name,
6918 seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
6919}
6920
/*
 * Sanity check: re-read the PPS delays from hardware and complain if
 * they diverge from the software copy in intel_dp->pps_delays.
 */
static void
intel_pps_verify_state(struct intel_dp *intel_dp)
{
	struct edp_power_seq hw;
	struct edp_power_seq *sw = &intel_dp->pps_delays;

	intel_pps_readout_hw_state(intel_dp, &hw);

	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
		DRM_ERROR("PPS state mismatch\n");
		intel_pps_dump_state("sw", sw);
		intel_pps_dump_state("hw", &hw);
	}
}
6936
/*
 * Compute the final panel power sequencer delays from three sources —
 * current hardware state, VBT, and eDP spec limits — taking the max of
 * hw/VBT per field and falling back to spec when both are zero. The
 * result is cached in intel_dp->pps_delays (100 usec units) and the
 * user-visible ms delays are derived from it. Idempotent after the
 * first successful run. Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	intel_pps_readout_hw_state(intel_dp, &cur);

	intel_pps_dump_state("cur", &cur);

	vbt = dev_priv->vbt.edp.pps;
	/* On Toshiba Satellite P50-C-18C system the VBT T12 delay
	 * of 500ms appears to be too short. Ocassionally the panel
	 * just fails to power back on. Increasing the delay to 800ms
	 * seems sufficient to avoid this problem.
	 */
	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
		drm_dbg_kms(&dev_priv->drm,
			    "Increasing T12 panel delay as per the quirk to %d\n",
			    vbt.t11_t12);
	}
	/* T11_T12 delay is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	vbt.t11_t12 += 100 * 10;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	intel_pps_dump_state("vbt", &vbt);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from 100 usec hw units to ms for the driver-side waits. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	drm_dbg_kms(&dev_priv->drm,
		    "panel power up delay %d, power down delay %d, power cycle delay %d\n",
		    intel_dp->panel_power_up_delay,
		    intel_dp->panel_power_down_delay,
		    intel_dp->panel_power_cycle_delay);

	drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
		    intel_dp->backlight_on_delay,
		    intel_dp->backlight_off_delay);

	/*
	 * We override the HW backlight delays to 1 because we do manual waits
	 * on them. For T8, even BSpec recommends doing it. For T9, if we
	 * don't do this, we'll end up waiting for the backlight off delay
	 * twice: once when we do the manual sleep, and once when we disable
	 * the panel and wait for the PP_STATUS bit to become zero.
	 */
	final->t8 = 1;
	final->t9 = 1;

	/*
	 * HW has only a 100msec granularity for t11_t12 so round it up
	 * accordingly.
	 */
	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
}
7032
/*
 * Program the cached PPS delays into the hardware registers, including
 * the platform-specific port-select bits and the power-cycle divisor
 * (PP_DIV where it exists, otherwise folded into PP_CTL on BXT+).
 * @force_disable_vdd clears a BIOS-left VDD first (see comment below).
 * Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, port_sel = 0;
	int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
	struct pps_registers regs;
	enum port port = dp_to_dig_port(intel_dp)->base.port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_get_registers(intel_dp, &regs);

	/*
	 * On some VLV machines the BIOS can leave the VDD
	 * enabled even on power sequencers which aren't
	 * hooked up to any port. This would mess up the
	 * power domain tracking the first time we pick
	 * one of these power sequencers for use since
	 * edp_panel_vdd_on() would notice that the VDD was
	 * already on and therefore wouldn't grab the power
	 * domain reference. Disable VDD first to avoid this.
	 * This also avoids spuriously turning the VDD on as
	 * soon as the new power sequencer gets initialized.
	 */
	if (force_disable_vdd) {
		u32 pp = ilk_get_pp_control(intel_dp);

		drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
			 "Panel power already on\n");

		if (pp & EDP_FORCE_VDD)
			drm_dbg_kms(&dev_priv->drm,
				    "VDD already on, disabling first\n");

		pp &= ~EDP_FORCE_VDD;

		intel_de_write(dev_priv, regs.pp_ctrl, pp);
	}

	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
		 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		switch (port) {
		case PORT_A:
			port_sel = PANEL_PORT_SELECT_DPA;
			break;
		case PORT_C:
			port_sel = PANEL_PORT_SELECT_DPC;
			break;
		case PORT_D:
			port_sel = PANEL_PORT_SELECT_DPD;
			break;
		default:
			MISSING_CASE(port);
			break;
		}
	}

	pp_on |= port_sel;

	intel_de_write(dev_priv, regs.pp_on, pp_on);
	intel_de_write(dev_priv, regs.pp_off, pp_off);

	/*
	 * Compute the divisor for the pp clock, simply match the Bspec formula.
	 */
	if (i915_mmio_reg_valid(regs.pp_div)) {
		intel_de_write(dev_priv, regs.pp_div,
			       REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
	} else {
		u32 pp_ctl;

		pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
		pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		    intel_de_read(dev_priv, regs.pp_on),
		    intel_de_read(dev_priv, regs.pp_off),
		    i915_mmio_reg_valid(regs.pp_div) ?
		    intel_de_read(dev_priv, regs.pp_div) :
		    (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}
7129
46bd8383 7130static void intel_dp_pps_init(struct intel_dp *intel_dp)
335f752b 7131{
de25eb7f 7132 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
920a14b2
TU
7133
7134 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
335f752b
ID
7135 vlv_initial_power_sequencer_setup(intel_dp);
7136 } else {
46bd8383
VS
7137 intel_dp_init_panel_power_sequencer(intel_dp);
7138 intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
335f752b
ID
7139 }
7140}
7141
b33a2815
VK
7142/**
7143 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5423adf1 7144 * @dev_priv: i915 device
e896402c 7145 * @crtc_state: a pointer to the active intel_crtc_state
b33a2815
VK
7146 * @refresh_rate: RR to be programmed
7147 *
7148 * This function gets called when refresh rate (RR) has to be changed from
7149 * one frequency to another. Switches can be between high and low RR
7150 * supported by the panel or to any other RR based on media playback (in
7151 * this case, RR value needs to be passed from user space).
7152 *
7153 * The caller of this function needs to take a lock on dev_priv->drrs.
7154 */
85cb48a1 7155static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
5f88a9c6 7156 const struct intel_crtc_state *crtc_state,
85cb48a1 7157 int refresh_rate)
439d7ac0 7158{
96178eeb 7159 struct intel_dp *intel_dp = dev_priv->drrs.dp;
2225f3c6 7160 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
96178eeb 7161 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
7162
7163 if (refresh_rate <= 0) {
bdc6114e
WK
7164 drm_dbg_kms(&dev_priv->drm,
7165 "Refresh rate should be positive non-zero.\n");
439d7ac0
PB
7166 return;
7167 }
7168
96178eeb 7169 if (intel_dp == NULL) {
bdc6114e 7170 drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n");
439d7ac0
PB
7171 return;
7172 }
7173
439d7ac0 7174 if (!intel_crtc) {
bdc6114e
WK
7175 drm_dbg_kms(&dev_priv->drm,
7176 "DRRS: intel_crtc not initialized\n");
439d7ac0
PB
7177 return;
7178 }
7179
96178eeb 7180 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
bdc6114e 7181 drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n");
439d7ac0
PB
7182 return;
7183 }
7184
96178eeb
VK
7185 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
7186 refresh_rate)
439d7ac0
PB
7187 index = DRRS_LOW_RR;
7188
96178eeb 7189 if (index == dev_priv->drrs.refresh_rate_type) {
bdc6114e
WK
7190 drm_dbg_kms(&dev_priv->drm,
7191 "DRRS requested for previously set RR...ignoring\n");
439d7ac0
PB
7192 return;
7193 }
7194
1326a92c 7195 if (!crtc_state->hw.active) {
bdc6114e
WK
7196 drm_dbg_kms(&dev_priv->drm,
7197 "eDP encoder disabled. CRTC not Active\n");
439d7ac0
PB
7198 return;
7199 }
7200
85cb48a1 7201 if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
a4c30b1d
VK
7202 switch (index) {
7203 case DRRS_HIGH_RR:
4c354754 7204 intel_dp_set_m_n(crtc_state, M1_N1);
a4c30b1d
VK
7205 break;
7206 case DRRS_LOW_RR:
4c354754 7207 intel_dp_set_m_n(crtc_state, M2_N2);
a4c30b1d
VK
7208 break;
7209 case DRRS_MAX_RR:
7210 default:
bdc6114e
WK
7211 drm_err(&dev_priv->drm,
7212 "Unsupported refreshrate type\n");
a4c30b1d 7213 }
85cb48a1
ML
7214 } else if (INTEL_GEN(dev_priv) > 6) {
7215 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
649636ef 7216 u32 val;
a4c30b1d 7217
b4e33881 7218 val = intel_de_read(dev_priv, reg);
439d7ac0 7219 if (index > DRRS_HIGH_RR) {
85cb48a1 7220 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6fa7aec1
VK
7221 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
7222 else
7223 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 7224 } else {
85cb48a1 7225 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6fa7aec1
VK
7226 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
7227 else
7228 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 7229 }
b4e33881 7230 intel_de_write(dev_priv, reg, val);
439d7ac0
PB
7231 }
7232
4e9ac947
VK
7233 dev_priv->drrs.refresh_rate_type = index;
7234
bdc6114e
WK
7235 drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n",
7236 refresh_rate);
4e9ac947
VK
7237}
7238
b33a2815
VK
7239/**
7240 * intel_edp_drrs_enable - init drrs struct if supported
7241 * @intel_dp: DP struct
5423adf1 7242 * @crtc_state: A pointer to the active crtc state.
b33a2815
VK
7243 *
7244 * Initializes frontbuffer_bits and drrs.dp
7245 */
85cb48a1 7246void intel_edp_drrs_enable(struct intel_dp *intel_dp,
5f88a9c6 7247 const struct intel_crtc_state *crtc_state)
c395578e 7248{
de25eb7f 7249 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
c395578e 7250
85cb48a1 7251 if (!crtc_state->has_drrs) {
bdc6114e 7252 drm_dbg_kms(&dev_priv->drm, "Panel doesn't support DRRS\n");
c395578e
VK
7253 return;
7254 }
7255
da83ef85 7256 if (dev_priv->psr.enabled) {
bdc6114e
WK
7257 drm_dbg_kms(&dev_priv->drm,
7258 "PSR enabled. Not enabling DRRS.\n");
da83ef85
RS
7259 return;
7260 }
7261
c395578e 7262 mutex_lock(&dev_priv->drrs.mutex);
f69a0d71 7263 if (dev_priv->drrs.dp) {
bdc6114e 7264 drm_dbg_kms(&dev_priv->drm, "DRRS already enabled\n");
c395578e
VK
7265 goto unlock;
7266 }
7267
7268 dev_priv->drrs.busy_frontbuffer_bits = 0;
7269
7270 dev_priv->drrs.dp = intel_dp;
7271
7272unlock:
7273 mutex_unlock(&dev_priv->drrs.mutex);
7274}
7275
b33a2815
VK
7276/**
7277 * intel_edp_drrs_disable - Disable DRRS
7278 * @intel_dp: DP struct
5423adf1 7279 * @old_crtc_state: Pointer to old crtc_state.
b33a2815
VK
7280 *
7281 */
85cb48a1 7282void intel_edp_drrs_disable(struct intel_dp *intel_dp,
5f88a9c6 7283 const struct intel_crtc_state *old_crtc_state)
c395578e 7284{
de25eb7f 7285 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
c395578e 7286
85cb48a1 7287 if (!old_crtc_state->has_drrs)
c395578e
VK
7288 return;
7289
7290 mutex_lock(&dev_priv->drrs.mutex);
7291 if (!dev_priv->drrs.dp) {
7292 mutex_unlock(&dev_priv->drrs.mutex);
7293 return;
7294 }
7295
7296 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
85cb48a1
ML
7297 intel_dp_set_drrs_state(dev_priv, old_crtc_state,
7298 intel_dp->attached_connector->panel.fixed_mode->vrefresh);
c395578e
VK
7299
7300 dev_priv->drrs.dp = NULL;
7301 mutex_unlock(&dev_priv->drrs.mutex);
7302
7303 cancel_delayed_work_sync(&dev_priv->drrs.work);
7304}
7305
4e9ac947
VK
7306static void intel_edp_drrs_downclock_work(struct work_struct *work)
7307{
7308 struct drm_i915_private *dev_priv =
7309 container_of(work, typeof(*dev_priv), drrs.work.work);
7310 struct intel_dp *intel_dp;
7311
7312 mutex_lock(&dev_priv->drrs.mutex);
7313
7314 intel_dp = dev_priv->drrs.dp;
7315
7316 if (!intel_dp)
7317 goto unlock;
7318
439d7ac0 7319 /*
4e9ac947
VK
7320 * The delayed work can race with an invalidate hence we need to
7321 * recheck.
439d7ac0
PB
7322 */
7323
4e9ac947
VK
7324 if (dev_priv->drrs.busy_frontbuffer_bits)
7325 goto unlock;
439d7ac0 7326
85cb48a1
ML
7327 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
7328 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
7329
7330 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
7331 intel_dp->attached_connector->panel.downclock_mode->vrefresh);
7332 }
439d7ac0 7333
4e9ac947 7334unlock:
4e9ac947 7335 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
7336}
7337
b33a2815 7338/**
0ddfd203 7339 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5748b6a1 7340 * @dev_priv: i915 device
b33a2815
VK
7341 * @frontbuffer_bits: frontbuffer plane tracking bits
7342 *
0ddfd203
R
7343 * This function gets called everytime rendering on the given planes start.
7344 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
b33a2815
VK
7345 *
7346 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
7347 */
5748b6a1
CW
7348void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
7349 unsigned int frontbuffer_bits)
a93fad0f 7350{
a93fad0f
VK
7351 struct drm_crtc *crtc;
7352 enum pipe pipe;
7353
9da7d693 7354 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
7355 return;
7356
88f933a8 7357 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 7358
a93fad0f 7359 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
7360 if (!dev_priv->drrs.dp) {
7361 mutex_unlock(&dev_priv->drrs.mutex);
7362 return;
7363 }
7364
a93fad0f
VK
7365 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
7366 pipe = to_intel_crtc(crtc)->pipe;
7367
c1d038c6
DV
7368 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
7369 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
7370
0ddfd203 7371 /* invalidate means busy screen hence upclock */
c1d038c6 7372 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
85cb48a1
ML
7373 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
7374 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
a93fad0f 7375
a93fad0f
VK
7376 mutex_unlock(&dev_priv->drrs.mutex);
7377}
7378
b33a2815 7379/**
0ddfd203 7380 * intel_edp_drrs_flush - Restart Idleness DRRS
5748b6a1 7381 * @dev_priv: i915 device
b33a2815
VK
7382 * @frontbuffer_bits: frontbuffer plane tracking bits
7383 *
0ddfd203
R
7384 * This function gets called every time rendering on the given planes has
7385 * completed or flip on a crtc is completed. So DRRS should be upclocked
7386 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
7387 * if no other planes are dirty.
b33a2815
VK
7388 *
7389 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
7390 */
5748b6a1
CW
7391void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
7392 unsigned int frontbuffer_bits)
a93fad0f 7393{
a93fad0f
VK
7394 struct drm_crtc *crtc;
7395 enum pipe pipe;
7396
9da7d693 7397 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
7398 return;
7399
88f933a8 7400 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 7401
a93fad0f 7402 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
7403 if (!dev_priv->drrs.dp) {
7404 mutex_unlock(&dev_priv->drrs.mutex);
7405 return;
7406 }
7407
a93fad0f
VK
7408 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
7409 pipe = to_intel_crtc(crtc)->pipe;
c1d038c6
DV
7410
7411 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
a93fad0f
VK
7412 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
7413
0ddfd203 7414 /* flush means busy screen hence upclock */
c1d038c6 7415 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
85cb48a1
ML
7416 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
7417 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
0ddfd203
R
7418
7419 /*
7420 * flush also means no more activity hence schedule downclock, if all
7421 * other fbs are quiescent too
7422 */
7423 if (!dev_priv->drrs.busy_frontbuffer_bits)
a93fad0f
VK
7424 schedule_delayed_work(&dev_priv->drrs.work,
7425 msecs_to_jiffies(1000));
7426 mutex_unlock(&dev_priv->drrs.mutex);
7427}
7428
b33a2815
VK
7429/**
7430 * DOC: Display Refresh Rate Switching (DRRS)
7431 *
7432 * Display Refresh Rate Switching (DRRS) is a power conservation feature
7433 * which enables swtching between low and high refresh rates,
7434 * dynamically, based on the usage scenario. This feature is applicable
7435 * for internal panels.
7436 *
7437 * Indication that the panel supports DRRS is given by the panel EDID, which
7438 * would list multiple refresh rates for one resolution.
7439 *
7440 * DRRS is of 2 types - static and seamless.
7441 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
7442 * (may appear as a blink on screen) and is used in dock-undock scenario.
7443 * Seamless DRRS involves changing RR without any visual effect to the user
7444 * and can be used during normal system usage. This is done by programming
7445 * certain registers.
7446 *
7447 * Support for static/seamless DRRS may be indicated in the VBT based on
7448 * inputs from the panel spec.
7449 *
7450 * DRRS saves power by switching to low RR based on usage scenarios.
7451 *
2e7a5701
DV
7452 * The implementation is based on frontbuffer tracking implementation. When
7453 * there is a disturbance on the screen triggered by user activity or a periodic
7454 * system activity, DRRS is disabled (RR is changed to high RR). When there is
7455 * no movement on screen, after a timeout of 1 second, a switch to low RR is
7456 * made.
7457 *
7458 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
7459 * and intel_edp_drrs_flush() are called.
b33a2815
VK
7460 *
7461 * DRRS can be further extended to support other internal panels and also
7462 * the scenario of video playback wherein RR is set based on the rate
7463 * requested by userspace.
7464 */
7465
7466/**
7467 * intel_dp_drrs_init - Init basic DRRS work and mutex.
2f773477 7468 * @connector: eDP connector
b33a2815
VK
7469 * @fixed_mode: preferred mode of panel
7470 *
7471 * This function is called only once at driver load to initialize basic
7472 * DRRS stuff.
7473 *
7474 * Returns:
7475 * Downclock mode if panel supports it, else return NULL.
7476 * DRRS support is determined by the presence of downclock mode (apart
7477 * from VBT setting).
7478 */
4f9db5b5 7479static struct drm_display_mode *
2f773477
VS
7480intel_dp_drrs_init(struct intel_connector *connector,
7481 struct drm_display_mode *fixed_mode)
4f9db5b5 7482{
2f773477 7483 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
4f9db5b5
PB
7484 struct drm_display_mode *downclock_mode = NULL;
7485
9da7d693
DV
7486 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
7487 mutex_init(&dev_priv->drrs.mutex);
7488
dd11bc10 7489 if (INTEL_GEN(dev_priv) <= 6) {
bdc6114e
WK
7490 drm_dbg_kms(&dev_priv->drm,
7491 "DRRS supported for Gen7 and above\n");
4f9db5b5
PB
7492 return NULL;
7493 }
7494
7495 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
bdc6114e 7496 drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
4f9db5b5
PB
7497 return NULL;
7498 }
7499
abf1aae8 7500 downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
4f9db5b5 7501 if (!downclock_mode) {
bdc6114e
WK
7502 drm_dbg_kms(&dev_priv->drm,
7503 "Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
7504 return NULL;
7505 }
7506
96178eeb 7507 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 7508
96178eeb 7509 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
bdc6114e
WK
7510 drm_dbg_kms(&dev_priv->drm,
7511 "seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
7512 return downclock_mode;
7513}
7514
ed92f0b2 7515static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 7516 struct intel_connector *intel_connector)
ed92f0b2 7517{
de25eb7f
RV
7518 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7519 struct drm_device *dev = &dev_priv->drm;
2f773477 7520 struct drm_connector *connector = &intel_connector->base;
ed92f0b2 7521 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 7522 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2 7523 bool has_dpcd;
6517d273 7524 enum pipe pipe = INVALID_PIPE;
69d93820
CW
7525 intel_wakeref_t wakeref;
7526 struct edid *edid;
ed92f0b2 7527
1853a9da 7528 if (!intel_dp_is_edp(intel_dp))
ed92f0b2
PZ
7529 return true;
7530
36b80aa3
JRS
7531 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);
7532
97a824e1
ID
7533 /*
7534 * On IBX/CPT we may get here with LVDS already registered. Since the
7535 * driver uses the only internal power sequencer available for both
7536 * eDP and LVDS bail out early in this case to prevent interfering
7537 * with an already powered-on LVDS power sequencer.
7538 */
17be4942 7539 if (intel_get_lvds_encoder(dev_priv)) {
eb020ca3
PB
7540 drm_WARN_ON(dev,
7541 !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
bdc6114e
WK
7542 drm_info(&dev_priv->drm,
7543 "LVDS was detected, not registering eDP\n");
97a824e1
ID
7544
7545 return false;
7546 }
7547
69d93820
CW
7548 with_pps_lock(intel_dp, wakeref) {
7549 intel_dp_init_panel_power_timestamps(intel_dp);
7550 intel_dp_pps_init(intel_dp);
7551 intel_edp_panel_vdd_sanitize(intel_dp);
7552 }
63635217 7553
ed92f0b2 7554 /* Cache DPCD and EDID for edp. */
fe5a66f9 7555 has_dpcd = intel_edp_init_dpcd(intel_dp);
ed92f0b2 7556
fe5a66f9 7557 if (!has_dpcd) {
ed92f0b2 7558 /* if this fails, presume the device is a ghost */
bdc6114e
WK
7559 drm_info(&dev_priv->drm,
7560 "failed to retrieve link info, disabling eDP\n");
b4d06ede 7561 goto out_vdd_off;
ed92f0b2
PZ
7562 }
7563
060c8778 7564 mutex_lock(&dev->mode_config.mutex);
0b99836f 7565 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
7566 if (edid) {
7567 if (drm_add_edid_modes(connector, edid)) {
0883ce81
LP
7568 drm_connector_update_edid_property(connector, edid);
7569 intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
ed92f0b2
PZ
7570 } else {
7571 kfree(edid);
7572 edid = ERR_PTR(-EINVAL);
7573 }
7574 } else {
7575 edid = ERR_PTR(-ENOENT);
7576 }
7577 intel_connector->edid = edid;
7578
0dc927eb
VS
7579 fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
7580 if (fixed_mode)
7581 downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);
ed92f0b2
PZ
7582
7583 /* fallback to VBT if available for eDP */
325710d3
VS
7584 if (!fixed_mode)
7585 fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
060c8778 7586 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 7587
920a14b2 7588 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
01527b31
CT
7589 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
7590 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
7591
7592 /*
7593 * Figure out the current pipe for the initial backlight setup.
7594 * If the current pipe isn't valid, try the PPS pipe, and if that
7595 * fails just assume pipe A.
7596 */
9f2bdb00 7597 pipe = vlv_active_pipe(intel_dp);
6517d273
VS
7598
7599 if (pipe != PIPE_A && pipe != PIPE_B)
7600 pipe = intel_dp->pps_pipe;
7601
7602 if (pipe != PIPE_A && pipe != PIPE_B)
7603 pipe = PIPE_A;
7604
bdc6114e
WK
7605 drm_dbg_kms(&dev_priv->drm,
7606 "using pipe %c for initial backlight setup\n",
7607 pipe_name(pipe));
01527b31
CT
7608 }
7609
d93fa1b4 7610 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5507faeb 7611 intel_connector->panel.backlight.power = intel_edp_backlight_power;
6517d273 7612 intel_panel_setup_backlight(connector, pipe);
ed92f0b2 7613
69654c63 7614 if (fixed_mode) {
69654c63 7615 drm_connector_set_panel_orientation_with_quirk(connector,
0dd5b133 7616 dev_priv->vbt.orientation,
69654c63
DB
7617 fixed_mode->hdisplay, fixed_mode->vdisplay);
7618 }
9531221d 7619
ed92f0b2 7620 return true;
b4d06ede
ID
7621
7622out_vdd_off:
7623 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
7624 /*
7625 * vdd might still be enabled do to the delayed vdd off.
7626 * Make sure vdd is actually turned off here.
7627 */
69d93820
CW
7628 with_pps_lock(intel_dp, wakeref)
7629 edp_panel_vdd_off_sync(intel_dp);
b4d06ede
ID
7630
7631 return false;
ed92f0b2
PZ
7632}
7633
9301397a
MN
7634static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
7635{
7636 struct intel_connector *intel_connector;
7637 struct drm_connector *connector;
7638
7639 intel_connector = container_of(work, typeof(*intel_connector),
7640 modeset_retry_work);
7641 connector = &intel_connector->base;
7642 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
7643 connector->name);
7644
7645 /* Grab the locks before changing connector property*/
7646 mutex_lock(&connector->dev->mode_config.mutex);
7647 /* Set connector link status to BAD and send a Uevent to notify
7648 * userspace to do a modeset.
7649 */
97e14fbe
DV
7650 drm_connector_set_link_status_property(connector,
7651 DRM_MODE_LINK_STATUS_BAD);
9301397a
MN
7652 mutex_unlock(&connector->dev->mode_config.mutex);
7653 /* Send Hotplug uevent so userspace can reprobe */
7654 drm_kms_helper_hotplug_event(connector->dev);
7655}
7656
16c25533 7657bool
f0fec3f2
PZ
7658intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
7659 struct intel_connector *intel_connector)
a4fc5ed6 7660{
f0fec3f2
PZ
7661 struct drm_connector *connector = &intel_connector->base;
7662 struct intel_dp *intel_dp = &intel_dig_port->dp;
7663 struct intel_encoder *intel_encoder = &intel_dig_port->base;
7664 struct drm_device *dev = intel_encoder->base.dev;
fac5e23e 7665 struct drm_i915_private *dev_priv = to_i915(dev);
8f4f2797 7666 enum port port = intel_encoder->port;
d8fe2ab6 7667 enum phy phy = intel_port_to_phy(dev_priv, port);
7a418e34 7668 int type;
a4fc5ed6 7669
9301397a
MN
7670 /* Initialize the work for modeset in case of link train failure */
7671 INIT_WORK(&intel_connector->modeset_retry_work,
7672 intel_dp_modeset_retry_work_fn);
7673
eb020ca3
PB
7674 if (drm_WARN(dev, intel_dig_port->max_lanes < 1,
7675 "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
7676 intel_dig_port->max_lanes, intel_encoder->base.base.id,
7677 intel_encoder->base.name))
ccb1a831
VS
7678 return false;
7679
55cfc580
JN
7680 intel_dp_set_source_rates(intel_dp);
7681
d7e8ef02 7682 intel_dp->reset_link_params = true;
a4a5d2f8 7683 intel_dp->pps_pipe = INVALID_PIPE;
9f2bdb00 7684 intel_dp->active_pipe = INVALID_PIPE;
a4a5d2f8 7685
0767935e 7686 /* Preserve the current hw state. */
b4e33881 7687 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
dd06f90e 7688 intel_dp->attached_connector = intel_connector;
3d3dc149 7689
4e309baf
ID
7690 if (intel_dp_is_port_edp(dev_priv, port)) {
7691 /*
7692 * Currently we don't support eDP on TypeC ports, although in
7693 * theory it could work on TypeC legacy ports.
7694 */
eb020ca3 7695 drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
b329530c 7696 type = DRM_MODE_CONNECTOR_eDP;
4e309baf 7697 } else {
3b32a35b 7698 type = DRM_MODE_CONNECTOR_DisplayPort;
4e309baf 7699 }
b329530c 7700
9f2bdb00
VS
7701 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
7702 intel_dp->active_pipe = vlv_active_pipe(intel_dp);
7703
f7d24902
ID
7704 /*
7705 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
7706 * for DP the encoder type can be set by the caller to
7707 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
7708 */
7709 if (type == DRM_MODE_CONNECTOR_eDP)
7710 intel_encoder->type = INTEL_OUTPUT_EDP;
7711
c17ed5b5 7712 /* eDP only on port B and/or C on vlv/chv */
eb020ca3
PB
7713 if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
7714 IS_CHERRYVIEW(dev_priv)) &&
7715 intel_dp_is_edp(intel_dp) &&
7716 port != PORT_B && port != PORT_C))
c17ed5b5
VS
7717 return false;
7718
bdc6114e
WK
7719 drm_dbg_kms(&dev_priv->drm,
7720 "Adding %s connector on [ENCODER:%d:%s]\n",
7721 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
7722 intel_encoder->base.base.id, intel_encoder->base.name);
e7281eab 7723
b329530c 7724 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
7725 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
7726
b2ae318a 7727 if (!HAS_GMCH(dev_priv))
05021389 7728 connector->interlace_allowed = true;
a4fc5ed6
KP
7729 connector->doublescan_allowed = 0;
7730
47d0ccec
GM
7731 if (INTEL_GEN(dev_priv) >= 11)
7732 connector->ycbcr_420_allowed = true;
7733
bdabdb63 7734 intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
5fb908eb 7735 intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
5432fcaf 7736
b6339585 7737 intel_dp_aux_init(intel_dp);
7a418e34 7738
df0e9248 7739 intel_connector_attach_encoder(intel_connector, intel_encoder);
a4fc5ed6 7740
4f8036a2 7741 if (HAS_DDI(dev_priv))
bcbc889b
PZ
7742 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
7743 else
7744 intel_connector->get_hw_state = intel_connector_get_hw_state;
7745
0e32b39c 7746 /* init MST on ports that can support it */
10d987fd
LDM
7747 intel_dp_mst_encoder_init(intel_dig_port,
7748 intel_connector->base.base.id);
0e32b39c 7749
36b5f425 7750 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
a121f4e5
VS
7751 intel_dp_aux_fini(intel_dp);
7752 intel_dp_mst_encoder_cleanup(intel_dig_port);
7753 goto fail;
b2f246a8 7754 }
32f9d658 7755
f684960e 7756 intel_dp_add_properties(intel_dp, connector);
20f24d77 7757
fdddd08c 7758 if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
20f24d77
SP
7759 int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
7760 if (ret)
bdc6114e
WK
7761 drm_dbg_kms(&dev_priv->drm,
7762 "HDCP init failed, skipping.\n");
20f24d77 7763 }
f684960e 7764
a4fc5ed6
KP
7765 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
7766 * 0xd. Failure to do so will result in spurious interrupts being
7767 * generated on the port when a cable is not attached.
7768 */
1c0f1b3d 7769 if (IS_G45(dev_priv)) {
b4e33881
JN
7770 u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
7771 intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
7772 (temp & ~0xf) | 0xd);
a4fc5ed6 7773 }
16c25533
PZ
7774
7775 return true;
a121f4e5
VS
7776
7777fail:
a121f4e5
VS
7778 drm_connector_cleanup(connector);
7779
7780 return false;
a4fc5ed6 7781}
f0fec3f2 7782
c39055b0 7783bool intel_dp_init(struct drm_i915_private *dev_priv,
457c52d8
CW
7784 i915_reg_t output_reg,
7785 enum port port)
f0fec3f2
PZ
7786{
7787 struct intel_digital_port *intel_dig_port;
7788 struct intel_encoder *intel_encoder;
7789 struct drm_encoder *encoder;
7790 struct intel_connector *intel_connector;
7791
b14c5679 7792 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2 7793 if (!intel_dig_port)
457c52d8 7794 return false;
f0fec3f2 7795
08d9bc92 7796 intel_connector = intel_connector_alloc();
11aee0f6
SM
7797 if (!intel_connector)
7798 goto err_connector_alloc;
f0fec3f2
PZ
7799
7800 intel_encoder = &intel_dig_port->base;
7801 encoder = &intel_encoder->base;
7802
c39055b0
ACO
7803 if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
7804 &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
7805 "DP %c", port_name(port)))
893da0c9 7806 goto err_encoder_init;
f0fec3f2 7807
c85d200e 7808 intel_encoder->hotplug = intel_dp_hotplug;
5bfe2ac0 7809 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 7810 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 7811 intel_encoder->get_config = intel_dp_get_config;
63a23d24 7812 intel_encoder->update_pipe = intel_panel_update_backlight;
07f9cd0b 7813 intel_encoder->suspend = intel_dp_encoder_suspend;
920a14b2 7814 if (IS_CHERRYVIEW(dev_priv)) {
9197c88b 7815 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
7816 intel_encoder->pre_enable = chv_pre_enable_dp;
7817 intel_encoder->enable = vlv_enable_dp;
1a8ff607 7818 intel_encoder->disable = vlv_disable_dp;
580d3811 7819 intel_encoder->post_disable = chv_post_disable_dp;
d6db995f 7820 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
11a914c2 7821 } else if (IS_VALLEYVIEW(dev_priv)) {
ecff4f3b 7822 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
7823 intel_encoder->pre_enable = vlv_pre_enable_dp;
7824 intel_encoder->enable = vlv_enable_dp;
1a8ff607 7825 intel_encoder->disable = vlv_disable_dp;
49277c31 7826 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 7827 } else {
ecff4f3b
JN
7828 intel_encoder->pre_enable = g4x_pre_enable_dp;
7829 intel_encoder->enable = g4x_enable_dp;
1a8ff607 7830 intel_encoder->disable = g4x_disable_dp;
51a9f6df 7831 intel_encoder->post_disable = g4x_post_disable_dp;
ab1f90f9 7832 }
f0fec3f2 7833
f0fec3f2 7834 intel_dig_port->dp.output_reg = output_reg;
ccb1a831 7835 intel_dig_port->max_lanes = 4;
12399028
JRS
7836 intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
7837 intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
f0fec3f2 7838
cca0502b 7839 intel_encoder->type = INTEL_OUTPUT_DP;
79f255a0 7840 intel_encoder->power_domain = intel_port_to_power_domain(port);
920a14b2 7841 if (IS_CHERRYVIEW(dev_priv)) {
882ec384 7842 if (port == PORT_D)
981329ce 7843 intel_encoder->pipe_mask = BIT(PIPE_C);
882ec384 7844 else
981329ce 7845 intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
882ec384 7846 } else {
34053ee1 7847 intel_encoder->pipe_mask = ~0;
882ec384 7848 }
bc079e8b 7849 intel_encoder->cloneable = 0;
03cdc1d4 7850 intel_encoder->port = port;
f0fec3f2 7851
13cf5504 7852 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
13cf5504 7853
385e4de0
VS
7854 if (port != PORT_A)
7855 intel_infoframe_init(intel_dig_port);
7856
39053089 7857 intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
11aee0f6
SM
7858 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
7859 goto err_init_connector;
7860
457c52d8 7861 return true;
11aee0f6
SM
7862
7863err_init_connector:
7864 drm_encoder_cleanup(encoder);
893da0c9 7865err_encoder_init:
11aee0f6
SM
7866 kfree(intel_connector);
7867err_connector_alloc:
7868 kfree(intel_dig_port);
457c52d8 7869 return false;
f0fec3f2 7870}
0e32b39c 7871
1a4313d1 7872void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
0e32b39c 7873{
1a4313d1
VS
7874 struct intel_encoder *encoder;
7875
7876 for_each_intel_encoder(&dev_priv->drm, encoder) {
7877 struct intel_dp *intel_dp;
0e32b39c 7878
1a4313d1
VS
7879 if (encoder->type != INTEL_OUTPUT_DDI)
7880 continue;
5aa56969 7881
b7d02c3a 7882 intel_dp = enc_to_intel_dp(encoder);
5aa56969 7883
1a4313d1 7884 if (!intel_dp->can_mst)
0e32b39c
DA
7885 continue;
7886
1a4313d1
VS
7887 if (intel_dp->is_mst)
7888 drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
0e32b39c
DA
7889 }
7890}
7891
1a4313d1 7892void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
0e32b39c 7893{
1a4313d1 7894 struct intel_encoder *encoder;
0e32b39c 7895
1a4313d1
VS
7896 for_each_intel_encoder(&dev_priv->drm, encoder) {
7897 struct intel_dp *intel_dp;
5aa56969 7898 int ret;
0e32b39c 7899
1a4313d1
VS
7900 if (encoder->type != INTEL_OUTPUT_DDI)
7901 continue;
7902
b7d02c3a 7903 intel_dp = enc_to_intel_dp(encoder);
1a4313d1
VS
7904
7905 if (!intel_dp->can_mst)
5aa56969 7906 continue;
0e32b39c 7907
6f85f738
LP
7908 ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
7909 true);
6be1cf96
LP
7910 if (ret) {
7911 intel_dp->is_mst = false;
7912 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
7913 false);
7914 }
0e32b39c
DA
7915 }
7916}