/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/reservation.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_trace.h"
#include "intel_atomic_plane.h"
#include "intel_color.h"
#include "intel_cdclk.h"
#include "intel_crt.h"
#include "intel_ddi.h"
#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_dvo.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lvds.h"
#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sdvo.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tv.h"
#include "intel_vdsc.h"

/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_finish_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);

struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;

	return vco_freq[hpll_freq] * 1000;
}

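/*
 * As implemented below, the resulting rate is ref_freq * 2 / (divider + 1);
 * e.g. (illustrative) a raw divider field of 15 with a 1600000 kHz HPLL
 * VCO gives 2 * 1600000 / 16 = 200000 kHz.
 */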
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}

int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
		    const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else
		return dev_priv->fdi_pll_freq;
}

static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};


static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
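/*
 * Illustrative example: with refclk = 120000 kHz, n = 1, m1 = 12 and
 * m2 = 9, the effective m is 5 * (12 + 2) + (9 + 2) = 81, giving
 * vco = 120000 * 81 / (1 + 2) = 3240000 kHz; p1 * p2 then divides that
 * down to the dot clock.
 */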
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100MHz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable)
{
	if (enable)
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) |
			   DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	else
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) &
			   ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}

/* Wa_2006604312:icl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	if (enable)
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
	else
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}

static bool
needs_modeset(const struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}

/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
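/*
 * Illustrative example for Pineview: refclk = 96000 kHz, m2 = 100,
 * n = 3, p1 = 2, p2 = 10 gives m = 102, vco = 96000 * 102 / 3 =
 * 3264000 kHz, and a dot clock of 3264000 / (2 * 10) = 163200 kHz.
 */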
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static u32 i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}

int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL((u64)refclk * clock->m,
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
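/*
 * Note that the VLV/CHV helpers return clock->dot / 5: the dot limits
 * above are expressed in "fast clocks" running at 5x the link rate, so
 * e.g. a 270000 kHz port clock corresponds to a 270000 * 5 = 1350000 kHz
 * fast dot clock.
 */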

#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)

/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

static int
i9xx_select_p2_div(const struct intel_limit *limit,
		   const struct intel_crtc_state *crtc_state,
		   int target)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev_priv))
			return limit->p2.p2_fast;
		else
			return limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			return limit->p2.p2_slow;
		else
			return limit->p2.p2_fast;
	}
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (m2 + 2) / n / p1 / p2 (Pineview's single combined m divider).
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* (target >> 8) + (target >> 9) = target * (1/256 + 1/512), i.e. approximately target * 0.00586 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n for better precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. Return the calculated error.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (WARN_ON_ONCE(!target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
			     abs(target_freq - calculated_clock->dot),
			     target_freq);
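	/*
	 * Illustrative example: target_freq = 270000 kHz and a calculated
	 * dot clock of 270135 kHz gives 1000000 * 135 / 270000 = 500 ppm.
	 */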
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (m1 * m2) / n / p1 / p2.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n for better precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (m1 * m2) / n / p1 / p2.
 */
static bool
chv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	struct dpll clock;
	u64 m2;
	bool found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Based on the hardware docs, n is always set to 1 and m1 is
	 * always set to 2. If we ever need to support a 200MHz refclk,
	 * this needs revisiting because n may no longer be 1.
	 */
	clock.n = 1;
	clock.m1 = 2;
	target *= 5; /* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
		     clock.p2 >= limit->p2.p2_slow;
		     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

			m2 = DIV_ROUND_CLOSEST_ULL(((u64)target * clock.p *
						    clock.n) << 22, refclk * clock.m1);
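			/*
			 * m2 is a fixed point value with 22 fractional
			 * bits; note the << 22 above and the matching
			 * n << 22 in chv_calc_dpll_params().
			 */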

			if (m2 > INT_MAX/clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}

bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
			struct dpll *best_clock)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_bxt;

	return chv_find_best_dpll(limit, crtc_state,
				  crtc_state->port_clock, refclk,
				  NULL, best_clock);
}

bool intel_crtc_active(struct intel_crtc *crtc)
{
	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->state->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return crtc->active && crtc->base.primary->state->fb &&
		crtc->config->base.adjusted_mode.crtc_clock;
}

enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	return crtc->config->cpu_transcoder;
}

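/*
 * Sample the pipe's scanline counter twice, 5 ms apart; on a running
 * pipe the value will have changed between the two reads.
 */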
static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN(dev_priv, 2))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 != line2;
}

static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		DRM_ERROR("pipe %c scanline %s wait timed out\n",
			  pipe_name(pipe), onoff(state));
}

static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}

static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_wait_for_register(&dev_priv->uncore,
					    reg, I965_PIPECONF_ACTIVE, 0,
					    100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
			"DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
			"panel assertion failure, pipe %c regs locked\n",
			pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
			"transcoder assertion failed, should be off on pipe %c but is still active\n",
			pipe_name(pipe));
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}

static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (intel_wait_for_register(&dev_priv->uncore,
				    DPLL(pipe),
				    DPLL_LOCK_VLV,
				    DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}

static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}


static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_wait_for_register(&dev_priv->uncore,
				    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}

static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}

static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}

static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* On i830 the pipe and its PLLs must stay enabled, so leave them alone */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}

static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}

static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	vlv_dpio_get(dev_priv);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	vlv_dpio_put(dev_priv);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
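		/*
		 * Port C's ready bits are read from the same DPLL(0)
		 * register, 4 bits above port B's, hence the shifted
		 * expected mask.
		 */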
1606 break;
1607 case PORT_D:
1608 port_mask = DPLL_PORTD_READY_MASK;
1609 dpll_reg = DPIO_PHY_STATUS;
1610 break;
1611 default:
1612 BUG();
1613 }
1614
1615 if (intel_wait_for_register(&dev_priv->uncore,
1616 dpll_reg, port_mask, expected_mask,
1617 1000))
1618 WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
1619 port_name(dport->base.port),
1620 I915_READ(dpll_reg) & port_mask, expected_mask);
1621 }
1622
1623 static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
1624 {
1625 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1626 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1627 enum pipe pipe = crtc->pipe;
1628 i915_reg_t reg;
1629 u32 val, pipeconf_val;
1630
1631 /* Make sure PCH DPLL is enabled */
1632 assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
1633
1634 /* FDI must be feeding us bits for PCH ports */
1635 assert_fdi_tx_enabled(dev_priv, pipe);
1636 assert_fdi_rx_enabled(dev_priv, pipe);
1637
1638 if (HAS_PCH_CPT(dev_priv)) {
1639 /* Workaround: Set the timing override bit before enabling the
1640 * pch transcoder. */
1641 reg = TRANS_CHICKEN2(pipe);
1642 val = I915_READ(reg);
1643 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1644 I915_WRITE(reg, val);
1645 }
1646
1647 reg = PCH_TRANSCONF(pipe);
1648 val = I915_READ(reg);
1649 pipeconf_val = I915_READ(PIPECONF(pipe));
1650
1651 if (HAS_PCH_IBX(dev_priv)) {
1652 /*
1653 * Make the BPC in transcoder be consistent with
1654 * that in pipeconf reg. For HDMI we must use 8bpc
1655 * here for both 8bpc and 12bpc.
1656 */
1657 val &= ~PIPECONF_BPC_MASK;
1658 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1659 val |= PIPECONF_8BPC;
1660 else
1661 val |= pipeconf_val & PIPECONF_BPC_MASK;
1662 }
1663
1664 val &= ~TRANS_INTERLACE_MASK;
1665 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
1666 if (HAS_PCH_IBX(dev_priv) &&
1667 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
1668 val |= TRANS_LEGACY_INTERLACED_ILK;
1669 else
1670 val |= TRANS_INTERLACED;
1671 } else {
1672 val |= TRANS_PROGRESSIVE;
1673 }
1674
1675 I915_WRITE(reg, val | TRANS_ENABLE);
1676 if (intel_wait_for_register(&dev_priv->uncore,
1677 reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
1678 100))
1679 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1680 }
1681
1682 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1683 enum transcoder cpu_transcoder)
1684 {
1685 u32 val, pipeconf_val;
1686
1687 /* FDI must be feeding us bits for PCH ports */
1688 assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
1689 assert_fdi_rx_enabled(dev_priv, PIPE_A);
1690
1691 /* Workaround: set timing override bit. */
1692 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1693 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1694 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1695
1696 val = TRANS_ENABLE;
1697 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1698
1699 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1700 PIPECONF_INTERLACED_ILK)
1701 val |= TRANS_INTERLACED;
1702 else
1703 val |= TRANS_PROGRESSIVE;
1704
1705 I915_WRITE(LPT_TRANSCONF, val);
1706 if (intel_wait_for_register(&dev_priv->uncore,
1707 LPT_TRANSCONF,
1708 TRANS_STATE_ENABLE,
1709 TRANS_STATE_ENABLE,
1710 100))
1711 DRM_ERROR("Failed to enable PCH transcoder\n");
1712 }
1713
1714 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1715 enum pipe pipe)
1716 {
1717 i915_reg_t reg;
1718 u32 val;
1719
1720 /* FDI relies on the transcoder */
1721 assert_fdi_tx_disabled(dev_priv, pipe);
1722 assert_fdi_rx_disabled(dev_priv, pipe);
1723
1724 /* Ports must be off as well */
1725 assert_pch_ports_disabled(dev_priv, pipe);
1726
1727 reg = PCH_TRANSCONF(pipe);
1728 val = I915_READ(reg);
1729 val &= ~TRANS_ENABLE;
1730 I915_WRITE(reg, val);
1731 /* wait for PCH transcoder off, transcoder state */
1732 if (intel_wait_for_register(&dev_priv->uncore,
1733 reg, TRANS_STATE_ENABLE, 0,
1734 50))
1735 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1736
1737 if (HAS_PCH_CPT(dev_priv)) {
1738 /* Workaround: Clear the timing override chicken bit again. */
1739 reg = TRANS_CHICKEN2(pipe);
1740 val = I915_READ(reg);
1741 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1742 I915_WRITE(reg, val);
1743 }
1744 }
1745
1746 void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1747 {
1748 u32 val;
1749
1750 val = I915_READ(LPT_TRANSCONF);
1751 val &= ~TRANS_ENABLE;
1752 I915_WRITE(LPT_TRANSCONF, val);
1753 /* wait for PCH transcoder off, transcoder state */
1754 if (intel_wait_for_register(&dev_priv->uncore,
1755 LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
1756 50))
1757 DRM_ERROR("Failed to disable PCH transcoder\n");
1758
1759 /* Workaround: clear timing override bit. */
1760 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1761 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1762 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1763 }
1764
1765 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1766 {
1767 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1768
1769 if (HAS_PCH_LPT(dev_priv))
1770 return PIPE_A;
1771 else
1772 return crtc->pipe;
1773 }
1774
1775 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1776 {
1777 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
1778
1779 /*
1780 * On i965gm the hardware frame counter reads
1781 * zero when the TV encoder is enabled :(
1782 */
1783 if (IS_I965GM(dev_priv) &&
1784 (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1785 return 0;
1786
1787 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1788 return 0xffffffff; /* full 32 bit counter */
1789 else if (INTEL_GEN(dev_priv) >= 3)
1790 return 0xffffff; /* only 24 bits of frame count */
1791 else
1792 return 0; /* Gen2 doesn't have a hardware frame counter */
1793 }
1794
1795 static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
1796 {
1797 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1798
1799 drm_crtc_set_max_vblank_count(&crtc->base,
1800 intel_crtc_max_vblank_count(crtc_state));
1801 drm_crtc_vblank_on(&crtc->base);
1802 }
1803
1804 static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
1805 {
1806 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
1807 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1808 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
1809 enum pipe pipe = crtc->pipe;
1810 i915_reg_t reg;
1811 u32 val;
1812
1813 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
1814
1815 assert_planes_disabled(crtc);
1816
1817 /*
1818 * A pipe without a PLL won't actually be able to drive bits from
1819 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
1820 * need the check.
1821 */
1822 if (HAS_GMCH(dev_priv)) {
1823 if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
1824 assert_dsi_pll_enabled(dev_priv);
1825 else
1826 assert_pll_enabled(dev_priv, pipe);
1827 } else {
1828 if (new_crtc_state->has_pch_encoder) {
1829 /* if driving the PCH, we need FDI enabled */
1830 assert_fdi_rx_pll_enabled(dev_priv,
1831 intel_crtc_pch_transcoder(crtc));
1832 assert_fdi_tx_pll_enabled(dev_priv,
1833 (enum pipe) cpu_transcoder);
1834 }
1835 /* FIXME: assert CPU port conditions for SNB+ */
1836 }
1837
1838 trace_intel_pipe_enable(dev_priv, pipe);
1839
1840 reg = PIPECONF(cpu_transcoder);
1841 val = I915_READ(reg);
1842 if (val & PIPECONF_ENABLE) {
1843 /* we keep both pipes enabled on 830 */
1844 WARN_ON(!IS_I830(dev_priv));
1845 return;
1846 }
1847
1848 I915_WRITE(reg, val | PIPECONF_ENABLE);
1849 POSTING_READ(reg);
1850
1851 /*
1852 * Until the pipe starts, PIPEDSL reads will return a stale value,
1853 * which causes an apparent vblank timestamp jump when PIPEDSL
1854 * resets to its proper value. That also messes up the frame count
1855 * when it's derived from the timestamps. So let's wait for the
1856 * pipe to start properly before we call drm_crtc_vblank_on()
1857 */
1858 if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
1859 intel_wait_for_pipe_scanline_moving(crtc);
1860 }
1861
1862 static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
1863 {
1864 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
1865 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1866 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
1867 enum pipe pipe = crtc->pipe;
1868 i915_reg_t reg;
1869 u32 val;
1870
1871 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
1872
1873 /*
1874 * Make sure planes won't keep trying to pump pixels to us,
1875 * or we might hang the display.
1876 */
1877 assert_planes_disabled(crtc);
1878
1879 trace_intel_pipe_disable(dev_priv, pipe);
1880
1881 reg = PIPECONF(cpu_transcoder);
1882 val = I915_READ(reg);
1883 if ((val & PIPECONF_ENABLE) == 0)
1884 return;
1885
1886 /*
1887 * Double wide has implications for planes
1888 * so best keep it disabled when not needed.
1889 */
1890 if (old_crtc_state->double_wide)
1891 val &= ~PIPECONF_DOUBLE_WIDE;
1892
1893 /* Don't disable the pipe (or its PLLs) on i830, which needs both pipes enabled */
1894 if (!IS_I830(dev_priv))
1895 val &= ~PIPECONF_ENABLE;
1896
1897 I915_WRITE(reg, val);
1898 if ((val & PIPECONF_ENABLE) == 0)
1899 intel_wait_for_pipe_off(old_crtc_state);
1900 }
1901
1902 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
1903 {
1904 return IS_GEN(dev_priv, 2) ? 2048 : 4096;
1905 }
1906
1907 static unsigned int
1908 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
1909 {
1910 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1911 unsigned int cpp = fb->format->cpp[color_plane];
1912
1913 switch (fb->modifier) {
1914 case DRM_FORMAT_MOD_LINEAR:
1915 return cpp;
1916 case I915_FORMAT_MOD_X_TILED:
1917 if (IS_GEN(dev_priv, 2))
1918 return 128;
1919 else
1920 return 512;
1921 case I915_FORMAT_MOD_Y_TILED_CCS:
1922 if (color_plane == 1)
1923 return 128;
1924 /* fall through */
1925 case I915_FORMAT_MOD_Y_TILED:
1926 if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
1927 return 128;
1928 else
1929 return 512;
1930 case I915_FORMAT_MOD_Yf_TILED_CCS:
1931 if (color_plane == 1)
1932 return 128;
1933 /* fall through */
1934 case I915_FORMAT_MOD_Yf_TILED:
1935 switch (cpp) {
1936 case 1:
1937 return 64;
1938 case 2:
1939 case 4:
1940 return 128;
1941 case 8:
1942 case 16:
1943 return 256;
1944 default:
1945 MISSING_CASE(cpp);
1946 return cpp;
1947 }
1948 break;
1949 default:
1950 MISSING_CASE(fb->modifier);
1951 return cpp;
1952 }
1953 }
1954
1955 static unsigned int
1956 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
1957 {
1958 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
1959 return 1;
1960 else
1961 return intel_tile_size(to_i915(fb->dev)) /
1962 intel_tile_width_bytes(fb, color_plane);
1963 }
1964
1965 /* Return the tile dimensions in pixel units */
1966 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
1967 unsigned int *tile_width,
1968 unsigned int *tile_height)
1969 {
1970 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1971 unsigned int cpp = fb->format->cpp[color_plane];
1972
1973 *tile_width = tile_width_bytes / cpp;
1974 *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
1975 }
1976
1977 unsigned int
1978 intel_fb_align_height(const struct drm_framebuffer *fb,
1979 int color_plane, unsigned int height)
1980 {
1981 unsigned int tile_height = intel_tile_height(fb, color_plane);
1982
1983 return ALIGN(height, tile_height);
1984 }
1985
1986 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1987 {
1988 unsigned int size = 0;
1989 int i;
1990
1991 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
1992 size += rot_info->plane[i].width * rot_info->plane[i].height;
1993
1994 return size;
1995 }
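/*
 * Illustrative note (assuming the 4 KiB tile size used on gen4+): the
 * value returned here is the rotated view size in tiles, and since each
 * 4 KiB tile occupies exactly one GGTT page it is also the number of
 * page table entries the rotated mapping consumes.
 */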
1996
1997 static void
1998 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
1999 const struct drm_framebuffer *fb,
2000 unsigned int rotation)
2001 {
2002 view->type = I915_GGTT_VIEW_NORMAL;
2003 if (drm_rotation_90_or_270(rotation)) {
2004 view->type = I915_GGTT_VIEW_ROTATED;
2005 view->rotated = to_intel_framebuffer(fb)->rot_info;
2006 }
2007 }
2008
2009 static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
2010 {
2011 if (IS_I830(dev_priv))
2012 return 16 * 1024;
2013 else if (IS_I85X(dev_priv))
2014 return 256;
2015 else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
2016 return 32;
2017 else
2018 return 4 * 1024;
2019 }
2020
2021 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
2022 {
2023 if (INTEL_GEN(dev_priv) >= 9)
2024 return 256 * 1024;
2025 else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
2026 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2027 return 128 * 1024;
2028 else if (INTEL_GEN(dev_priv) >= 4)
2029 return 4 * 1024;
2030 else
2031 return 0;
2032 }
2033
2034 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
2035 int color_plane)
2036 {
2037 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2038
2039 /* AUX_DIST needs only 4K alignment */
2040 if (color_plane == 1)
2041 return 4096;
2042
2043 switch (fb->modifier) {
2044 case DRM_FORMAT_MOD_LINEAR:
2045 return intel_linear_alignment(dev_priv);
2046 case I915_FORMAT_MOD_X_TILED:
2047 if (INTEL_GEN(dev_priv) >= 9)
2048 return 256 * 1024;
2049 return 0;
2050 case I915_FORMAT_MOD_Y_TILED_CCS:
2051 case I915_FORMAT_MOD_Yf_TILED_CCS:
2052 case I915_FORMAT_MOD_Y_TILED:
2053 case I915_FORMAT_MOD_Yf_TILED:
2054 return 1 * 1024 * 1024;
2055 default:
2056 MISSING_CASE(fb->modifier);
2057 return 0;
2058 }
2059 }
2060
2061 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2062 {
2063 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2064 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2065
2066 return INTEL_GEN(dev_priv) < 4 || plane->has_fbc;
2067 }
2068
2069 struct i915_vma *
2070 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2071 const struct i915_ggtt_view *view,
2072 bool uses_fence,
2073 unsigned long *out_flags)
2074 {
2075 struct drm_device *dev = fb->dev;
2076 struct drm_i915_private *dev_priv = to_i915(dev);
2077 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2078 intel_wakeref_t wakeref;
2079 struct i915_vma *vma;
2080 unsigned int pinctl;
2081 u32 alignment;
2082
2083 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2084
2085 alignment = intel_surf_alignment(fb, 0);
2086
2087 /* Note that the w/a also requires 64 PTE of padding following the
2088 * bo. We currently fill all unused PTE with the shadow page and so
2089 * we should always have valid PTE following the scanout preventing
2090 * the VT-d warning.
2091 */
2092 if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
2093 alignment = 256 * 1024;
2094
2095 /*
2096 * Global gtt pte registers are special registers which actually forward
2097 * writes to a chunk of system memory, which means that there is no risk
2098 * that the register values disappear as soon as we call
2099 * intel_runtime_pm_put(), so it is correct to wrap only the
2100 * pin/unpin/fence and not more.
2101 */
2102 wakeref = intel_runtime_pm_get(dev_priv);
2103
2104 atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
2105
2106 pinctl = 0;
2107
2108 /* Valleyview is definitely limited to scanning out the first
2109 * 512MiB. Let's presume this behaviour was inherited from the
2110 * g4x display engine and that all earlier gen are similarly
2111 * limited. Testing suggests that it is a little more
2112 * complicated than this. For example, Cherryview appears quite
2113 * happy to scanout from anywhere within its global aperture.
2114 */
2115 if (HAS_GMCH(dev_priv))
2116 pinctl |= PIN_MAPPABLE;
2117
2118 vma = i915_gem_object_pin_to_display_plane(obj,
2119 alignment, view, pinctl);
2120 if (IS_ERR(vma))
2121 goto err;
2122
2123 if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
2124 int ret;
2125
2126 /* Install a fence for tiled scan-out. Pre-i965 always needs a
2127 * fence, whereas 965+ only requires a fence if using
2128 * framebuffer compression. For simplicity, we always, when
2129 * possible, install a fence as the cost is not that onerous.
2130 *
2131 * If we fail to fence the tiled scanout, then either the
2132 * modeset will reject the change (which is highly unlikely as
2133 * the affected systems, all but one, do not have unmappable
2134 * space) or we will not be able to enable full powersaving
2135 * techniques (also likely not to apply due to various limits
2136 * FBC and the like impose on the size of the buffer, which
2137 * presumably we violated anyway with this unmappable buffer).
2138 * Anyway, it is presumably better to stumble onwards with
2139 * something and try to run the system in a "less than optimal"
2140 * mode that matches the user configuration.
2141 */
2142 ret = i915_vma_pin_fence(vma);
2143 if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
2144 i915_gem_object_unpin_from_display_plane(vma);
2145 vma = ERR_PTR(ret);
2146 goto err;
2147 }
2148
2149 if (ret == 0 && vma->fence)
2150 *out_flags |= PLANE_HAS_FENCE;
2151 }
2152
2153 i915_vma_get(vma);
2154 err:
2155 atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
2156
2157 intel_runtime_pm_put(dev_priv, wakeref);
2158 return vma;
2159 }
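/*
 * Typical usage, as a sketch (it mirrors the boot fb path further below;
 * error handling abbreviated):
 *
 *	vma = intel_pin_and_fence_fb_obj(fb, &view, uses_fence, &flags);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	...scan out...
 *	intel_unpin_fb_vma(vma, flags);
 *
 * Both calls must be made with struct_mutex held, per the WARN_ON above
 * and the lockdep assert in intel_unpin_fb_vma() below.
 */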
2160
2161 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
2162 {
2163 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
2164
2165 if (flags & PLANE_HAS_FENCE)
2166 i915_vma_unpin_fence(vma);
2167 i915_gem_object_unpin_from_display_plane(vma);
2168 i915_vma_put(vma);
2169 }
2170
2171 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2172 unsigned int rotation)
2173 {
2174 if (drm_rotation_90_or_270(rotation))
2175 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2176 else
2177 return fb->pitches[color_plane];
2178 }
2179
2180 /*
2181 * Convert the x/y offsets into a linear offset.
2182 * Only valid with 0/180 degree rotation, which is fine since linear
2183 * offset is only used with linear buffers on pre-hsw and tiled buffers
2184 * with gen2/3, and 90/270 degree rotation isn't supported on any of them.
2185 */
2186 u32 intel_fb_xy_to_linear(int x, int y,
2187 const struct intel_plane_state *state,
2188 int color_plane)
2189 {
2190 const struct drm_framebuffer *fb = state->base.fb;
2191 unsigned int cpp = fb->format->cpp[color_plane];
2192 unsigned int pitch = state->color_plane[color_plane].stride;
2193
2194 return y * pitch + x * cpp;
2195 }
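/*
 * Worked example with assumed values: for DRM_FORMAT_XRGB8888 (cpp = 4)
 * and a 4096 byte stride, the pixel at (x, y) = (10, 2) maps to the
 * linear offset 2 * 4096 + 10 * 4 = 8232 bytes.
 */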
2196
2197 /*
2198 * Add the x/y offsets derived from fb->offsets[] to the user
2199 * specified plane src x/y offsets. The resulting x/y offsets
2200 * specify the start of scanout from the beginning of the gtt mapping.
2201 */
2202 void intel_add_fb_offsets(int *x, int *y,
2203 const struct intel_plane_state *state,
2204 int color_plane)
2206 {
2207 const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
2208 unsigned int rotation = state->base.rotation;
2209
2210 if (drm_rotation_90_or_270(rotation)) {
2211 *x += intel_fb->rotated[color_plane].x;
2212 *y += intel_fb->rotated[color_plane].y;
2213 } else {
2214 *x += intel_fb->normal[color_plane].x;
2215 *y += intel_fb->normal[color_plane].y;
2216 }
2217 }
2218
2219 static u32 intel_adjust_tile_offset(int *x, int *y,
2220 unsigned int tile_width,
2221 unsigned int tile_height,
2222 unsigned int tile_size,
2223 unsigned int pitch_tiles,
2224 u32 old_offset,
2225 u32 new_offset)
2226 {
2227 unsigned int pitch_pixels = pitch_tiles * tile_width;
2228 unsigned int tiles;
2229
2230 WARN_ON(old_offset & (tile_size - 1));
2231 WARN_ON(new_offset & (tile_size - 1));
2232 WARN_ON(new_offset > old_offset);
2233
2234 tiles = (old_offset - new_offset) / tile_size;
2235
2236 *y += tiles / pitch_tiles * tile_height;
2237 *x += tiles % pitch_tiles * tile_width;
2238
2239 /* minimize x in case it got needlessly big */
2240 *y += *x / pitch_pixels * tile_height;
2241 *x %= pitch_pixels;
2242
2243 return new_offset;
2244 }
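/*
 * Worked example with assumed values: with 4 KiB tiles, 128x8 pixel tile
 * dimensions and pitch_tiles = 8, moving the offset down by 3 tiles
 * (old_offset - new_offset = 12288) adds 3 % 8 * 128 = 384 pixels to x
 * and 3 / 8 * 8 = 0 lines to y before the final x minimization.
 */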
2245
2246 static bool is_surface_linear(u64 modifier, int color_plane)
2247 {
2248 return modifier == DRM_FORMAT_MOD_LINEAR;
2249 }
2250
2251 static u32 intel_adjust_aligned_offset(int *x, int *y,
2252 const struct drm_framebuffer *fb,
2253 int color_plane,
2254 unsigned int rotation,
2255 unsigned int pitch,
2256 u32 old_offset, u32 new_offset)
2257 {
2258 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2259 unsigned int cpp = fb->format->cpp[color_plane];
2260
2261 WARN_ON(new_offset > old_offset);
2262
2263 if (!is_surface_linear(fb->modifier, color_plane)) {
2264 unsigned int tile_size, tile_width, tile_height;
2265 unsigned int pitch_tiles;
2266
2267 tile_size = intel_tile_size(dev_priv);
2268 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2269
2270 if (drm_rotation_90_or_270(rotation)) {
2271 pitch_tiles = pitch / tile_height;
2272 swap(tile_width, tile_height);
2273 } else {
2274 pitch_tiles = pitch / (tile_width * cpp);
2275 }
2276
2277 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2278 tile_size, pitch_tiles,
2279 old_offset, new_offset);
2280 } else {
2281 old_offset += *y * pitch + *x * cpp;
2282
2283 *y = (old_offset - new_offset) / pitch;
2284 *x = ((old_offset - new_offset) - *y * pitch) / cpp;
2285 }
2286
2287 return new_offset;
2288 }
2289
2290 /*
2291 * Adjust the tile offset by moving the difference into
2292 * the x/y offsets.
2293 */
2294 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2295 const struct intel_plane_state *state,
2296 int color_plane,
2297 u32 old_offset, u32 new_offset)
2298 {
2299 return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
2300 state->base.rotation,
2301 state->color_plane[color_plane].stride,
2302 old_offset, new_offset);
2303 }
2304
2305 /*
2306 * Computes the aligned offset to the base tile and adjusts
2307 * x, y. Bytes per pixel are assumed to be a power of two.
2308 *
2309 * In the 90/270 rotated case, x and y are assumed
2310 * to be already rotated to match the rotated GTT view, and
2311 * pitch is the tile_height aligned framebuffer height.
2312 *
2313 * This function is used when computing the derived information
2314 * under intel_framebuffer, so using any of that information
2315 * here is not allowed. Anything under drm_framebuffer can be
2316 * used. This is why the user has to pass in the pitch since it
2317 * is specified in the rotated orientation.
2318 */
2319 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
2320 int *x, int *y,
2321 const struct drm_framebuffer *fb,
2322 int color_plane,
2323 unsigned int pitch,
2324 unsigned int rotation,
2325 u32 alignment)
2326 {
2327 unsigned int cpp = fb->format->cpp[color_plane];
2328 u32 offset, offset_aligned;
2329
2330 if (alignment)
2331 alignment--;
2332
2333 if (!is_surface_linear(fb->modifier, color_plane)) {
2334 unsigned int tile_size, tile_width, tile_height;
2335 unsigned int tile_rows, tiles, pitch_tiles;
2336
2337 tile_size = intel_tile_size(dev_priv);
2338 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2339
2340 if (drm_rotation_90_or_270(rotation)) {
2341 pitch_tiles = pitch / tile_height;
2342 swap(tile_width, tile_height);
2343 } else {
2344 pitch_tiles = pitch / (tile_width * cpp);
2345 }
2346
2347 tile_rows = *y / tile_height;
2348 *y %= tile_height;
2349
2350 tiles = *x / tile_width;
2351 *x %= tile_width;
2352
2353 offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2354 offset_aligned = offset & ~alignment;
2355
2356 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2357 tile_size, pitch_tiles,
2358 offset, offset_aligned);
2359 } else {
2360 offset = *y * pitch + *x * cpp;
2361 offset_aligned = offset & ~alignment;
2362
2363 *y = (offset & alignment) / pitch;
2364 *x = ((offset & alignment) - *y * pitch) / cpp;
2365 }
2366
2367 return offset_aligned;
2368 }
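/*
 * Worked example for the linear branch, with assumed values: pitch = 4096,
 * cpp = 4, alignment = 256 KiB and (x, y) = (100, 50) give
 * offset = 50 * 4096 + 100 * 4 = 205200. That is below the alignment, so
 * offset_aligned = 0 and the whole remainder is pushed back into the
 * coordinates: y = 205200 / 4096 = 50, x = (205200 - 50 * 4096) / 4 = 100.
 */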
2369
2370 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2371 const struct intel_plane_state *state,
2372 int color_plane)
2373 {
2374 struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
2375 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2376 const struct drm_framebuffer *fb = state->base.fb;
2377 unsigned int rotation = state->base.rotation;
2378 int pitch = state->color_plane[color_plane].stride;
2379 u32 alignment;
2380
2381 if (intel_plane->id == PLANE_CURSOR)
2382 alignment = intel_cursor_alignment(dev_priv);
2383 else
2384 alignment = intel_surf_alignment(fb, color_plane);
2385
2386 return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2387 pitch, rotation, alignment);
2388 }
2389
2390 /* Convert the fb->offsets[] into x/y offsets */
2391 static int intel_fb_offset_to_xy(int *x, int *y,
2392 const struct drm_framebuffer *fb,
2393 int color_plane)
2394 {
2395 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2396 unsigned int height;
2397
2398 if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
2399 fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
2400 DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
2401 fb->offsets[color_plane], color_plane);
2402 return -EINVAL;
2403 }
2404
2405 height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
2406 height = ALIGN(height, intel_tile_height(fb, color_plane));
2407
2408 /* Catch potential overflows early */
2409 if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
2410 fb->offsets[color_plane])) {
2411 DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
2412 fb->offsets[color_plane], fb->pitches[color_plane],
2413 color_plane);
2414 return -ERANGE;
2415 }
2416
2417 *x = 0;
2418 *y = 0;
2419
2420 intel_adjust_aligned_offset(x, y,
2421 fb, color_plane, DRM_MODE_ROTATE_0,
2422 fb->pitches[color_plane],
2423 fb->offsets[color_plane], 0);
2424
2425 return 0;
2426 }
2427
2428 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2429 {
2430 switch (fb_modifier) {
2431 case I915_FORMAT_MOD_X_TILED:
2432 return I915_TILING_X;
2433 case I915_FORMAT_MOD_Y_TILED:
2434 case I915_FORMAT_MOD_Y_TILED_CCS:
2435 return I915_TILING_Y;
2436 default:
2437 return I915_TILING_NONE;
2438 }
2439 }
2440
2441 /*
2442 * From the Sky Lake PRM:
2443 * "The Color Control Surface (CCS) contains the compression status of
2444 * the cache-line pairs. The compression state of the cache-line pair
2445 * is specified by 2 bits in the CCS. Each CCS cache-line represents
2446 * an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2447 * cache-line-pairs. CCS is always Y tiled."
2448 *
2449 * Since cache-line pairs refer to horizontally adjacent cache lines,
2450 * each cache line in the CCS corresponds to an area of 32x16 cache
2451 * lines on the main surface. Since each pixel is 4 bytes, this gives
2452 * us a ratio of one byte in the CCS for each 8x16 pixels in the
2453 * main surface.
2454 */
2455 static const struct drm_format_info ccs_formats[] = {
2456 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2457 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2458 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2459 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2460 };
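/*
 * Worked example with assumed values: a 3840x2160 XRGB8888 framebuffer
 * therefore needs a CCS plane of 3840 / 8 = 480 bytes per row and
 * 2160 / 16 = 135 rows, i.e. roughly 64 KiB of compression metadata for
 * a ~32 MiB main surface.
 */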
2461
2462 static const struct drm_format_info *
2463 lookup_format_info(const struct drm_format_info formats[],
2464 int num_formats, u32 format)
2465 {
2466 int i;
2467
2468 for (i = 0; i < num_formats; i++) {
2469 if (formats[i].format == format)
2470 return &formats[i];
2471 }
2472
2473 return NULL;
2474 }
2475
2476 static const struct drm_format_info *
2477 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2478 {
2479 switch (cmd->modifier[0]) {
2480 case I915_FORMAT_MOD_Y_TILED_CCS:
2481 case I915_FORMAT_MOD_Yf_TILED_CCS:
2482 return lookup_format_info(ccs_formats,
2483 ARRAY_SIZE(ccs_formats),
2484 cmd->pixel_format);
2485 default:
2486 return NULL;
2487 }
2488 }
2489
2490 bool is_ccs_modifier(u64 modifier)
2491 {
2492 return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2493 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2494 }
2495
2496 static int
2497 intel_fill_fb_info(struct drm_i915_private *dev_priv,
2498 struct drm_framebuffer *fb)
2499 {
2500 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2501 struct intel_rotation_info *rot_info = &intel_fb->rot_info;
2502 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2503 u32 gtt_offset_rotated = 0;
2504 unsigned int max_size = 0;
2505 int i, num_planes = fb->format->num_planes;
2506 unsigned int tile_size = intel_tile_size(dev_priv);
2507
2508 for (i = 0; i < num_planes; i++) {
2509 unsigned int width, height;
2510 unsigned int cpp, size;
2511 u32 offset;
2512 int x, y;
2513 int ret;
2514
2515 cpp = fb->format->cpp[i];
2516 width = drm_framebuffer_plane_width(fb->width, fb, i);
2517 height = drm_framebuffer_plane_height(fb->height, fb, i);
2518
2519 ret = intel_fb_offset_to_xy(&x, &y, fb, i);
2520 if (ret) {
2521 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2522 i, fb->offsets[i]);
2523 return ret;
2524 }
2525
2526 if (is_ccs_modifier(fb->modifier) && i == 1) {
2527 int hsub = fb->format->hsub;
2528 int vsub = fb->format->vsub;
2529 int tile_width, tile_height;
2530 int main_x, main_y;
2531 int ccs_x, ccs_y;
2532
2533 intel_tile_dims(fb, i, &tile_width, &tile_height);
2534 tile_width *= hsub;
2535 tile_height *= vsub;
2536
2537 ccs_x = (x * hsub) % tile_width;
2538 ccs_y = (y * vsub) % tile_height;
2539 main_x = intel_fb->normal[0].x % tile_width;
2540 main_y = intel_fb->normal[0].y % tile_height;
2541
2542 /*
2543 * CCS doesn't have its own x/y offset register, so the intra CCS tile
2544 * x/y offsets must match between CCS and the main surface.
2545 */
2546 if (main_x != ccs_x || main_y != ccs_y) {
2547 DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
2548 main_x, main_y,
2549 ccs_x, ccs_y,
2550 intel_fb->normal[0].x,
2551 intel_fb->normal[0].y,
2552 x, y);
2553 return -EINVAL;
2554 }
2555 }
2556
2557 /*
2558 * The fence (if used) is aligned to the start of the object
2559 * so having the framebuffer wrap around across the edge of the
2560 * fenced region doesn't really work. We have no API to configure
2561 * the fence start offset within the object (nor could we probably
2562 * on gen2/3). So it's easier to just require that the
2563 * fb layout agrees with the fence layout. We already check that the
2564 * fb stride matches the fence stride elsewhere.
2565 */
2566 if (i == 0 && i915_gem_object_is_tiled(obj) &&
2567 (x + width) * cpp > fb->pitches[i]) {
2568 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2569 i, fb->offsets[i]);
2570 return -EINVAL;
2571 }
2572
2573 /*
2574 * First pixel of the framebuffer from
2575 * the start of the normal gtt mapping.
2576 */
2577 intel_fb->normal[i].x = x;
2578 intel_fb->normal[i].y = y;
2579
2580 offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
2581 fb->pitches[i],
2582 DRM_MODE_ROTATE_0,
2583 tile_size);
2584 offset /= tile_size;
2585
2586 if (!is_surface_linear(fb->modifier, i)) {
2587 unsigned int tile_width, tile_height;
2588 unsigned int pitch_tiles;
2589 struct drm_rect r;
2590
2591 intel_tile_dims(fb, i, &tile_width, &tile_height);
2592
2593 rot_info->plane[i].offset = offset;
2594 rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
2595 rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2596 rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2597
2598 intel_fb->rotated[i].pitch =
2599 rot_info->plane[i].height * tile_height;
2600
2601 /* how many tiles does this plane need */
2602 size = rot_info->plane[i].stride * rot_info->plane[i].height;
2603 /*
2604 * If the plane isn't horizontally tile aligned,
2605 * we need one more tile.
2606 */
2607 if (x != 0)
2608 size++;
2609
2610 /* rotate the x/y offsets to match the GTT view */
2611 r.x1 = x;
2612 r.y1 = y;
2613 r.x2 = x + width;
2614 r.y2 = y + height;
2615 drm_rect_rotate(&r,
2616 rot_info->plane[i].width * tile_width,
2617 rot_info->plane[i].height * tile_height,
2618 DRM_MODE_ROTATE_270);
2619 x = r.x1;
2620 y = r.y1;
2621
2622 /* rotate the tile dimensions to match the GTT view */
2623 pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
2624 swap(tile_width, tile_height);
2625
2626 /*
2627 * We only keep the x/y offsets, so push all of the
2628 * gtt offset into the x/y offsets.
2629 */
2630 intel_adjust_tile_offset(&x, &y,
2631 tile_width, tile_height,
2632 tile_size, pitch_tiles,
2633 gtt_offset_rotated * tile_size, 0);
2634
2635 gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
2636
2637 /*
2638 * First pixel of the framebuffer from
2639 * the start of the rotated gtt mapping.
2640 */
2641 intel_fb->rotated[i].x = x;
2642 intel_fb->rotated[i].y = y;
2643 } else {
2644 size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
2645 x * cpp, tile_size);
2646 }
2647
2648 /* how many tiles in total needed in the bo */
2649 max_size = max(max_size, offset + size);
2650 }
2651
2652 if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
2653 DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
2654 mul_u32_u32(max_size, tile_size), obj->base.size);
2655 return -EINVAL;
2656 }
2657
2658 return 0;
2659 }
2660
2661 static int i9xx_format_to_fourcc(int format)
2662 {
2663 switch (format) {
2664 case DISPPLANE_8BPP:
2665 return DRM_FORMAT_C8;
2666 case DISPPLANE_BGRX555:
2667 return DRM_FORMAT_XRGB1555;
2668 case DISPPLANE_BGRX565:
2669 return DRM_FORMAT_RGB565;
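/* Unknown values decode as XRGB8888: the default label shares the BGRX888 case below. */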
2670 default:
2671 case DISPPLANE_BGRX888:
2672 return DRM_FORMAT_XRGB8888;
2673 case DISPPLANE_RGBX888:
2674 return DRM_FORMAT_XBGR8888;
2675 case DISPPLANE_BGRX101010:
2676 return DRM_FORMAT_XRGB2101010;
2677 case DISPPLANE_RGBX101010:
2678 return DRM_FORMAT_XBGR2101010;
2679 }
2680 }
2681
2682 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2683 {
2684 switch (format) {
2685 case PLANE_CTL_FORMAT_RGB_565:
2686 return DRM_FORMAT_RGB565;
2687 case PLANE_CTL_FORMAT_NV12:
2688 return DRM_FORMAT_NV12;
2689 case PLANE_CTL_FORMAT_P010:
2690 return DRM_FORMAT_P010;
2691 case PLANE_CTL_FORMAT_P012:
2692 return DRM_FORMAT_P012;
2693 case PLANE_CTL_FORMAT_P016:
2694 return DRM_FORMAT_P016;
2695 case PLANE_CTL_FORMAT_Y210:
2696 return DRM_FORMAT_Y210;
2697 case PLANE_CTL_FORMAT_Y212:
2698 return DRM_FORMAT_Y212;
2699 case PLANE_CTL_FORMAT_Y216:
2700 return DRM_FORMAT_Y216;
2701 case PLANE_CTL_FORMAT_Y410:
2702 return DRM_FORMAT_XVYU2101010;
2703 case PLANE_CTL_FORMAT_Y412:
2704 return DRM_FORMAT_XVYU12_16161616;
2705 case PLANE_CTL_FORMAT_Y416:
2706 return DRM_FORMAT_XVYU16161616;
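/* As above, unknown values decode via the XRGB_8888 case that the default label shares. */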
2707 default:
2708 case PLANE_CTL_FORMAT_XRGB_8888:
2709 if (rgb_order) {
2710 if (alpha)
2711 return DRM_FORMAT_ABGR8888;
2712 else
2713 return DRM_FORMAT_XBGR8888;
2714 } else {
2715 if (alpha)
2716 return DRM_FORMAT_ARGB8888;
2717 else
2718 return DRM_FORMAT_XRGB8888;
2719 }
2720 case PLANE_CTL_FORMAT_XRGB_2101010:
2721 if (rgb_order)
2722 return DRM_FORMAT_XBGR2101010;
2723 else
2724 return DRM_FORMAT_XRGB2101010;
2725 case PLANE_CTL_FORMAT_XRGB_16161616F:
2726 if (rgb_order) {
2727 if (alpha)
2728 return DRM_FORMAT_ABGR16161616F;
2729 else
2730 return DRM_FORMAT_XBGR16161616F;
2731 } else {
2732 if (alpha)
2733 return DRM_FORMAT_ARGB16161616F;
2734 else
2735 return DRM_FORMAT_XRGB16161616F;
2736 }
2737 }
2738 }
2739
2740 static bool
2741 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2742 struct intel_initial_plane_config *plane_config)
2743 {
2744 struct drm_device *dev = crtc->base.dev;
2745 struct drm_i915_private *dev_priv = to_i915(dev);
2746 struct drm_i915_gem_object *obj = NULL;
2747 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2748 struct drm_framebuffer *fb = &plane_config->fb->base;
2749 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
2750 u32 size_aligned = round_up(plane_config->base + plane_config->size,
2751 PAGE_SIZE);
2752
2753 size_aligned -= base_aligned;
2754
2755 if (plane_config->size == 0)
2756 return false;
2757
2758 /* If the FB is too big, just don't use it since fbdev is not very
2759 * important and we should probably use that space with FBC or other
2760 * features. */
2761 if (size_aligned * 2 > dev_priv->stolen_usable_size)
2762 return false;
2763
2764 switch (fb->modifier) {
2765 case DRM_FORMAT_MOD_LINEAR:
2766 case I915_FORMAT_MOD_X_TILED:
2767 case I915_FORMAT_MOD_Y_TILED:
2768 break;
2769 default:
2770 DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
2771 fb->modifier);
2772 return false;
2773 }
2774
2775 mutex_lock(&dev->struct_mutex);
2776 obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
2777 base_aligned,
2778 base_aligned,
2779 size_aligned);
2780 mutex_unlock(&dev->struct_mutex);
2781 if (!obj)
2782 return false;
2783
2784 switch (plane_config->tiling) {
2785 case I915_TILING_NONE:
2786 break;
2787 case I915_TILING_X:
2788 case I915_TILING_Y:
2789 obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
2790 break;
2791 default:
2792 MISSING_CASE(plane_config->tiling);
2793 return false;
2794 }
2795
2796 mode_cmd.pixel_format = fb->format->format;
2797 mode_cmd.width = fb->width;
2798 mode_cmd.height = fb->height;
2799 mode_cmd.pitches[0] = fb->pitches[0];
2800 mode_cmd.modifier[0] = fb->modifier;
2801 mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
2802
2803 if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
2804 DRM_DEBUG_KMS("intel fb init failed\n");
2805 goto out_unref_obj;
2806 }
2807
2809 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
2810 return true;
2811
2812 out_unref_obj:
2813 i915_gem_object_put(obj);
2814 return false;
2815 }
2816
2817 static void
2818 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2819 struct intel_plane_state *plane_state,
2820 bool visible)
2821 {
2822 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2823
2824 plane_state->base.visible = visible;
2825
2826 if (visible)
2827 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
2828 else
2829 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
2830 }
2831
2832 static void fixup_active_planes(struct intel_crtc_state *crtc_state)
2833 {
2834 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2835 struct drm_plane *plane;
2836
2837 /*
2838 * active_planes aliases if multiple "primary" or cursor planes
2839 * have been used on the same (or wrong) pipe. plane_mask uses
2840 * unique plane ids, hence we can use it to reconstruct active_planes.
2841 */
2842 crtc_state->active_planes = 0;
2843
2844 drm_for_each_plane_mask(plane, &dev_priv->drm,
2845 crtc_state->base.plane_mask)
2846 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
2847 }
2848
2849 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
2850 struct intel_plane *plane)
2851 {
2852 struct intel_crtc_state *crtc_state =
2853 to_intel_crtc_state(crtc->base.state);
2854 struct intel_plane_state *plane_state =
2855 to_intel_plane_state(plane->base.state);
2856
2857 DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
2858 plane->base.base.id, plane->base.name,
2859 crtc->base.base.id, crtc->base.name);
2860
2861 intel_set_plane_visible(crtc_state, plane_state, false);
2862 fixup_active_planes(crtc_state);
2863
2864 if (plane->id == PLANE_PRIMARY)
2865 intel_pre_disable_primary_noatomic(&crtc->base);
2866
2867 intel_disable_plane(plane, crtc_state);
2868 }
2869
2870 static void
2871 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2872 struct intel_initial_plane_config *plane_config)
2873 {
2874 struct drm_device *dev = intel_crtc->base.dev;
2875 struct drm_i915_private *dev_priv = to_i915(dev);
2876 struct drm_crtc *c;
2877 struct drm_i915_gem_object *obj;
2878 struct drm_plane *primary = intel_crtc->base.primary;
2879 struct drm_plane_state *plane_state = primary->state;
2880 struct intel_plane *intel_plane = to_intel_plane(primary);
2881 struct intel_plane_state *intel_state =
2882 to_intel_plane_state(plane_state);
2883 struct drm_framebuffer *fb;
2884
2885 if (!plane_config->fb)
2886 return;
2887
2888 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
2889 fb = &plane_config->fb->base;
2890 goto valid_fb;
2891 }
2892
2893 kfree(plane_config->fb);
2894
2895 /*
2896 * Failed to alloc the obj, check to see if we should share
2897 * an fb with another CRTC instead
2898 */
2899 for_each_crtc(dev, c) {
2900 struct intel_plane_state *state;
2901
2902 if (c == &intel_crtc->base)
2903 continue;
2904
2905 if (!to_intel_crtc(c)->active)
2906 continue;
2907
2908 state = to_intel_plane_state(c->primary->state);
2909 if (!state->vma)
2910 continue;
2911
2912 if (intel_plane_ggtt_offset(state) == plane_config->base) {
2913 fb = state->base.fb;
2914 drm_framebuffer_get(fb);
2915 goto valid_fb;
2916 }
2917 }
2918
2919 /*
2920 * We've failed to reconstruct the BIOS FB. Current display state
2921 * indicates that the primary plane is visible, but has a NULL FB,
2922 * which will lead to problems later if we don't fix it up. The
2923 * simplest solution is to just disable the primary plane now and
2924 * pretend the BIOS never had it enabled.
2925 */
2926 intel_plane_disable_noatomic(intel_crtc, intel_plane);
2927
2928 return;
2929
2930 valid_fb:
2931 intel_state->base.rotation = plane_config->rotation;
2932 intel_fill_fb_ggtt_view(&intel_state->view, fb,
2933 intel_state->base.rotation);
2934 intel_state->color_plane[0].stride =
2935 intel_fb_pitch(fb, 0, intel_state->base.rotation);
2936
2937 mutex_lock(&dev->struct_mutex);
2938 intel_state->vma =
2939 intel_pin_and_fence_fb_obj(fb,
2940 &intel_state->view,
2941 intel_plane_uses_fence(intel_state),
2942 &intel_state->flags);
2943 mutex_unlock(&dev->struct_mutex);
2944 if (IS_ERR(intel_state->vma)) {
2945 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
2946 intel_crtc->pipe, PTR_ERR(intel_state->vma));
2947
2948 intel_state->vma = NULL;
2949 drm_framebuffer_put(fb);
2950 return;
2951 }
2952
2953 obj = intel_fb_obj(fb);
2954 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
2955
2956 plane_state->src_x = 0;
2957 plane_state->src_y = 0;
2958 plane_state->src_w = fb->width << 16;
2959 plane_state->src_h = fb->height << 16;
2960
2961 plane_state->crtc_x = 0;
2962 plane_state->crtc_y = 0;
2963 plane_state->crtc_w = fb->width;
2964 plane_state->crtc_h = fb->height;
2965
2966 intel_state->base.src = drm_plane_state_src(plane_state);
2967 intel_state->base.dst = drm_plane_state_dest(plane_state);
2968
2969 if (i915_gem_object_is_tiled(obj))
2970 dev_priv->preserve_bios_swizzle = true;
2971
2972 plane_state->fb = fb;
2973 plane_state->crtc = &intel_crtc->base;
2974
2975 atomic_or(to_intel_plane(primary)->frontbuffer_bit,
2976 &obj->frontbuffer_bits);
2977 }
2978
2979 static int skl_max_plane_width(const struct drm_framebuffer *fb,
2980 int color_plane,
2981 unsigned int rotation)
2982 {
2983 int cpp = fb->format->cpp[color_plane];
2984
2985 switch (fb->modifier) {
2986 case DRM_FORMAT_MOD_LINEAR:
2987 case I915_FORMAT_MOD_X_TILED:
2988 return 4096;
2989 case I915_FORMAT_MOD_Y_TILED_CCS:
2990 case I915_FORMAT_MOD_Yf_TILED_CCS:
2991 /* FIXME AUX plane? */
2992 case I915_FORMAT_MOD_Y_TILED:
2993 case I915_FORMAT_MOD_Yf_TILED:
2994 if (cpp == 8)
2995 return 2048;
2996 else
2997 return 4096;
2998 default:
2999 MISSING_CASE(fb->modifier);
3000 return 2048;
3001 }
3002 }
3003
3004 static int glk_max_plane_width(const struct drm_framebuffer *fb,
3005 int color_plane,
3006 unsigned int rotation)
3007 {
3008 int cpp = fb->format->cpp[color_plane];
3009
3010 switch (fb->modifier) {
3011 case DRM_FORMAT_MOD_LINEAR:
3012 case I915_FORMAT_MOD_X_TILED:
3013 if (cpp == 8)
3014 return 4096;
3015 else
3016 return 5120;
3017 case I915_FORMAT_MOD_Y_TILED_CCS:
3018 case I915_FORMAT_MOD_Yf_TILED_CCS:
3019 /* FIXME AUX plane? */
3020 case I915_FORMAT_MOD_Y_TILED:
3021 case I915_FORMAT_MOD_Yf_TILED:
3022 if (cpp == 8)
3023 return 2048;
3024 else
3025 return 5120;
3026 default:
3027 MISSING_CASE(fb->modifier);
3028 return 2048;
3029 }
3030 }
3031
3032 static int icl_max_plane_width(const struct drm_framebuffer *fb,
3033 int color_plane,
3034 unsigned int rotation)
3035 {
3036 return 5120;
3037 }
3038
3039 static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
3040 int main_x, int main_y, u32 main_offset)
3041 {
3042 const struct drm_framebuffer *fb = plane_state->base.fb;
3043 int hsub = fb->format->hsub;
3044 int vsub = fb->format->vsub;
3045 int aux_x = plane_state->color_plane[1].x;
3046 int aux_y = plane_state->color_plane[1].y;
3047 u32 aux_offset = plane_state->color_plane[1].offset;
3048 u32 alignment = intel_surf_alignment(fb, 1);
3049
3050 while (aux_offset >= main_offset && aux_y <= main_y) {
3051 int x, y;
3052
3053 if (aux_x == main_x && aux_y == main_y)
3054 break;
3055
3056 if (aux_offset == 0)
3057 break;
3058
3059 x = aux_x / hsub;
3060 y = aux_y / vsub;
3061 aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
3062 aux_offset, aux_offset - alignment);
3063 aux_x = x * hsub + aux_x % hsub;
3064 aux_y = y * vsub + aux_y % vsub;
3065 }
3066
3067 if (aux_x != main_x || aux_y != main_y)
3068 return false;
3069
3070 plane_state->color_plane[1].offset = aux_offset;
3071 plane_state->color_plane[1].x = aux_x;
3072 plane_state->color_plane[1].y = aux_y;
3073
3074 return true;
3075 }
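/*
 * The loop above is easiest to read as a walk: each iteration moves the
 * AUX offset down by one alignment unit (4 KiB for AUX surfaces, per
 * intel_surf_alignment()) and converts the difference into an x/y bias
 * on the subsampled coordinates, until the intra-tile position of the
 * AUX surface lines up with the main surface or offset 0 is reached.
 */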
3076
3077 static int skl_check_main_surface(struct intel_plane_state *plane_state)
3078 {
3079 struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
3080 const struct drm_framebuffer *fb = plane_state->base.fb;
3081 unsigned int rotation = plane_state->base.rotation;
3082 int x = plane_state->base.src.x1 >> 16;
3083 int y = plane_state->base.src.y1 >> 16;
3084 int w = drm_rect_width(&plane_state->base.src) >> 16;
3085 int h = drm_rect_height(&plane_state->base.src) >> 16;
3086 int max_width;
3087 int max_height = 4096;
3088 u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;
3089
3090 if (INTEL_GEN(dev_priv) >= 11)
3091 max_width = icl_max_plane_width(fb, 0, rotation);
3092 else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
3093 max_width = glk_max_plane_width(fb, 0, rotation);
3094 else
3095 max_width = skl_max_plane_width(fb, 0, rotation);
3096
3097 if (w > max_width || h > max_height) {
3098 DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
3099 w, h, max_width, max_height);
3100 return -EINVAL;
3101 }
3102
3103 intel_add_fb_offsets(&x, &y, plane_state, 0);
3104 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
3105 alignment = intel_surf_alignment(fb, 0);
3106
3107 /*
3108 * AUX surface offset is specified as the distance from the
3109 * main surface offset, and it must be non-negative. Make
3110 * sure that is what we will get.
3111 */
3112 if (offset > aux_offset)
3113 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3114 offset, aux_offset & ~(alignment - 1));
3115
3116 /*
3117 * When using an X-tiled surface, the plane blows up
3118 * if the x offset + width exceeds the stride.
3119 *
3120 * TODO: linear and Y-tiled seem fine, Yf untested.
3121 */
3122 if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
3123 int cpp = fb->format->cpp[0];
3124
3125 while ((x + w) * cpp > plane_state->color_plane[0].stride) {
3126 if (offset == 0) {
3127 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
3128 return -EINVAL;
3129 }
3130
3131 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3132 offset, offset - alignment);
3133 }
3134 }
3135
3136 /*
3137 * CCS AUX surface doesn't have its own x/y offsets, so we must make sure
3138 * they match with the main surface x/y offsets.
3139 */
3140 if (is_ccs_modifier(fb->modifier)) {
3141 while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
3142 if (offset == 0)
3143 break;
3144
3145 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3146 offset, offset - alignment);
3147 }
3148
3149 if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
3150 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
3151 return -EINVAL;
3152 }
3153 }
3154
3155 plane_state->color_plane[0].offset = offset;
3156 plane_state->color_plane[0].x = x;
3157 plane_state->color_plane[0].y = y;
3158
3159 return 0;
3160 }
3161
3162 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
3163 {
3164 const struct drm_framebuffer *fb = plane_state->base.fb;
3165 unsigned int rotation = plane_state->base.rotation;
3166 int max_width = skl_max_plane_width(fb, 1, rotation);
3167 int max_height = 4096;
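/* The chroma plane is subsampled by two in each direction, hence >> 17 (i.e. (>> 16) / 2) */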
3168 int x = plane_state->base.src.x1 >> 17;
3169 int y = plane_state->base.src.y1 >> 17;
3170 int w = drm_rect_width(&plane_state->base.src) >> 17;
3171 int h = drm_rect_height(&plane_state->base.src) >> 17;
3172 u32 offset;
3173
3174 intel_add_fb_offsets(&x, &y, plane_state, 1);
3175 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
3176
3177 /* FIXME not quite sure how/if these apply to the chroma plane */
3178 if (w > max_width || h > max_height) {
3179 DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
3180 w, h, max_width, max_height);
3181 return -EINVAL;
3182 }
3183
3184 plane_state->color_plane[1].offset = offset;
3185 plane_state->color_plane[1].x = x;
3186 plane_state->color_plane[1].y = y;
3187
3188 return 0;
3189 }
3190
3191 static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
3192 {
3193 const struct drm_framebuffer *fb = plane_state->base.fb;
3194 int src_x = plane_state->base.src.x1 >> 16;
3195 int src_y = plane_state->base.src.y1 >> 16;
3196 int hsub = fb->format->hsub;
3197 int vsub = fb->format->vsub;
3198 int x = src_x / hsub;
3199 int y = src_y / vsub;
3200 u32 offset;
3201
3202 intel_add_fb_offsets(&x, &y, plane_state, 1);
3203 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
3204
3205 plane_state->color_plane[1].offset = offset;
3206 plane_state->color_plane[1].x = x * hsub + src_x % hsub;
3207 plane_state->color_plane[1].y = y * vsub + src_y % vsub;
3208
3209 return 0;
3210 }
3211
3212 int skl_check_plane_surface(struct intel_plane_state *plane_state)
3213 {
3214 const struct drm_framebuffer *fb = plane_state->base.fb;
3215 unsigned int rotation = plane_state->base.rotation;
3216 int ret;
3217
3218 intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
3219 plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
3220 plane_state->color_plane[1].stride = intel_fb_pitch(fb, 1, rotation);
3221
3222 ret = intel_plane_check_stride(plane_state);
3223 if (ret)
3224 return ret;
3225
3226 if (!plane_state->base.visible)
3227 return 0;
3228
3229 /* Rotate src coordinates to match rotated GTT view */
3230 if (drm_rotation_90_or_270(rotation))
3231 drm_rect_rotate(&plane_state->base.src,
3232 fb->width << 16, fb->height << 16,
3233 DRM_MODE_ROTATE_270);
3234
3235 /*
3236 * Handle the AUX surface first since
3237 * the main surface setup depends on it.
3238 */
3239 if (is_planar_yuv_format(fb->format->format)) {
3240 ret = skl_check_nv12_aux_surface(plane_state);
3241 if (ret)
3242 return ret;
3243 } else if (is_ccs_modifier(fb->modifier)) {
3244 ret = skl_check_ccs_aux_surface(plane_state);
3245 if (ret)
3246 return ret;
3247 } else {
3248 plane_state->color_plane[1].offset = ~0xfff;
3249 plane_state->color_plane[1].x = 0;
3250 plane_state->color_plane[1].y = 0;
3251 }
3252
3253 ret = skl_check_main_surface(plane_state);
3254 if (ret)
3255 return ret;
3256
3257 return 0;
3258 }
3259
3260 unsigned int
3261 i9xx_plane_max_stride(struct intel_plane *plane,
3262 u32 pixel_format, u64 modifier,
3263 unsigned int rotation)
3264 {
3265 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3266
3267 if (!HAS_GMCH(dev_priv)) {
3268 return 32 * 1024;
3269 } else if (INTEL_GEN(dev_priv) >= 4) {
3270 if (modifier == I915_FORMAT_MOD_X_TILED)
3271 return 16 * 1024;
3272 else
3273 return 32 * 1024;
3274 } else if (INTEL_GEN(dev_priv) >= 3) {
3275 if (modifier == I915_FORMAT_MOD_X_TILED)
3276 return 8 * 1024;
3277 else
3278 return 16 * 1024;
3279 } else {
3280 if (plane->i9xx_plane == PLANE_C)
3281 return 4 * 1024;
3282 else
3283 return 8 * 1024;
3284 }
3285 }
3286
3287 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
3288 {
3289 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
3290 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3291 u32 dspcntr = 0;
3292
3293 if (crtc_state->gamma_enable)
3294 dspcntr |= DISPPLANE_GAMMA_ENABLE;
3295
3296 if (crtc_state->csc_enable)
3297 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
3298
3299 if (INTEL_GEN(dev_priv) < 5)
3300 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
3301
3302 return dspcntr;
3303 }
3304
3305 static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
3306 const struct intel_plane_state *plane_state)
3307 {
3308 struct drm_i915_private *dev_priv =
3309 to_i915(plane_state->base.plane->dev);
3310 const struct drm_framebuffer *fb = plane_state->base.fb;
3311 unsigned int rotation = plane_state->base.rotation;
3312 u32 dspcntr;
3313
3314 dspcntr = DISPLAY_PLANE_ENABLE;
3315
3316 if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
3317 IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
3318 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
3319
3320 switch (fb->format->format) {
3321 case DRM_FORMAT_C8:
3322 dspcntr |= DISPPLANE_8BPP;
3323 break;
3324 case DRM_FORMAT_XRGB1555:
3325 dspcntr |= DISPPLANE_BGRX555;
3326 break;
3327 case DRM_FORMAT_RGB565:
3328 dspcntr |= DISPPLANE_BGRX565;
3329 break;
3330 case DRM_FORMAT_XRGB8888:
3331 dspcntr |= DISPPLANE_BGRX888;
3332 break;
3333 case DRM_FORMAT_XBGR8888:
3334 dspcntr |= DISPPLANE_RGBX888;
3335 break;
3336 case DRM_FORMAT_XRGB2101010:
3337 dspcntr |= DISPPLANE_BGRX101010;
3338 break;
3339 case DRM_FORMAT_XBGR2101010:
3340 dspcntr |= DISPPLANE_RGBX101010;
3341 break;
3342 default:
3343 MISSING_CASE(fb->format->format);
3344 return 0;
3345 }
3346
3347 if (INTEL_GEN(dev_priv) >= 4 &&
3348 fb->modifier == I915_FORMAT_MOD_X_TILED)
3349 dspcntr |= DISPPLANE_TILED;
3350
3351 if (rotation & DRM_MODE_ROTATE_180)
3352 dspcntr |= DISPPLANE_ROTATE_180;
3353
3354 if (rotation & DRM_MODE_REFLECT_X)
3355 dspcntr |= DISPPLANE_MIRROR;
3356
3357 return dspcntr;
3358 }
3359
3360 int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
3361 {
3362 struct drm_i915_private *dev_priv =
3363 to_i915(plane_state->base.plane->dev);
3364 const struct drm_framebuffer *fb = plane_state->base.fb;
3365 unsigned int rotation = plane_state->base.rotation;
3366 int src_x = plane_state->base.src.x1 >> 16;
3367 int src_y = plane_state->base.src.y1 >> 16;
3368 u32 offset;
3369 int ret;
3370
3371 intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
3372 plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
3373
3374 ret = intel_plane_check_stride(plane_state);
3375 if (ret)
3376 return ret;
3377
3378 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
3379
3380 if (INTEL_GEN(dev_priv) >= 4)
3381 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
3382 plane_state, 0);
3383 else
3384 offset = 0;
3385
3386 /* HSW/BDW do this automagically in hardware */
3387 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
3388 int src_w = drm_rect_width(&plane_state->base.src) >> 16;
3389 int src_h = drm_rect_height(&plane_state->base.src) >> 16;
3390
3391 if (rotation & DRM_MODE_ROTATE_180) {
3392 src_x += src_w - 1;
3393 src_y += src_h - 1;
3394 } else if (rotation & DRM_MODE_REFLECT_X) {
3395 src_x += src_w - 1;
3396 }
3397 }
3398
3399 plane_state->color_plane[0].offset = offset;
3400 plane_state->color_plane[0].x = src_x;
3401 plane_state->color_plane[0].y = src_y;
3402
3403 return 0;
3404 }
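/*
 * Background for the rotation adjustment above (a sketch of the hardware
 * behaviour, not taken verbatim from any spec): for 180 degree rotation
 * pre-HSW hardware scans the buffer out backwards, so the programmed
 * x/y must point at the last pixel of the source rectangle; likewise an
 * X mirror needs x moved to the mirrored edge. HSW/BDW derive this
 * internally, keyed off DSPOFFSET.
 */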
3405
3406 static int
3407 i9xx_plane_check(struct intel_crtc_state *crtc_state,
3408 struct intel_plane_state *plane_state)
3409 {
3410 int ret;
3411
3412 ret = chv_plane_check_rotation(plane_state);
3413 if (ret)
3414 return ret;
3415
3416 ret = drm_atomic_helper_check_plane_state(&plane_state->base,
3417 &crtc_state->base,
3418 DRM_PLANE_HELPER_NO_SCALING,
3419 DRM_PLANE_HELPER_NO_SCALING,
3420 false, true);
3421 if (ret)
3422 return ret;
3423
3424 if (!plane_state->base.visible)
3425 return 0;
3426
3427 ret = intel_plane_check_src_coordinates(plane_state);
3428 if (ret)
3429 return ret;
3430
3431 ret = i9xx_check_plane_surface(plane_state);
3432 if (ret)
3433 return ret;
3434
3435 plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
3436
3437 return 0;
3438 }
3439
3440 static void i9xx_update_plane(struct intel_plane *plane,
3441 const struct intel_crtc_state *crtc_state,
3442 const struct intel_plane_state *plane_state)
3443 {
3444 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3445 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3446 u32 linear_offset;
3447 int x = plane_state->color_plane[0].x;
3448 int y = plane_state->color_plane[0].y;
3449 unsigned long irqflags;
3450 u32 dspaddr_offset;
3451 u32 dspcntr;
3452
3453 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
3454
3455 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
3456
3457 if (INTEL_GEN(dev_priv) >= 4)
3458 dspaddr_offset = plane_state->color_plane[0].offset;
3459 else
3460 dspaddr_offset = linear_offset;
3461
3462 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
3463
3464 I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
3465
3466 if (INTEL_GEN(dev_priv) < 4) {
3467 /* pipesrc and dspsize control the size that is scaled from,
3468 * which should always be the user's requested size.
3469 */
3470 I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
3471 I915_WRITE_FW(DSPSIZE(i9xx_plane),
3472 ((crtc_state->pipe_src_h - 1) << 16) |
3473 (crtc_state->pipe_src_w - 1));
3474 } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
3475 I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
3476 I915_WRITE_FW(PRIMSIZE(i9xx_plane),
3477 ((crtc_state->pipe_src_h - 1) << 16) |
3478 (crtc_state->pipe_src_w - 1));
3479 I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
3480 }
3481
3482 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3483 I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
3484 } else if (INTEL_GEN(dev_priv) >= 4) {
3485 I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
3486 I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
3487 }
3488
3489 /*
3490 * The control register self-arms if the plane was previously
3491 * disabled. Try to make the plane enable atomic by writing
3492 * the control register just before the surface register.
3493 */
3494 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
3495 if (INTEL_GEN(dev_priv) >= 4)
3496 I915_WRITE_FW(DSPSURF(i9xx_plane),
3497 intel_plane_ggtt_offset(plane_state) +
3498 dspaddr_offset);
3499 else
3500 I915_WRITE_FW(DSPADDR(i9xx_plane),
3501 intel_plane_ggtt_offset(plane_state) +
3502 dspaddr_offset);
3503
3504 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3505 }
3506
3507 static void i9xx_disable_plane(struct intel_plane *plane,
3508 const struct intel_crtc_state *crtc_state)
3509 {
3510 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3511 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3512 unsigned long irqflags;
3513 u32 dspcntr;
3514
3515 /*
3516 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
3517 * enable on ilk+ affect the pipe bottom color as
3518 * well, so we must configure them even if the plane
3519 * is disabled.
3520 *
3521 * On pre-g4x there is no way to gamma correct the
3522 * pipe bottom color but we'll keep on doing this
3523 * anyway so that the crtc state readout works correctly.
3524 */
3525 dspcntr = i9xx_plane_ctl_crtc(crtc_state);
3526
3527 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
3528
3529 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
3530 if (INTEL_GEN(dev_priv) >= 4)
3531 I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
3532 else
3533 I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
3534
3535 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3536 }
3537
3538 static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
3539 enum pipe *pipe)
3540 {
3541 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3542 enum intel_display_power_domain power_domain;
3543 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3544 intel_wakeref_t wakeref;
3545 bool ret;
3546 u32 val;
3547
3548 /*
3549 * Not 100% correct for planes that can move between pipes,
3550 * but that's only the case for gen2-4 which don't have any
3551 * display power wells.
3552 */
3553 power_domain = POWER_DOMAIN_PIPE(plane->pipe);
3554 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
3555 if (!wakeref)
3556 return false;
3557
3558 val = I915_READ(DSPCNTR(i9xx_plane));
3559
3560 ret = val & DISPLAY_PLANE_ENABLE;
3561
3562 if (INTEL_GEN(dev_priv) >= 5)
3563 *pipe = plane->pipe;
3564 else
3565 *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
3566 DISPPLANE_SEL_PIPE_SHIFT;
3567
3568 intel_display_power_put(dev_priv, power_domain, wakeref);
3569
3570 return ret;
3571 }
3572
3573 static u32
3574 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
3575 {
3576 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3577 return 64;
3578 else
3579 return intel_tile_width_bytes(fb, color_plane);
3580 }
3581
3582 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3583 {
3584 struct drm_device *dev = intel_crtc->base.dev;
3585 struct drm_i915_private *dev_priv = to_i915(dev);
3586
3587 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
3588 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
3589 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
3590 }
3591
3592 /*
3593 * This function detaches (aka unbinds) unused scalers in hardware
3594 */
3595 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
3596 {
3597 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
3598 const struct intel_crtc_scaler_state *scaler_state =
3599 &crtc_state->scaler_state;
3600 int i;
3601
3602 /* loop through and disable scalers that aren't in use */
3603 for (i = 0; i < intel_crtc->num_scalers; i++) {
3604 if (!scaler_state->scalers[i].in_use)
3605 skl_detach_scaler(intel_crtc, i);
3606 }
3607 }
3608
3609 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
3610 int color_plane, unsigned int rotation)
3611 {
3612 /*
3613 * The stride is expressed in 64 byte chunks for linear
3614 * buffers and in number of tiles for tiled buffers.
3615 */
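/*
 * Illustrative numbers (not from any spec): a linear fb with a
 * 5120 byte stride would be programmed as 5120 / 64 = 80, while an
 * X-tiled fb with the same byte stride would be programmed as
 * 5120 / 512 = 10 tiles, assuming the 512 byte X-tile row width
 * returned by intel_tile_width_bytes().
 */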
3616 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3617 return 64;
3618 else if (drm_rotation_90_or_270(rotation))
3619 return intel_tile_height(fb, color_plane);
3620 else
3621 return intel_tile_width_bytes(fb, color_plane);
3622 }
3623
3624 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
3625 int color_plane)
3626 {
3627 const struct drm_framebuffer *fb = plane_state->base.fb;
3628 unsigned int rotation = plane_state->base.rotation;
3629 u32 stride = plane_state->color_plane[color_plane].stride;
3630
3631 if (color_plane >= fb->format->num_planes)
3632 return 0;
3633
3634 return stride / skl_plane_stride_mult(fb, color_plane, rotation);
3635 }
3636
3637 static u32 skl_plane_ctl_format(u32 pixel_format)
3638 {
3639 switch (pixel_format) {
3640 case DRM_FORMAT_C8:
3641 return PLANE_CTL_FORMAT_INDEXED;
3642 case DRM_FORMAT_RGB565:
3643 return PLANE_CTL_FORMAT_RGB_565;
3644 case DRM_FORMAT_XBGR8888:
3645 case DRM_FORMAT_ABGR8888:
3646 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
3647 case DRM_FORMAT_XRGB8888:
3648 case DRM_FORMAT_ARGB8888:
3649 return PLANE_CTL_FORMAT_XRGB_8888;
3650 case DRM_FORMAT_XRGB2101010:
3651 return PLANE_CTL_FORMAT_XRGB_2101010;
3652 case DRM_FORMAT_XBGR2101010:
3653 return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
3654 case DRM_FORMAT_XBGR16161616F:
3655 case DRM_FORMAT_ABGR16161616F:
3656 return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
3657 case DRM_FORMAT_XRGB16161616F:
3658 case DRM_FORMAT_ARGB16161616F:
3659 return PLANE_CTL_FORMAT_XRGB_16161616F;
3660 case DRM_FORMAT_YUYV:
3661 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
3662 case DRM_FORMAT_YVYU:
3663 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
3664 case DRM_FORMAT_UYVY:
3665 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
3666 case DRM_FORMAT_VYUY:
3667 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
3668 case DRM_FORMAT_NV12:
3669 return PLANE_CTL_FORMAT_NV12;
3670 case DRM_FORMAT_P010:
3671 return PLANE_CTL_FORMAT_P010;
3672 case DRM_FORMAT_P012:
3673 return PLANE_CTL_FORMAT_P012;
3674 case DRM_FORMAT_P016:
3675 return PLANE_CTL_FORMAT_P016;
3676 case DRM_FORMAT_Y210:
3677 return PLANE_CTL_FORMAT_Y210;
3678 case DRM_FORMAT_Y212:
3679 return PLANE_CTL_FORMAT_Y212;
3680 case DRM_FORMAT_Y216:
3681 return PLANE_CTL_FORMAT_Y216;
3682 case DRM_FORMAT_XVYU2101010:
3683 return PLANE_CTL_FORMAT_Y410;
3684 case DRM_FORMAT_XVYU12_16161616:
3685 return PLANE_CTL_FORMAT_Y412;
3686 case DRM_FORMAT_XVYU16161616:
3687 return PLANE_CTL_FORMAT_Y416;
3688 default:
3689 MISSING_CASE(pixel_format);
3690 }
3691
3692 return 0;
3693 }
3694
3695 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
3696 {
3697 if (!plane_state->base.fb->format->has_alpha)
3698 return PLANE_CTL_ALPHA_DISABLE;
3699
3700 switch (plane_state->base.pixel_blend_mode) {
3701 case DRM_MODE_BLEND_PIXEL_NONE:
3702 return PLANE_CTL_ALPHA_DISABLE;
3703 case DRM_MODE_BLEND_PREMULTI:
3704 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
3705 case DRM_MODE_BLEND_COVERAGE:
3706 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
3707 default:
3708 MISSING_CASE(plane_state->base.pixel_blend_mode);
3709 return PLANE_CTL_ALPHA_DISABLE;
3710 }
3711 }
3712
3713 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
3714 {
3715 if (!plane_state->base.fb->format->has_alpha)
3716 return PLANE_COLOR_ALPHA_DISABLE;
3717
3718 switch (plane_state->base.pixel_blend_mode) {
3719 case DRM_MODE_BLEND_PIXEL_NONE:
3720 return PLANE_COLOR_ALPHA_DISABLE;
3721 case DRM_MODE_BLEND_PREMULTI:
3722 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
3723 case DRM_MODE_BLEND_COVERAGE:
3724 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
3725 default:
3726 MISSING_CASE(plane_state->base.pixel_blend_mode);
3727 return PLANE_COLOR_ALPHA_DISABLE;
3728 }
3729 }
3730
3731 static u32 skl_plane_ctl_tiling(u64 fb_modifier)
3732 {
3733 switch (fb_modifier) {
3734 case DRM_FORMAT_MOD_LINEAR:
3735 break;
3736 case I915_FORMAT_MOD_X_TILED:
3737 return PLANE_CTL_TILED_X;
3738 case I915_FORMAT_MOD_Y_TILED:
3739 return PLANE_CTL_TILED_Y;
3740 case I915_FORMAT_MOD_Y_TILED_CCS:
3741 return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
3742 case I915_FORMAT_MOD_Yf_TILED:
3743 return PLANE_CTL_TILED_YF;
3744 case I915_FORMAT_MOD_Yf_TILED_CCS:
3745 return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
3746 default:
3747 MISSING_CASE(fb_modifier);
3748 }
3749
3750 return 0;
3751 }
3752
3753 static u32 skl_plane_ctl_rotate(unsigned int rotate)
3754 {
3755 switch (rotate) {
3756 case DRM_MODE_ROTATE_0:
3757 break;
3758 /*
3759	 * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr,
3760	 * while i915 HW rotation is clockwise; that's why the cases are swapped.
3761 */
3762 case DRM_MODE_ROTATE_90:
3763 return PLANE_CTL_ROTATE_270;
3764 case DRM_MODE_ROTATE_180:
3765 return PLANE_CTL_ROTATE_180;
3766 case DRM_MODE_ROTATE_270:
3767 return PLANE_CTL_ROTATE_90;
3768 default:
3769 MISSING_CASE(rotate);
3770 }
3771
3772 return 0;
3773 }
3774
3775 static u32 cnl_plane_ctl_flip(unsigned int reflect)
3776 {
3777 switch (reflect) {
3778 case 0:
3779 break;
3780 case DRM_MODE_REFLECT_X:
3781 return PLANE_CTL_FLIP_HORIZONTAL;
3782 case DRM_MODE_REFLECT_Y:
3783 default:
3784 MISSING_CASE(reflect);
3785 }
3786
3787 return 0;
3788 }
3789
3790 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
3791 {
3792 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
3793 u32 plane_ctl = 0;
3794
3795 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
3796 return plane_ctl;
3797
3798 if (crtc_state->gamma_enable)
3799 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
3800
3801 if (crtc_state->csc_enable)
3802 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
3803
3804 return plane_ctl;
3805 }
3806
3807 u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
3808 const struct intel_plane_state *plane_state)
3809 {
3810 struct drm_i915_private *dev_priv =
3811 to_i915(plane_state->base.plane->dev);
3812 const struct drm_framebuffer *fb = plane_state->base.fb;
3813 unsigned int rotation = plane_state->base.rotation;
3814 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
3815 u32 plane_ctl;
3816
3817 plane_ctl = PLANE_CTL_ENABLE;
3818
3819 if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
3820 plane_ctl |= skl_plane_ctl_alpha(plane_state);
3821 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
3822
3823 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
3824 plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
3825
3826 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
3827 plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
3828 }
3829
3830 plane_ctl |= skl_plane_ctl_format(fb->format->format);
3831 plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
3832 plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
3833
3834 if (INTEL_GEN(dev_priv) >= 10)
3835 plane_ctl |= cnl_plane_ctl_flip(rotation &
3836 DRM_MODE_REFLECT_MASK);
3837
3838 if (key->flags & I915_SET_COLORKEY_DESTINATION)
3839 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
3840 else if (key->flags & I915_SET_COLORKEY_SOURCE)
3841 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
3842
3843 return plane_ctl;
3844 }
3845
3846 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
3847 {
3848 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
3849 u32 plane_color_ctl = 0;
3850
3851 if (INTEL_GEN(dev_priv) >= 11)
3852 return plane_color_ctl;
3853
3854 if (crtc_state->gamma_enable)
3855 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
3856
3857 if (crtc_state->csc_enable)
3858 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
3859
3860 return plane_color_ctl;
3861 }
3862
3863 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
3864 const struct intel_plane_state *plane_state)
3865 {
3866 struct drm_i915_private *dev_priv =
3867 to_i915(plane_state->base.plane->dev);
3868 const struct drm_framebuffer *fb = plane_state->base.fb;
3869 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
3870 u32 plane_color_ctl = 0;
3871
3872 plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
3873 plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
3874
3875 if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
3876 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
3877 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
3878 else
3879 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;
3880
3881 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
3882 plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
3883 } else if (fb->format->is_yuv) {
3884 plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
3885 }
3886
3887 return plane_color_ctl;
3888 }
3889
3890 static int
3891 __intel_display_resume(struct drm_device *dev,
3892 struct drm_atomic_state *state,
3893 struct drm_modeset_acquire_ctx *ctx)
3894 {
3895 struct drm_crtc_state *crtc_state;
3896 struct drm_crtc *crtc;
3897 int i, ret;
3898
3899 intel_modeset_setup_hw_state(dev, ctx);
3900 i915_redisable_vga(to_i915(dev));
3901
3902 if (!state)
3903 return 0;
3904
3905 /*
3906 * We've duplicated the state, so pointers to the old state are invalid.
3907 *
3908 * Don't attempt to use the old state until we commit the duplicated state.
3909 */
3910 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
3911 /*
3912 * Force recalculation even if we restore
3913 * current state. With fast modeset this may not result
3914 * in a modeset when the state is compatible.
3915 */
3916 crtc_state->mode_changed = true;
3917 }
3918
3919 /* ignore any reset values/BIOS leftovers in the WM registers */
3920 if (!HAS_GMCH(to_i915(dev)))
3921 to_intel_atomic_state(state)->skip_intermediate_wm = true;
3922
3923 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
3924
3925 WARN_ON(ret == -EDEADLK);
3926 return ret;
3927 }
3928
3929 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
3930 {
3931 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
3932 intel_has_gpu_reset(dev_priv));
3933 }
3934
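/*
 * intel_prepare_reset() and intel_finish_reset() bracket a GPU reset that
 * may clobber the display: prepare duplicates the current atomic state
 * and disables all CRTCs, finish re-initializes the hardware if the reset
 * clobbered it and commits the duplicated state back.
 */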
3935 void intel_prepare_reset(struct drm_i915_private *dev_priv)
3936 {
3937 struct drm_device *dev = &dev_priv->drm;
3938 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
3939 struct drm_atomic_state *state;
3940 int ret;
3941
3942 /* reset doesn't touch the display */
3943 if (!i915_modparams.force_reset_modeset_test &&
3944 !gpu_reset_clobbers_display(dev_priv))
3945 return;
3946
3947 /* We have a modeset vs. reset deadlock; defensively unbreak it. */
3948 set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
3949 wake_up_all(&dev_priv->gpu_error.wait_queue);
3950
3951 if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
3952 DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
3953 i915_gem_set_wedged(dev_priv);
3954 }
3955
3956 /*
3957 * Need mode_config.mutex so that we don't
3958 * trample ongoing ->detect() and whatnot.
3959 */
3960 mutex_lock(&dev->mode_config.mutex);
3961 drm_modeset_acquire_init(ctx, 0);
3962 while (1) {
3963 ret = drm_modeset_lock_all_ctx(dev, ctx);
3964 if (ret != -EDEADLK)
3965 break;
3966
3967 drm_modeset_backoff(ctx);
3968 }
3969 /*
3970 * Disabling the crtcs gracefully seems nicer. Also the
3971 * g33 docs say we should at least disable all the planes.
3972 */
3973 state = drm_atomic_helper_duplicate_state(dev, ctx);
3974 if (IS_ERR(state)) {
3975 ret = PTR_ERR(state);
3976 DRM_ERROR("Duplicating state failed with %i\n", ret);
3977 return;
3978 }
3979
3980 ret = drm_atomic_helper_disable_all(dev, ctx);
3981 if (ret) {
3982 DRM_ERROR("Suspending CRTCs failed with %i\n", ret);
3983 drm_atomic_state_put(state);
3984 return;
3985 }
3986
3987 dev_priv->modeset_restore_state = state;
3988 state->acquire_ctx = ctx;
3989 }
3990
3991 void intel_finish_reset(struct drm_i915_private *dev_priv)
3992 {
3993 struct drm_device *dev = &dev_priv->drm;
3994 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
3995 struct drm_atomic_state *state;
3996 int ret;
3997
3998 /* reset doesn't touch the display */
3999 if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
4000 return;
4001
4002 state = fetch_and_zero(&dev_priv->modeset_restore_state);
4003 if (!state)
4004 goto unlock;
4005
4006 /* reset doesn't touch the display */
4007 if (!gpu_reset_clobbers_display(dev_priv)) {
4008 /* for testing only restore the display */
4009 ret = __intel_display_resume(dev, state, ctx);
4010 if (ret)
4011 DRM_ERROR("Restoring old state failed with %i\n", ret);
4012 } else {
4013 /*
4014 * The display has been reset as well,
4015 * so need a full re-initialization.
4016 */
4017 intel_pps_unlock_regs_wa(dev_priv);
4018 intel_modeset_init_hw(dev);
4019 intel_init_clock_gating(dev_priv);
4020
4021 spin_lock_irq(&dev_priv->irq_lock);
4022 if (dev_priv->display.hpd_irq_setup)
4023 dev_priv->display.hpd_irq_setup(dev_priv);
4024 spin_unlock_irq(&dev_priv->irq_lock);
4025
4026 ret = __intel_display_resume(dev, state, ctx);
4027 if (ret)
4028 DRM_ERROR("Restoring old state failed with %i\n", ret);
4029
4030 intel_hpd_init(dev_priv);
4031 }
4032
4033 drm_atomic_state_put(state);
4034 unlock:
4035 drm_modeset_drop_locks(ctx);
4036 drm_modeset_acquire_fini(ctx);
4037 mutex_unlock(&dev->mode_config.mutex);
4038
4039 clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
4040 }
4041
4042 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
4043 {
4044 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4045 enum pipe pipe = crtc->pipe;
4046 u32 tmp;
4047
4048 tmp = I915_READ(PIPE_CHICKEN(pipe));
4049
4050 /*
4051 * Display WA #1153: icl
4052 * enable hardware to bypass the alpha math
4053 * and rounding for per-pixel values 0x00 and 0xff
4054 */
4055 tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
4056 /*
4057 * Display WA #1605353570: icl
4058 * Set the pixel rounding bit to 1 to allow
4059 * framebuffer pixels to pass through unmodified
4060 * across the pipe.
4061 */
4062 tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
4063 I915_WRITE(PIPE_CHICKEN(pipe), tmp);
4064 }
4065
4066 static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
4067 const struct intel_crtc_state *new_crtc_state)
4068 {
4069 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
4070 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4071
4072 /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
4073 crtc->base.mode = new_crtc_state->base.mode;
4074
4075 /*
4076 * Update pipe size and adjust fitter if needed: the reason for this is
4077 * that in compute_mode_changes we check the native mode (not the pfit
4078 * mode) to see if we can flip rather than do a full mode set. In the
4079 * fastboot case, we'll flip, but if we don't update the pipesrc and
4080 * pfit state, we'll end up with a big fb scanned out into the wrong
4081 * sized surface.
4082 */
4083
4084 I915_WRITE(PIPESRC(crtc->pipe),
4085 ((new_crtc_state->pipe_src_w - 1) << 16) |
4086 (new_crtc_state->pipe_src_h - 1));
4087
4088 /* on skylake this is done by detaching scalers */
4089 if (INTEL_GEN(dev_priv) >= 9) {
4090 skl_detach_scalers(new_crtc_state);
4091
4092 if (new_crtc_state->pch_pfit.enabled)
4093 skylake_pfit_enable(new_crtc_state);
4094 } else if (HAS_PCH_SPLIT(dev_priv)) {
4095 if (new_crtc_state->pch_pfit.enabled)
4096 ironlake_pfit_enable(new_crtc_state);
4097 else if (old_crtc_state->pch_pfit.enabled)
4098 ironlake_pfit_disable(old_crtc_state);
4099 }
4100
4101 if (INTEL_GEN(dev_priv) >= 11)
4102 icl_set_pipe_chicken(crtc);
4103 }
4104
4105 static void intel_fdi_normal_train(struct intel_crtc *crtc)
4106 {
4107 struct drm_device *dev = crtc->base.dev;
4108 struct drm_i915_private *dev_priv = to_i915(dev);
4109 int pipe = crtc->pipe;
4110 i915_reg_t reg;
4111 u32 temp;
4112
4113 /* enable normal train */
4114 reg = FDI_TX_CTL(pipe);
4115 temp = I915_READ(reg);
4116 if (IS_IVYBRIDGE(dev_priv)) {
4117 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
4118 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
4119 } else {
4120 temp &= ~FDI_LINK_TRAIN_NONE;
4121 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
4122 }
4123 I915_WRITE(reg, temp);
4124
4125 reg = FDI_RX_CTL(pipe);
4126 temp = I915_READ(reg);
4127 if (HAS_PCH_CPT(dev_priv)) {
4128 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4129 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
4130 } else {
4131 temp &= ~FDI_LINK_TRAIN_NONE;
4132 temp |= FDI_LINK_TRAIN_NONE;
4133 }
4134 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
4135
4136 /* wait one idle pattern time */
4137 POSTING_READ(reg);
4138 udelay(1000);
4139
4140 /* IVB wants error correction enabled */
4141 if (IS_IVYBRIDGE(dev_priv))
4142 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
4143 FDI_FE_ERRC_ENABLE);
4144 }
4145
4146 /* The FDI link training functions for ILK/Ibexpeak. */
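/*
 * Training runs in two phases: TX and RX are put in training pattern 1
 * until FDI_RX_IIR reports bit lock, then in pattern 2 until it reports
 * symbol lock; intel_fdi_normal_train() later switches the link to the
 * normal pattern.
 */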
4147 static void ironlake_fdi_link_train(struct intel_crtc *crtc,
4148 const struct intel_crtc_state *crtc_state)
4149 {
4150 struct drm_device *dev = crtc->base.dev;
4151 struct drm_i915_private *dev_priv = to_i915(dev);
4152 int pipe = crtc->pipe;
4153 i915_reg_t reg;
4154 u32 temp, tries;
4155
4156 /* FDI needs bits from pipe first */
4157 assert_pipe_enabled(dev_priv, pipe);
4158
4159 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
4160 for the training result */
4161 reg = FDI_RX_IMR(pipe);
4162 temp = I915_READ(reg);
4163 temp &= ~FDI_RX_SYMBOL_LOCK;
4164 temp &= ~FDI_RX_BIT_LOCK;
4165 I915_WRITE(reg, temp);
4166 I915_READ(reg);
4167 udelay(150);
4168
4169 /* enable CPU FDI TX and PCH FDI RX */
4170 reg = FDI_TX_CTL(pipe);
4171 temp = I915_READ(reg);
4172 temp &= ~FDI_DP_PORT_WIDTH_MASK;
4173 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4174 temp &= ~FDI_LINK_TRAIN_NONE;
4175 temp |= FDI_LINK_TRAIN_PATTERN_1;
4176 I915_WRITE(reg, temp | FDI_TX_ENABLE);
4177
4178 reg = FDI_RX_CTL(pipe);
4179 temp = I915_READ(reg);
4180 temp &= ~FDI_LINK_TRAIN_NONE;
4181 temp |= FDI_LINK_TRAIN_PATTERN_1;
4182 I915_WRITE(reg, temp | FDI_RX_ENABLE);
4183
4184 POSTING_READ(reg);
4185 udelay(150);
4186
4187 /* Ironlake workaround: enable clock pointer after FDI enable */
4188 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
4189 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
4190 FDI_RX_PHASE_SYNC_POINTER_EN);
4191
4192 reg = FDI_RX_IIR(pipe);
4193 for (tries = 0; tries < 5; tries++) {
4194 temp = I915_READ(reg);
4195 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4196
4197 if ((temp & FDI_RX_BIT_LOCK)) {
4198 DRM_DEBUG_KMS("FDI train 1 done.\n");
4199 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4200 break;
4201 }
4202 }
4203 if (tries == 5)
4204 DRM_ERROR("FDI train 1 fail!\n");
4205
4206 /* Train 2 */
4207 reg = FDI_TX_CTL(pipe);
4208 temp = I915_READ(reg);
4209 temp &= ~FDI_LINK_TRAIN_NONE;
4210 temp |= FDI_LINK_TRAIN_PATTERN_2;
4211 I915_WRITE(reg, temp);
4212
4213 reg = FDI_RX_CTL(pipe);
4214 temp = I915_READ(reg);
4215 temp &= ~FDI_LINK_TRAIN_NONE;
4216 temp |= FDI_LINK_TRAIN_PATTERN_2;
4217 I915_WRITE(reg, temp);
4218
4219 POSTING_READ(reg);
4220 udelay(150);
4221
4222 reg = FDI_RX_IIR(pipe);
4223 for (tries = 0; tries < 5; tries++) {
4224 temp = I915_READ(reg);
4225 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4226
4227 if (temp & FDI_RX_SYMBOL_LOCK) {
4228 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4229 DRM_DEBUG_KMS("FDI train 2 done.\n");
4230 break;
4231 }
4232 }
4233 if (tries == 5)
4234 DRM_ERROR("FDI train 2 fail!\n");
4235
4236 DRM_DEBUG_KMS("FDI train done\n");
4238 }
4239
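/*
 * Voltage swing / pre-emphasis combinations tried in order during
 * SNB/IVB FDI training; ivb_manual_fdi_link_train() below tries each
 * entry twice, hence its j / 2 indexing.
 */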
4240 static const int snb_b_fdi_train_param[] = {
4241 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
4242 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
4243 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
4244 FDI_LINK_TRAIN_800MV_0DB_SNB_B,
4245 };
4246
4247 /* The FDI link training functions for SNB/Cougarpoint. */
4248 static void gen6_fdi_link_train(struct intel_crtc *crtc,
4249 const struct intel_crtc_state *crtc_state)
4250 {
4251 struct drm_device *dev = crtc->base.dev;
4252 struct drm_i915_private *dev_priv = to_i915(dev);
4253 int pipe = crtc->pipe;
4254 i915_reg_t reg;
4255 u32 temp, i, retry;
4256
4257 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
4258 for the training result */
4259 reg = FDI_RX_IMR(pipe);
4260 temp = I915_READ(reg);
4261 temp &= ~FDI_RX_SYMBOL_LOCK;
4262 temp &= ~FDI_RX_BIT_LOCK;
4263 I915_WRITE(reg, temp);
4264
4265 POSTING_READ(reg);
4266 udelay(150);
4267
4268 /* enable CPU FDI TX and PCH FDI RX */
4269 reg = FDI_TX_CTL(pipe);
4270 temp = I915_READ(reg);
4271 temp &= ~FDI_DP_PORT_WIDTH_MASK;
4272 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4273 temp &= ~FDI_LINK_TRAIN_NONE;
4274 temp |= FDI_LINK_TRAIN_PATTERN_1;
4275 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4276 /* SNB-B */
4277 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
4278 I915_WRITE(reg, temp | FDI_TX_ENABLE);
4279
4280 I915_WRITE(FDI_RX_MISC(pipe),
4281 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
4282
4283 reg = FDI_RX_CTL(pipe);
4284 temp = I915_READ(reg);
4285 if (HAS_PCH_CPT(dev_priv)) {
4286 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4287 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4288 } else {
4289 temp &= ~FDI_LINK_TRAIN_NONE;
4290 temp |= FDI_LINK_TRAIN_PATTERN_1;
4291 }
4292 I915_WRITE(reg, temp | FDI_RX_ENABLE);
4293
4294 POSTING_READ(reg);
4295 udelay(150);
4296
4297 for (i = 0; i < 4; i++) {
4298 reg = FDI_TX_CTL(pipe);
4299 temp = I915_READ(reg);
4300 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4301 temp |= snb_b_fdi_train_param[i];
4302 I915_WRITE(reg, temp);
4303
4304 POSTING_READ(reg);
4305 udelay(500);
4306
4307 for (retry = 0; retry < 5; retry++) {
4308 reg = FDI_RX_IIR(pipe);
4309 temp = I915_READ(reg);
4310 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4311 if (temp & FDI_RX_BIT_LOCK) {
4312 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4313 DRM_DEBUG_KMS("FDI train 1 done.\n");
4314 break;
4315 }
4316 udelay(50);
4317 }
4318 if (retry < 5)
4319 break;
4320 }
4321 if (i == 4)
4322 DRM_ERROR("FDI train 1 fail!\n");
4323
4324 /* Train 2 */
4325 reg = FDI_TX_CTL(pipe);
4326 temp = I915_READ(reg);
4327 temp &= ~FDI_LINK_TRAIN_NONE;
4328 temp |= FDI_LINK_TRAIN_PATTERN_2;
4329 if (IS_GEN(dev_priv, 6)) {
4330 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4331 /* SNB-B */
4332 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
4333 }
4334 I915_WRITE(reg, temp);
4335
4336 reg = FDI_RX_CTL(pipe);
4337 temp = I915_READ(reg);
4338 if (HAS_PCH_CPT(dev_priv)) {
4339 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4340 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
4341 } else {
4342 temp &= ~FDI_LINK_TRAIN_NONE;
4343 temp |= FDI_LINK_TRAIN_PATTERN_2;
4344 }
4345 I915_WRITE(reg, temp);
4346
4347 POSTING_READ(reg);
4348 udelay(150);
4349
4350 for (i = 0; i < 4; i++) {
4351 reg = FDI_TX_CTL(pipe);
4352 temp = I915_READ(reg);
4353 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4354 temp |= snb_b_fdi_train_param[i];
4355 I915_WRITE(reg, temp);
4356
4357 POSTING_READ(reg);
4358 udelay(500);
4359
4360 for (retry = 0; retry < 5; retry++) {
4361 reg = FDI_RX_IIR(pipe);
4362 temp = I915_READ(reg);
4363 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4364 if (temp & FDI_RX_SYMBOL_LOCK) {
4365 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4366 DRM_DEBUG_KMS("FDI train 2 done.\n");
4367 break;
4368 }
4369 udelay(50);
4370 }
4371 if (retry < 5)
4372 break;
4373 }
4374 if (i == 4)
4375 DRM_ERROR("FDI train 2 fail!\n");
4376
4377 DRM_DEBUG_KMS("FDI train done.\n");
4378 }
4379
4380 /* Manual link training for Ivy Bridge A0 parts */
4381 static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
4382 const struct intel_crtc_state *crtc_state)
4383 {
4384 struct drm_device *dev = crtc->base.dev;
4385 struct drm_i915_private *dev_priv = to_i915(dev);
4386 int pipe = crtc->pipe;
4387 i915_reg_t reg;
4388 u32 temp, i, j;
4389
4390 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
4391 for the training result */
4392 reg = FDI_RX_IMR(pipe);
4393 temp = I915_READ(reg);
4394 temp &= ~FDI_RX_SYMBOL_LOCK;
4395 temp &= ~FDI_RX_BIT_LOCK;
4396 I915_WRITE(reg, temp);
4397
4398 POSTING_READ(reg);
4399 udelay(150);
4400
4401 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
4402 I915_READ(FDI_RX_IIR(pipe)));
4403
4404 /* Try each vswing and preemphasis setting twice before moving on */
4405 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
4406 /* disable first in case we need to retry */
4407 reg = FDI_TX_CTL(pipe);
4408 temp = I915_READ(reg);
4409 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
4410 temp &= ~FDI_TX_ENABLE;
4411 I915_WRITE(reg, temp);
4412
4413 reg = FDI_RX_CTL(pipe);
4414 temp = I915_READ(reg);
4415 temp &= ~FDI_LINK_TRAIN_AUTO;
4416 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4417 temp &= ~FDI_RX_ENABLE;
4418 I915_WRITE(reg, temp);
4419
4420 /* enable CPU FDI TX and PCH FDI RX */
4421 reg = FDI_TX_CTL(pipe);
4422 temp = I915_READ(reg);
4423 temp &= ~FDI_DP_PORT_WIDTH_MASK;
4424 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4425 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
4426 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4427 temp |= snb_b_fdi_train_param[j/2];
4428 temp |= FDI_COMPOSITE_SYNC;
4429 I915_WRITE(reg, temp | FDI_TX_ENABLE);
4430
4431 I915_WRITE(FDI_RX_MISC(pipe),
4432 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
4433
4434 reg = FDI_RX_CTL(pipe);
4435 temp = I915_READ(reg);
4436 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4437 temp |= FDI_COMPOSITE_SYNC;
4438 I915_WRITE(reg, temp | FDI_RX_ENABLE);
4439
4440 POSTING_READ(reg);
4441 udelay(1); /* should be 0.5us */
4442
4443 for (i = 0; i < 4; i++) {
4444 reg = FDI_RX_IIR(pipe);
4445 temp = I915_READ(reg);
4446 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4447
4448 if (temp & FDI_RX_BIT_LOCK ||
4449 (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
4450 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4451 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
4452 i);
4453 break;
4454 }
4455 udelay(1); /* should be 0.5us */
4456 }
4457 if (i == 4) {
4458 DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
4459 continue;
4460 }
4461
4462 /* Train 2 */
4463 reg = FDI_TX_CTL(pipe);
4464 temp = I915_READ(reg);
4465 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
4466 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
4467 I915_WRITE(reg, temp);
4468
4469 reg = FDI_RX_CTL(pipe);
4470 temp = I915_READ(reg);
4471 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4472 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
4473 I915_WRITE(reg, temp);
4474
4475 POSTING_READ(reg);
4476 udelay(2); /* should be 1.5us */
4477
4478 for (i = 0; i < 4; i++) {
4479 reg = FDI_RX_IIR(pipe);
4480 temp = I915_READ(reg);
4481 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4482
4483 if (temp & FDI_RX_SYMBOL_LOCK ||
4484 (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
4485 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4486 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
4487 i);
4488 goto train_done;
4489 }
4490 udelay(2); /* should be 1.5us */
4491 }
4492 if (i == 4)
4493 DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
4494 }
4495
4496 train_done:
4497 DRM_DEBUG_KMS("FDI train done.\n");
4498 }
4499
4500 static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
4501 {
4502 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
4503 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
4504 int pipe = intel_crtc->pipe;
4505 i915_reg_t reg;
4506 u32 temp;
4507
4508 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
4509 reg = FDI_RX_CTL(pipe);
4510 temp = I915_READ(reg);
4511 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
4512 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4513 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
4514 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
4515
4516 POSTING_READ(reg);
4517 udelay(200);
4518
4519 /* Switch from Rawclk to PCDclk */
4520 temp = I915_READ(reg);
4521 I915_WRITE(reg, temp | FDI_PCDCLK);
4522
4523 POSTING_READ(reg);
4524 udelay(200);
4525
4526 /* Enable CPU FDI TX PLL, always on for Ironlake */
4527 reg = FDI_TX_CTL(pipe);
4528 temp = I915_READ(reg);
4529 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
4530 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
4531
4532 POSTING_READ(reg);
4533 udelay(100);
4534 }
4535 }
4536
4537 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
4538 {
4539 struct drm_device *dev = intel_crtc->base.dev;
4540 struct drm_i915_private *dev_priv = to_i915(dev);
4541 int pipe = intel_crtc->pipe;
4542 i915_reg_t reg;
4543 u32 temp;
4544
4545 /* Switch from PCDclk to Rawclk */
4546 reg = FDI_RX_CTL(pipe);
4547 temp = I915_READ(reg);
4548 I915_WRITE(reg, temp & ~FDI_PCDCLK);
4549
4550 /* Disable CPU FDI TX PLL */
4551 reg = FDI_TX_CTL(pipe);
4552 temp = I915_READ(reg);
4553 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
4554
4555 POSTING_READ(reg);
4556 udelay(100);
4557
4558 reg = FDI_RX_CTL(pipe);
4559 temp = I915_READ(reg);
4560 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
4561
4562 /* Wait for the clocks to turn off. */
4563 POSTING_READ(reg);
4564 udelay(100);
4565 }
4566
4567 static void ironlake_fdi_disable(struct drm_crtc *crtc)
4568 {
4569 struct drm_device *dev = crtc->dev;
4570 struct drm_i915_private *dev_priv = to_i915(dev);
4571 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4572 int pipe = intel_crtc->pipe;
4573 i915_reg_t reg;
4574 u32 temp;
4575
4576 /* disable CPU FDI tx and PCH FDI rx */
4577 reg = FDI_TX_CTL(pipe);
4578 temp = I915_READ(reg);
4579 I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
4580 POSTING_READ(reg);
4581
4582 reg = FDI_RX_CTL(pipe);
4583 temp = I915_READ(reg);
4584 temp &= ~(0x7 << 16);
4585 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
4586 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
4587
4588 POSTING_READ(reg);
4589 udelay(100);
4590
4591 /* Ironlake workaround: disable clock pointer after disabling FDI */
4592 if (HAS_PCH_IBX(dev_priv))
4593 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
4594
4595 /* still set train pattern 1 */
4596 reg = FDI_TX_CTL(pipe);
4597 temp = I915_READ(reg);
4598 temp &= ~FDI_LINK_TRAIN_NONE;
4599 temp |= FDI_LINK_TRAIN_PATTERN_1;
4600 I915_WRITE(reg, temp);
4601
4602 reg = FDI_RX_CTL(pipe);
4603 temp = I915_READ(reg);
4604 if (HAS_PCH_CPT(dev_priv)) {
4605 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4606 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4607 } else {
4608 temp &= ~FDI_LINK_TRAIN_NONE;
4609 temp |= FDI_LINK_TRAIN_PATTERN_1;
4610 }
4611 /* BPC in FDI rx is consistent with that in PIPECONF */
4612 temp &= ~(0x07 << 16);
4613 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
4614 I915_WRITE(reg, temp);
4615
4616 POSTING_READ(reg);
4617 udelay(100);
4618 }
4619
4620 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
4621 {
4622 struct drm_crtc *crtc;
4623 bool cleanup_done;
4624
4625 drm_for_each_crtc(crtc, &dev_priv->drm) {
4626 struct drm_crtc_commit *commit;
4627 spin_lock(&crtc->commit_lock);
4628 commit = list_first_entry_or_null(&crtc->commit_list,
4629 struct drm_crtc_commit, commit_entry);
4630 cleanup_done = commit ?
4631 try_wait_for_completion(&commit->cleanup_done) : true;
4632 spin_unlock(&crtc->commit_lock);
4633
4634 if (cleanup_done)
4635 continue;
4636
4637 drm_crtc_wait_one_vblank(crtc);
4638
4639 return true;
4640 }
4641
4642 return false;
4643 }
4644
4645 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
4646 {
4647 u32 temp;
4648
4649 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
4650
4651 mutex_lock(&dev_priv->sb_lock);
4652
4653 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
4654 temp |= SBI_SSCCTL_DISABLE;
4655 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
4656
4657 mutex_unlock(&dev_priv->sb_lock);
4658 }
4659
4660 /* Program iCLKIP clock to the desired frequency */
4661 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
4662 {
4663 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4664 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4665 int clock = crtc_state->base.adjusted_mode.crtc_clock;
4666 u32 divsel, phaseinc, auxdiv, phasedir = 0;
4667 u32 temp;
4668
4669 lpt_disable_iclkip(dev_priv);
4670
4671 /* The iCLK virtual clock root frequency is in MHz,
4672 * but the adjusted_mode->crtc_clock is in KHz. To get the
4673 * divisors, it is necessary to divide one by the other, so we
4674 * convert the virtual clock rate to KHz here for higher
4675 * precision.
4676 */
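/*
 * Worked example with illustrative numbers: for a 108000 KHz pixel
 * clock and auxdiv = 0, desired_divisor =
 * DIV_ROUND_CLOSEST(172800000, 108000) = 1600, giving
 * divsel = 1600 / 64 - 2 = 23 and phaseinc = 1600 % 64 = 0;
 * divsel fits in 7 bits, so auxdiv stays 0.
 */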
4677 for (auxdiv = 0; auxdiv < 2; auxdiv++) {
4678 u32 iclk_virtual_root_freq = 172800 * 1000;
4679 u32 iclk_pi_range = 64;
4680 u32 desired_divisor;
4681
4682 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
4683 clock << auxdiv);
4684 divsel = (desired_divisor / iclk_pi_range) - 2;
4685 phaseinc = desired_divisor % iclk_pi_range;
4686
4687 /*
4688 * Near 20MHz is a corner case which is
4689 * out of range for the 7-bit divisor
4690 */
4691 if (divsel <= 0x7f)
4692 break;
4693 }
4694
4695 /* This should not happen with any sane values */
4696 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
4697 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
4698 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
4699 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
4700
4701 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz pixel clock: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
4702 clock,
4703 auxdiv,
4704 divsel,
4705 phasedir,
4706 phaseinc);
4707
4708 mutex_lock(&dev_priv->sb_lock);
4709
4710 /* Program SSCDIVINTPHASE6 */
4711 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
4712 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
4713 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
4714 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
4715 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
4716 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
4717 temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
4718 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
4719
4720 /* Program SSCAUXDIV */
4721 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
4722 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
4723 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
4724 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
4725
4726 /* Enable modulator and associated divider */
4727 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
4728 temp &= ~SBI_SSCCTL_DISABLE;
4729 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
4730
4731 mutex_unlock(&dev_priv->sb_lock);
4732
4733 /* Wait for initialization time */
4734 udelay(24);
4735
4736 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
4737 }
4738
4739 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
4740 {
4741 u32 divsel, phaseinc, auxdiv;
4742 u32 iclk_virtual_root_freq = 172800 * 1000;
4743 u32 iclk_pi_range = 64;
4744 u32 desired_divisor;
4745 u32 temp;
4746
4747 if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
4748 return 0;
4749
4750 mutex_lock(&dev_priv->sb_lock);
4751
4752 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
4753 if (temp & SBI_SSCCTL_DISABLE) {
4754 mutex_unlock(&dev_priv->sb_lock);
4755 return 0;
4756 }
4757
4758 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
4759 divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
4760 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
4761 phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
4762 SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
4763
4764 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
4765 auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
4766 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
4767
4768 mutex_unlock(&dev_priv->sb_lock);
4769
4770 desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
4771
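/*
 * Inverse of the lpt_program_iclkip() example above: divsel = 23,
 * phaseinc = 0, auxdiv = 0 gives desired_divisor = 25 * 64 = 1600
 * and hence a 172800000 / 1600 = 108000 KHz clock.
 */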
4772 return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
4773 desired_divisor << auxdiv);
4774 }
4775
4776 static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
4777 enum pipe pch_transcoder)
4778 {
4779 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4780 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4781 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
4782
4783 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
4784 I915_READ(HTOTAL(cpu_transcoder)));
4785 I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
4786 I915_READ(HBLANK(cpu_transcoder)));
4787 I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
4788 I915_READ(HSYNC(cpu_transcoder)));
4789
4790 I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
4791 I915_READ(VTOTAL(cpu_transcoder)));
4792 I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
4793 I915_READ(VBLANK(cpu_transcoder)));
4794 I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
4795 I915_READ(VSYNC(cpu_transcoder)));
4796 I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
4797 I915_READ(VSYNCSHIFT(cpu_transcoder)));
4798 }
4799
4800 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
4801 {
4802 u32 temp;
4803
4804 temp = I915_READ(SOUTH_CHICKEN1);
4805 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
4806 return;
4807
4808 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
4809 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
4810
4811 temp &= ~FDI_BC_BIFURCATION_SELECT;
4812 if (enable)
4813 temp |= FDI_BC_BIFURCATION_SELECT;
4814
4815 DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
4816 I915_WRITE(SOUTH_CHICKEN1, temp);
4817 POSTING_READ(SOUTH_CHICKEN1);
4818 }
4819
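/*
 * FDI B and FDI C share a pool of four lanes on CPT/PPT: with
 * bifurcation enabled each gets two lanes, with it disabled FDI B takes
 * all four. Hence pipe B must disable bifurcation when it needs more
 * than two lanes, while pipe C (which only ever sees the shared pair)
 * always requires it.
 */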
4820 static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
4821 {
4822 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4823 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4824
4825 switch (crtc->pipe) {
4826 case PIPE_A:
4827 break;
4828 case PIPE_B:
4829 if (crtc_state->fdi_lanes > 2)
4830 cpt_set_fdi_bc_bifurcation(dev_priv, false);
4831 else
4832 cpt_set_fdi_bc_bifurcation(dev_priv, true);
4833
4834 break;
4835 case PIPE_C:
4836 cpt_set_fdi_bc_bifurcation(dev_priv, true);
4837
4838 break;
4839 default:
4840 BUG();
4841 }
4842 }
4843
4844 /*
4845 * Finds the encoder associated with the given CRTC. This can only be
4846 * used when we know that the CRTC isn't feeding multiple encoders!
4847 */
4848 static struct intel_encoder *
4849 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
4850 const struct intel_crtc_state *crtc_state)
4851 {
4852 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4853 const struct drm_connector_state *connector_state;
4854 const struct drm_connector *connector;
4855 struct intel_encoder *encoder = NULL;
4856 int num_encoders = 0;
4857 int i;
4858
4859 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4860 if (connector_state->crtc != &crtc->base)
4861 continue;
4862
4863 encoder = to_intel_encoder(connector_state->best_encoder);
4864 num_encoders++;
4865 }
4866
4867 WARN(num_encoders != 1, "%d encoders for pipe %c\n",
4868 num_encoders, pipe_name(crtc->pipe));
4869
4870 return encoder;
4871 }
4872
4873 /*
4874 * Enable PCH resources required for PCH ports:
4875 * - PCH PLLs
4876 * - FDI training & RX/TX
4877 * - update transcoder timings
4878 * - DP transcoding bits
4879 * - transcoder
4880 */
4881 static void ironlake_pch_enable(const struct intel_atomic_state *state,
4882 const struct intel_crtc_state *crtc_state)
4883 {
4884 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4885 struct drm_device *dev = crtc->base.dev;
4886 struct drm_i915_private *dev_priv = to_i915(dev);
4887 int pipe = crtc->pipe;
4888 u32 temp;
4889
4890 assert_pch_transcoder_disabled(dev_priv, pipe);
4891
4892 if (IS_IVYBRIDGE(dev_priv))
4893 ivybridge_update_fdi_bc_bifurcation(crtc_state);
4894
4895 /* Write the TU size bits before fdi link training, so that error
4896 * detection works. */
4897 I915_WRITE(FDI_RX_TUSIZE1(pipe),
4898 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
4899
4900 /* For PCH output, training FDI link */
4901 dev_priv->display.fdi_link_train(crtc, crtc_state);
4902
4903 /* We need to program the right clock selection before writing the pixel
4904 * multiplier into the DPLL. */
4905 if (HAS_PCH_CPT(dev_priv)) {
4906 u32 sel;
4907
4908 temp = I915_READ(PCH_DPLL_SEL);
4909 temp |= TRANS_DPLL_ENABLE(pipe);
4910 sel = TRANS_DPLLB_SEL(pipe);
4911 if (crtc_state->shared_dpll ==
4912 intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
4913 temp |= sel;
4914 else
4915 temp &= ~sel;
4916 I915_WRITE(PCH_DPLL_SEL, temp);
4917 }
4918
4919 /* XXX: pch pll's can be enabled any time before we enable the PCH
4920 * transcoder, and we actually should do this to not upset any PCH
4921 * transcoder that already uses the clock when we share it.
4922 *
4923 * Note that enable_shared_dpll tries to do the right thing, but
4924 * get_shared_dpll unconditionally resets the pll - we need that to have
4925 * the right LVDS enable sequence. */
4926 intel_enable_shared_dpll(crtc_state);
4927
4928 /* set transcoder timing, panel must allow it */
4929 assert_panel_unlocked(dev_priv, pipe);
4930 ironlake_pch_transcoder_set_timings(crtc_state, pipe);
4931
4932 intel_fdi_normal_train(crtc);
4933
4934 /* For PCH DP, enable TRANS_DP_CTL */
4935 if (HAS_PCH_CPT(dev_priv) &&
4936 intel_crtc_has_dp_encoder(crtc_state)) {
4937 const struct drm_display_mode *adjusted_mode =
4938 &crtc_state->base.adjusted_mode;
4939 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
4940 i915_reg_t reg = TRANS_DP_CTL(pipe);
4941 enum port port;
4942
4943 temp = I915_READ(reg);
4944 temp &= ~(TRANS_DP_PORT_SEL_MASK |
4945 TRANS_DP_SYNC_MASK |
4946 TRANS_DP_BPC_MASK);
4947 temp |= TRANS_DP_OUTPUT_ENABLE;
4948 temp |= bpc << 9; /* same format but at 11:9 */
4949
4950 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
4951 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
4952 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
4953 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
4954
4955 port = intel_get_crtc_new_encoder(state, crtc_state)->port;
4956 WARN_ON(port < PORT_B || port > PORT_D);
4957 temp |= TRANS_DP_PORT_SEL(port);
4958
4959 I915_WRITE(reg, temp);
4960 }
4961
4962 ironlake_enable_pch_transcoder(crtc_state);
4963 }
4964
4965 static void lpt_pch_enable(const struct intel_atomic_state *state,
4966 const struct intel_crtc_state *crtc_state)
4967 {
4968 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4969 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4970 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
4971
4972 assert_pch_transcoder_disabled(dev_priv, PIPE_A);
4973
4974 lpt_program_iclkip(crtc_state);
4975
4976 /* Set transcoder timing. */
4977 ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
4978
4979 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
4980 }
4981
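/*
 * Sanity check after a modeset on CPT: watch the PIPEDSL scanline
 * counter; if it never changes within the timeout, the pipe did not
 * start scanning out and the mode set has failed.
 */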
4982 static void cpt_verify_modeset(struct drm_device *dev, int pipe)
4983 {
4984 struct drm_i915_private *dev_priv = to_i915(dev);
4985 i915_reg_t dslreg = PIPEDSL(pipe);
4986 u32 temp;
4987
4988 temp = I915_READ(dslreg);
4989 udelay(500);
4990 if (wait_for(I915_READ(dslreg) != temp, 5)) {
4991 if (wait_for(I915_READ(dslreg) != temp, 5))
4992 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
4993 }
4994 }
4995
4996 /*
4997 * The hardware phase 0.0 refers to the center of the pixel.
4998 * We want to start from the top/left edge which is phase
4999 * -0.5. That matches how the hardware calculates the scaling
5000 * factors (from top-left of the first pixel to bottom-right
5001 * of the last pixel, as opposed to the pixel centers).
5002 *
5003 * For 4:2:0 subsampled chroma planes we obviously have to
5004 * adjust that so that the chroma sample position lands in
5005 * the right spot.
5006 *
5007 * Note that for packed YCbCr 4:2:2 formats there is no way to
5008 * control chroma siting. The hardware simply replicates the
5009 * chroma samples for both of the luma samples, and thus we don't
5010 * actually get the expected MPEG2 chroma siting convention :(
5011 * The same behaviour is observed on pre-SKL platforms as well.
5012 *
5013 * Theory behind the formula (note that we ignore sub-pixel
5014 * source coordinates):
5015 * s = source sample position
5016 * d = destination sample position
5017 *
5018 * Downscaling 4:1:
5019 * -0.5
5020 * | 0.0
5021 * | | 1.5 (initial phase)
5022 * | | |
5023 * v v v
5024 * | s | s | s | s |
5025 * | d |
5026 *
5027 * Upscaling 1:4:
5028 * -0.5
5029 * | -0.375 (initial phase)
5030 * | | 0.0
5031 * | | |
5032 * v v v
5033 * | s |
5034 * | d | d | d | d |
5035 */
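/*
 * Worked example with illustrative numbers: for a 2:1 downscale
 * (scale = 0x20000 in the 16.16 fixed-point format produced by
 * skylake_pfit_enable()) of a non-subsampled, non-cosited plane
 * (sub = 1), phase = -0x8000 + 0x20000 / 2 = 0x8000, i.e. the
 * desired +0.5 initial phase, and the value programmed is
 * (0x8000 >> 2) | PS_PHASE_TRIP.
 */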
5036 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
5037 {
5038 int phase = -0x8000;
5039 u16 trip = 0;
5040
5041 if (chroma_cosited)
5042 phase += (sub - 1) * 0x8000 / sub;
5043
5044 phase += scale / (2 * sub);
5045
5046 /*
5047 * The hardware initial phase is limited to [-0.5, 1.5].
5048 * Since the max hardware scale factor is 3.0, we
5049 * should never actually exceed 1.0 here.
5050 */
5051 WARN_ON(phase < -0x8000 || phase > 0x18000);
5052
5053 if (phase < 0)
5054 phase = 0x10000 + phase;
5055 else
5056 trip = PS_PHASE_TRIP;
5057
5058 return ((phase >> 2) & PS_PHASE_MASK) | trip;
5059 }
5060
5061 static int
5062 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
5063 unsigned int scaler_user, int *scaler_id,
5064 int src_w, int src_h, int dst_w, int dst_h,
5065 const struct drm_format_info *format, bool need_scaler)
5066 {
5067 struct intel_crtc_scaler_state *scaler_state =
5068 &crtc_state->scaler_state;
5069 struct intel_crtc *intel_crtc =
5070 to_intel_crtc(crtc_state->base.crtc);
5071 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
5072 const struct drm_display_mode *adjusted_mode =
5073 &crtc_state->base.adjusted_mode;
5074
5075 /*
5076 * Src coordinates are already rotated by 270 degrees for
5077 * the 90/270 degree plane rotation cases (to match the
5078 * GTT mapping), hence no need to account for rotation here.
5079 */
5080 if (src_w != dst_w || src_h != dst_h)
5081 need_scaler = true;
5082
5083 /*
5084 * Scaling/fitting not supported in IF-ID mode in GEN9+
5085 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
5086 * Once NV12 is enabled, handle it here while allocating scaler
5087 * for NV12.
5088 */
5089 if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
5090 need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5091 DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
5092 return -EINVAL;
5093 }
5094
5095 /*
5096 * If the plane is being disabled, the scaler is no longer required, or
5097 * a detach is forced, free the scaler bound to this plane/crtc by
5098 * updating scaler_state->scaler_users.
5099 *
5100 * Here the scaler state in crtc_state is only marked free so that the
5101 * scaler can be assigned to another user; the actual register update
5102 * that frees the scaler is done in the plane/panel-fit programming.
5103 * For that reason crtc/plane_state->scaler_id isn't reset here.
5104 */
5105 if (force_detach || !need_scaler) {
5106 if (*scaler_id >= 0) {
5107 scaler_state->scaler_users &= ~(1 << scaler_user);
5108 scaler_state->scalers[*scaler_id].in_use = 0;
5109
5110 DRM_DEBUG_KMS("scaler_user index %u.%u: "
5111 "Staged freeing scaler id %d scaler_users = 0x%x\n",
5112 intel_crtc->pipe, scaler_user, *scaler_id,
5113 scaler_state->scaler_users);
5114 *scaler_id = -1;
5115 }
5116 return 0;
5117 }
5118
5119 if (format && is_planar_yuv_format(format->format) &&
5120 (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
5121 DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
5122 return -EINVAL;
5123 }
5124
5125 /* range checks */
5126 if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
5127 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
5128 (INTEL_GEN(dev_priv) >= 11 &&
5129 (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
5130 dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
5131 (INTEL_GEN(dev_priv) < 11 &&
5132 (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
5133 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
5134 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
5135 "size is out of scaler range\n",
5136 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
5137 return -EINVAL;
5138 }
5139
5140 /* mark this plane as a scaler user in crtc_state */
5141 scaler_state->scaler_users |= (1 << scaler_user);
5142 DRM_DEBUG_KMS("scaler_user index %u.%u: "
5143 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
5144 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
5145 scaler_state->scaler_users);
5146
5147 return 0;
5148 }
5149
5150 /**
5151 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
5152 *
5153 * @state: crtc state to stage the scaler update for
5154 *
5155 * Return:
5156 * 0 - scaler_users updated successfully
5157 * error - requested scaling cannot be supported or other error condition
5158 */
5159 int skl_update_scaler_crtc(struct intel_crtc_state *state)
5160 {
5161 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
5162 bool need_scaler = false;
5163
5164 if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
5165 need_scaler = true;
5166
5167 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
5168 &state->scaler_state.scaler_id,
5169 state->pipe_src_w, state->pipe_src_h,
5170 adjusted_mode->crtc_hdisplay,
5171 adjusted_mode->crtc_vdisplay, NULL, need_scaler);
5172 }
5173
5174 /**
5175 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
5176 * @crtc_state: crtc state containing the scaler state to update
5177 * @plane_state: atomic plane state to update
5178 *
5179 * Return:
5180 * 0 - scaler_users updated successfully
5181 * error - requested scaling cannot be supported or other error condition
5182 */
5183 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
5184 struct intel_plane_state *plane_state)
5185 {
5186 struct intel_plane *intel_plane =
5187 to_intel_plane(plane_state->base.plane);
5188 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
5189 struct drm_framebuffer *fb = plane_state->base.fb;
5190 int ret;
5191 bool force_detach = !fb || !plane_state->base.visible;
5192 bool need_scaler = false;
5193
5194 /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
5195 if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
5196 fb && is_planar_yuv_format(fb->format->format))
5197 need_scaler = true;
5198
5199 ret = skl_update_scaler(crtc_state, force_detach,
5200 drm_plane_index(&intel_plane->base),
5201 &plane_state->scaler_id,
5202 drm_rect_width(&plane_state->base.src) >> 16,
5203 drm_rect_height(&plane_state->base.src) >> 16,
5204 drm_rect_width(&plane_state->base.dst),
5205 drm_rect_height(&plane_state->base.dst),
5206 fb ? fb->format : NULL, need_scaler);
5207
5208 if (ret || plane_state->scaler_id < 0)
5209 return ret;
5210
5211 /* check colorkey */
5212 if (plane_state->ckey.flags) {
5213 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed\n",
5214 intel_plane->base.base.id,
5215 intel_plane->base.name);
5216 return -EINVAL;
5217 }
5218
5219 /* Check src format */
5220 switch (fb->format->format) {
5221 case DRM_FORMAT_RGB565:
5222 case DRM_FORMAT_XBGR8888:
5223 case DRM_FORMAT_XRGB8888:
5224 case DRM_FORMAT_ABGR8888:
5225 case DRM_FORMAT_ARGB8888:
5226 case DRM_FORMAT_XRGB2101010:
5227 case DRM_FORMAT_XBGR2101010:
5228 case DRM_FORMAT_XBGR16161616F:
5229 case DRM_FORMAT_ABGR16161616F:
5230 case DRM_FORMAT_XRGB16161616F:
5231 case DRM_FORMAT_ARGB16161616F:
5232 case DRM_FORMAT_YUYV:
5233 case DRM_FORMAT_YVYU:
5234 case DRM_FORMAT_UYVY:
5235 case DRM_FORMAT_VYUY:
5236 case DRM_FORMAT_NV12:
5237 case DRM_FORMAT_P010:
5238 case DRM_FORMAT_P012:
5239 case DRM_FORMAT_P016:
5240 case DRM_FORMAT_Y210:
5241 case DRM_FORMAT_Y212:
5242 case DRM_FORMAT_Y216:
5243 case DRM_FORMAT_XVYU2101010:
5244 case DRM_FORMAT_XVYU12_16161616:
5245 case DRM_FORMAT_XVYU16161616:
5246 break;
5247 default:
5248 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
5249 intel_plane->base.base.id, intel_plane->base.name,
5250 fb->base.id, fb->format->format);
5251 return -EINVAL;
5252 }
5253
5254 return 0;
5255 }
5256
5257 static void skylake_scaler_disable(struct intel_crtc *crtc)
5258 {
5259 int i;
5260
5261 for (i = 0; i < crtc->num_scalers; i++)
5262 skl_detach_scaler(crtc, i);
5263 }
5264
5265 static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
5266 {
5267 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5268 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5269 enum pipe pipe = crtc->pipe;
5270 const struct intel_crtc_scaler_state *scaler_state =
5271 &crtc_state->scaler_state;
5272
5273 if (crtc_state->pch_pfit.enabled) {
5274 u16 uv_rgb_hphase, uv_rgb_vphase;
5275 int pfit_w, pfit_h, hscale, vscale;
5276 int id;
5277
5278 if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
5279 return;
5280
5281 pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
5282 pfit_h = crtc_state->pch_pfit.size & 0xFFFF;
5283
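/*
 * 16.16 fixed point, e.g. a 1920 pixel wide source panel-fitted
 * to 1280 gives hscale = (1920 << 16) / 1280 = 0x18000, i.e. 1.5
 * (illustrative numbers).
 */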
5284 hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
5285 vscale = (crtc_state->pipe_src_h << 16) / pfit_h;
5286
5287 uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
5288 uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
5289
5290 id = scaler_state->scaler_id;
5291 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
5292 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
5293 I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
5294 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
5295 I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
5296 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
5297 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
5298 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
5299 }
5300 }
5301
5302 static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
5303 {
5304 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5305 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5306 int pipe = crtc->pipe;
5307
5308 if (crtc_state->pch_pfit.enabled) {
5309 /* Force use of hard-coded filter coefficients
5310 * as some pre-programmed values are broken,
5311 * e.g. x201.
5312 */
5313 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
5314 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
5315 PF_PIPE_SEL_IVB(pipe));
5316 else
5317 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
5318 I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
5319 I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
5320 }
5321 }
5322
5323 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
5324 {
5325 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5326 struct drm_device *dev = crtc->base.dev;
5327 struct drm_i915_private *dev_priv = to_i915(dev);
5328
5329 if (!crtc_state->ips_enabled)
5330 return;
5331
5332 /*
5333 * We can only enable IPS after we enable a plane and wait for a vblank.
5334 * This function is called from post_plane_update, which is run after
5335 * a vblank wait.
5336 */
5337 WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
5338
5339 if (IS_BROADWELL(dev_priv)) {
5340 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
5341 IPS_ENABLE | IPS_PCODE_CONTROL));
5342 /* Quoting Art Runyan: "it's not safe to expect any particular
5343 * value in IPS_CTL bit 31 after enabling IPS through the
5344 * mailbox." Moreover, the mailbox may return a bogus state,
5345 * so we need to just enable it and continue on.
5346 */
5347 } else {
5348 I915_WRITE(IPS_CTL, IPS_ENABLE);
5349 /* The bit only becomes 1 in the next vblank, so this wait here
5350 * is essentially intel_wait_for_vblank. If we don't have this
5351 * and don't wait for vblanks until the end of crtc_enable, then
5352 * the HW state readout code will complain that the expected
5353 * IPS_CTL value is not the one we read. */
5354 if (intel_wait_for_register(&dev_priv->uncore,
5355 IPS_CTL, IPS_ENABLE, IPS_ENABLE,
5356 50))
5357 DRM_ERROR("Timed out waiting for IPS enable\n");
5358 }
5359 }
5360
5361 void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
5362 {
5363 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5364 struct drm_device *dev = crtc->base.dev;
5365 struct drm_i915_private *dev_priv = to_i915(dev);
5366
5367 if (!crtc_state->ips_enabled)
5368 return;
5369
5370 if (IS_BROADWELL(dev_priv)) {
5371 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
5372 /*
5373 * Wait for PCODE to finish disabling IPS. The BSpec specified
5374 * 42ms timeout value leads to occasional timeouts so use 100ms
5375 * instead.
5376 */
5377 if (intel_wait_for_register(&dev_priv->uncore,
5378 IPS_CTL, IPS_ENABLE, 0,
5379 100))
5380 DRM_ERROR("Timed out waiting for IPS disable\n");
5381 } else {
5382 I915_WRITE(IPS_CTL, 0);
5383 POSTING_READ(IPS_CTL);
5384 }
5385
5386 /* We need to wait for a vblank before we can disable the plane. */
5387 intel_wait_for_vblank(dev_priv, crtc->pipe);
5388 }
5389
5390 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
5391 {
5392 if (intel_crtc->overlay) {
5393 struct drm_device *dev = intel_crtc->base.dev;
5394
5395 mutex_lock(&dev->struct_mutex);
5396 (void) intel_overlay_switch_off(intel_crtc->overlay);
5397 mutex_unlock(&dev->struct_mutex);
5398 }
5399
5400 /* Let userspace switch the overlay on again. In most cases userspace
5401 * has to recompute where to put it anyway.
5402 */
5403 }
5404
5405 /**
5406 * intel_post_enable_primary - Perform operations after enabling primary plane
5407 * @crtc: the CRTC whose primary plane was just enabled
5408 * @new_crtc_state: the enabling state
5409 *
5410  * Performs potentially sleeping operations that must be done after the primary
5411  * plane is enabled, such as re-enabling FIFO underrun reporting. Note that this may be
5412 * called due to an explicit primary plane update, or due to an implicit
5413 * re-enable that is caused when a sprite plane is updated to no longer
5414 * completely hide the primary plane.
5415 */
5416 static void
5417 intel_post_enable_primary(struct drm_crtc *crtc,
5418 const struct intel_crtc_state *new_crtc_state)
5419 {
5420 struct drm_device *dev = crtc->dev;
5421 struct drm_i915_private *dev_priv = to_i915(dev);
5422 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5423 int pipe = intel_crtc->pipe;
5424
5425 /*
5426 * Gen2 reports pipe underruns whenever all planes are disabled.
5427 * So don't enable underrun reporting before at least some planes
5428 * are enabled.
5429 * FIXME: Need to fix the logic to work when we turn off all planes
5430 * but leave the pipe running.
5431 */
5432 if (IS_GEN(dev_priv, 2))
5433 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5434
5435 /* Underruns don't always raise interrupts, so check manually. */
5436 intel_check_cpu_fifo_underruns(dev_priv);
5437 intel_check_pch_fifo_underruns(dev_priv);
5438 }
5439
5440 /* FIXME get rid of this and use pre_plane_update */
5441 static void
5442 intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
5443 {
5444 struct drm_device *dev = crtc->dev;
5445 struct drm_i915_private *dev_priv = to_i915(dev);
5446 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5447 int pipe = intel_crtc->pipe;
5448
5449 /*
5450 * Gen2 reports pipe underruns whenever all planes are disabled.
5451 * So disable underrun reporting before all the planes get disabled.
5452 */
5453 if (IS_GEN(dev_priv, 2))
5454 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5455
5456 hsw_disable_ips(to_intel_crtc_state(crtc->state));
5457
5458 /*
5459 * Vblank time updates from the shadow to live plane control register
5460 * are blocked if the memory self-refresh mode is active at that
5461 	 * moment. So to make sure the plane gets truly disabled, first
5462 	 * disable the self-refresh mode. The self-refresh enable bit in turn
5463 * will be checked/applied by the HW only at the next frame start
5464 * event which is after the vblank start event, so we need to have a
5465 * wait-for-vblank between disabling the plane and the pipe.
5466 */
5467 if (HAS_GMCH(dev_priv) &&
5468 intel_set_memory_cxsr(dev_priv, false))
5469 intel_wait_for_vblank(dev_priv, pipe);
5470 }
5471
5472 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5473 const struct intel_crtc_state *new_crtc_state)
5474 {
5475 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5476 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5477
5478 if (!old_crtc_state->ips_enabled)
5479 return false;
5480
5481 if (needs_modeset(&new_crtc_state->base))
5482 return true;
5483
5484 /*
5485 	 * Workaround: Do not read or write the pipe palette/gamma data while
5486 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5487 *
5488 * Disable IPS before we program the LUT.
5489 */
5490 if (IS_HASWELL(dev_priv) &&
5491 (new_crtc_state->base.color_mgmt_changed ||
5492 new_crtc_state->update_pipe) &&
5493 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5494 return true;
5495
5496 return !new_crtc_state->ips_enabled;
5497 }
5498
5499 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
5500 const struct intel_crtc_state *new_crtc_state)
5501 {
5502 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5503 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5504
5505 if (!new_crtc_state->ips_enabled)
5506 return false;
5507
5508 if (needs_modeset(&new_crtc_state->base))
5509 return true;
5510
5511 /*
5512 	 * Workaround: Do not read or write the pipe palette/gamma data while
5513 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5514 *
5515 * Re-enable IPS after the LUT has been programmed.
5516 */
5517 if (IS_HASWELL(dev_priv) &&
5518 (new_crtc_state->base.color_mgmt_changed ||
5519 new_crtc_state->update_pipe) &&
5520 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5521 return true;
5522
5523 /*
5524 * We can't read out IPS on broadwell, assume the worst and
5525 * forcibly enable IPS on the first fastset.
5526 */
5527 if (new_crtc_state->update_pipe &&
5528 old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
5529 return true;
5530
5531 return !old_crtc_state->ips_enabled;
5532 }
5533
5534 static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5535 const struct intel_crtc_state *crtc_state)
5536 {
5537 if (!crtc_state->nv12_planes)
5538 return false;
5539
5540 /* WA Display #0827: Gen9:all */
5541 if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
5542 return true;
5543
5544 return false;
5545 }
5546
5547 static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
5548 const struct intel_crtc_state *crtc_state)
5549 {
5550 /* Wa_2006604312:icl */
5551 if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
5552 return true;
5553
5554 return false;
5555 }
5556
5557 static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
5558 {
5559 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
5560 struct drm_device *dev = crtc->base.dev;
5561 struct drm_i915_private *dev_priv = to_i915(dev);
5562 struct drm_atomic_state *old_state = old_crtc_state->base.state;
5563 struct intel_crtc_state *pipe_config =
5564 intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
5565 crtc);
5566 struct drm_plane *primary = crtc->base.primary;
5567 struct drm_plane_state *old_primary_state =
5568 drm_atomic_get_old_plane_state(old_state, primary);
5569
5570 intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
5571
5572 if (pipe_config->update_wm_post && pipe_config->base.active)
5573 intel_update_watermarks(crtc);
5574
5575 if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
5576 hsw_enable_ips(pipe_config);
5577
5578 if (old_primary_state) {
5579 struct drm_plane_state *new_primary_state =
5580 drm_atomic_get_new_plane_state(old_state, primary);
5581
5582 intel_fbc_post_update(crtc);
5583
5584 if (new_primary_state->visible &&
5585 (needs_modeset(&pipe_config->base) ||
5586 !old_primary_state->visible))
5587 intel_post_enable_primary(&crtc->base, pipe_config);
5588 }
5589
5590 if (needs_nv12_wa(dev_priv, old_crtc_state) &&
5591 !needs_nv12_wa(dev_priv, pipe_config))
5592 skl_wa_827(dev_priv, crtc->pipe, false);
5593
5594 if (needs_scalerclk_wa(dev_priv, old_crtc_state) &&
5595 !needs_scalerclk_wa(dev_priv, pipe_config))
5596 icl_wa_scalerclkgating(dev_priv, crtc->pipe, false);
5597 }
5598
5599 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
5600 struct intel_crtc_state *pipe_config)
5601 {
5602 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
5603 struct drm_device *dev = crtc->base.dev;
5604 struct drm_i915_private *dev_priv = to_i915(dev);
5605 struct drm_atomic_state *old_state = old_crtc_state->base.state;
5606 struct drm_plane *primary = crtc->base.primary;
5607 struct drm_plane_state *old_primary_state =
5608 drm_atomic_get_old_plane_state(old_state, primary);
5609 bool modeset = needs_modeset(&pipe_config->base);
5610 struct intel_atomic_state *old_intel_state =
5611 to_intel_atomic_state(old_state);
5612
5613 if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
5614 hsw_disable_ips(old_crtc_state);
5615
5616 if (old_primary_state) {
5617 struct intel_plane_state *new_primary_state =
5618 intel_atomic_get_new_plane_state(old_intel_state,
5619 to_intel_plane(primary));
5620
5621 intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
5622 /*
5623 * Gen2 reports pipe underruns whenever all planes are disabled.
5624 * So disable underrun reporting before all the planes get disabled.
5625 */
5626 if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
5627 (modeset || !new_primary_state->base.visible))
5628 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
5629 }
5630
5631 /* Display WA 827 */
5632 if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
5633 needs_nv12_wa(dev_priv, pipe_config))
5634 skl_wa_827(dev_priv, crtc->pipe, true);
5635
5636 /* Wa_2006604312:icl */
5637 if (!needs_scalerclk_wa(dev_priv, old_crtc_state) &&
5638 needs_scalerclk_wa(dev_priv, pipe_config))
5639 icl_wa_scalerclkgating(dev_priv, crtc->pipe, true);
5640
5641 /*
5642 * Vblank time updates from the shadow to live plane control register
5643 * are blocked if the memory self-refresh mode is active at that
5644 	 * moment. So to make sure the plane gets truly disabled, first
5645 	 * disable the self-refresh mode. The self-refresh enable bit in turn
5646 * will be checked/applied by the HW only at the next frame start
5647 * event which is after the vblank start event, so we need to have a
5648 * wait-for-vblank between disabling the plane and the pipe.
5649 */
5650 if (HAS_GMCH(dev_priv) && old_crtc_state->base.active &&
5651 pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
5652 intel_wait_for_vblank(dev_priv, crtc->pipe);
5653
5654 /*
5655 * IVB workaround: must disable low power watermarks for at least
5656 * one frame before enabling scaling. LP watermarks can be re-enabled
5657 * when scaling is disabled.
5658 *
5659 * WaCxSRDisabledForSpriteScaling:ivb
5660 */
5661 if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
5662 old_crtc_state->base.active)
5663 intel_wait_for_vblank(dev_priv, crtc->pipe);
5664
5665 /*
5666 * If we're doing a modeset, we're done. No need to do any pre-vblank
5667 * watermark programming here.
5668 */
5669 if (needs_modeset(&pipe_config->base))
5670 return;
5671
5672 /*
5673 * For platforms that support atomic watermarks, program the
5674 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
5675 * will be the intermediate values that are safe for both pre- and
5676 * post- vblank; when vblank happens, the 'active' values will be set
5677 * to the final 'target' values and we'll do this again to get the
5678 * optimal watermarks. For gen9+ platforms, the values we program here
5679 * will be the final target values which will get automatically latched
5680 * at vblank time; no further programming will be necessary.
5681 *
5682 * If a platform hasn't been transitioned to atomic watermarks yet,
5683 * we'll continue to update watermarks the old way, if flags tell
5684 * us to.
5685 */
5686 if (dev_priv->display.initial_watermarks != NULL)
5687 dev_priv->display.initial_watermarks(old_intel_state,
5688 pipe_config);
5689 else if (pipe_config->update_wm_pre)
5690 intel_update_watermarks(crtc);
5691 }
5692
5693 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
5694 struct intel_crtc *crtc)
5695 {
5696 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5697 const struct intel_crtc_state *new_crtc_state =
5698 intel_atomic_get_new_crtc_state(state, crtc);
5699 unsigned int update_mask = new_crtc_state->update_planes;
5700 const struct intel_plane_state *old_plane_state;
5701 struct intel_plane *plane;
5702 	unsigned int fb_bits = 0;
5703 int i;
5704
5705 intel_crtc_dpms_overlay_disable(crtc);
5706
5707 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
5708 if (crtc->pipe != plane->pipe ||
5709 !(update_mask & BIT(plane->id)))
5710 continue;
5711
5712 intel_disable_plane(plane, new_crtc_state);
5713
5714 if (old_plane_state->base.visible)
5715 fb_bits |= plane->frontbuffer_bit;
5716 }
5717
5718 intel_frontbuffer_flip(dev_priv, fb_bits);
5719 }
5720
5721 static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
5722 struct intel_crtc_state *crtc_state,
5723 struct drm_atomic_state *old_state)
5724 {
5725 struct drm_connector_state *conn_state;
5726 struct drm_connector *conn;
5727 int i;
5728
5729 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5730 struct intel_encoder *encoder =
5731 to_intel_encoder(conn_state->best_encoder);
5732
5733 if (conn_state->crtc != crtc)
5734 continue;
5735
5736 if (encoder->pre_pll_enable)
5737 encoder->pre_pll_enable(encoder, crtc_state, conn_state);
5738 }
5739 }
5740
5741 static void intel_encoders_pre_enable(struct drm_crtc *crtc,
5742 struct intel_crtc_state *crtc_state,
5743 struct drm_atomic_state *old_state)
5744 {
5745 struct drm_connector_state *conn_state;
5746 struct drm_connector *conn;
5747 int i;
5748
5749 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5750 struct intel_encoder *encoder =
5751 to_intel_encoder(conn_state->best_encoder);
5752
5753 if (conn_state->crtc != crtc)
5754 continue;
5755
5756 if (encoder->pre_enable)
5757 encoder->pre_enable(encoder, crtc_state, conn_state);
5758 }
5759 }
5760
5761 static void intel_encoders_enable(struct drm_crtc *crtc,
5762 struct intel_crtc_state *crtc_state,
5763 struct drm_atomic_state *old_state)
5764 {
5765 struct drm_connector_state *conn_state;
5766 struct drm_connector *conn;
5767 int i;
5768
5769 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5770 struct intel_encoder *encoder =
5771 to_intel_encoder(conn_state->best_encoder);
5772
5773 if (conn_state->crtc != crtc)
5774 continue;
5775
5776 if (encoder->enable)
5777 encoder->enable(encoder, crtc_state, conn_state);
5778 intel_opregion_notify_encoder(encoder, true);
5779 }
5780 }
5781
5782 static void intel_encoders_disable(struct drm_crtc *crtc,
5783 struct intel_crtc_state *old_crtc_state,
5784 struct drm_atomic_state *old_state)
5785 {
5786 struct drm_connector_state *old_conn_state;
5787 struct drm_connector *conn;
5788 int i;
5789
5790 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5791 struct intel_encoder *encoder =
5792 to_intel_encoder(old_conn_state->best_encoder);
5793
5794 if (old_conn_state->crtc != crtc)
5795 continue;
5796
5797 intel_opregion_notify_encoder(encoder, false);
5798 if (encoder->disable)
5799 encoder->disable(encoder, old_crtc_state, old_conn_state);
5800 }
5801 }
5802
5803 static void intel_encoders_post_disable(struct drm_crtc *crtc,
5804 struct intel_crtc_state *old_crtc_state,
5805 struct drm_atomic_state *old_state)
5806 {
5807 struct drm_connector_state *old_conn_state;
5808 struct drm_connector *conn;
5809 int i;
5810
5811 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5812 struct intel_encoder *encoder =
5813 to_intel_encoder(old_conn_state->best_encoder);
5814
5815 if (old_conn_state->crtc != crtc)
5816 continue;
5817
5818 if (encoder->post_disable)
5819 encoder->post_disable(encoder, old_crtc_state, old_conn_state);
5820 }
5821 }
5822
5823 static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
5824 struct intel_crtc_state *old_crtc_state,
5825 struct drm_atomic_state *old_state)
5826 {
5827 struct drm_connector_state *old_conn_state;
5828 struct drm_connector *conn;
5829 int i;
5830
5831 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5832 struct intel_encoder *encoder =
5833 to_intel_encoder(old_conn_state->best_encoder);
5834
5835 if (old_conn_state->crtc != crtc)
5836 continue;
5837
5838 if (encoder->post_pll_disable)
5839 encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
5840 }
5841 }
5842
5843 static void intel_encoders_update_pipe(struct drm_crtc *crtc,
5844 struct intel_crtc_state *crtc_state,
5845 struct drm_atomic_state *old_state)
5846 {
5847 struct drm_connector_state *conn_state;
5848 struct drm_connector *conn;
5849 int i;
5850
5851 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5852 struct intel_encoder *encoder =
5853 to_intel_encoder(conn_state->best_encoder);
5854
5855 if (conn_state->crtc != crtc)
5856 continue;
5857
5858 if (encoder->update_pipe)
5859 encoder->update_pipe(encoder, crtc_state, conn_state);
5860 }
5861 }
5862
5863 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
5864 {
5865 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5866 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
5867
5868 plane->disable_plane(plane, crtc_state);
5869 }
5870
5871 static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
5872 struct drm_atomic_state *old_state)
5873 {
5874 struct drm_crtc *crtc = pipe_config->base.crtc;
5875 struct drm_device *dev = crtc->dev;
5876 struct drm_i915_private *dev_priv = to_i915(dev);
5877 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5878 int pipe = intel_crtc->pipe;
5879 struct intel_atomic_state *old_intel_state =
5880 to_intel_atomic_state(old_state);
5881
5882 if (WARN_ON(intel_crtc->active))
5883 return;
5884
5885 /*
5886 * Sometimes spurious CPU pipe underruns happen during FDI
5887 * training, at least with VGA+HDMI cloning. Suppress them.
5888 *
5889 	 * On ILK we get occasional spurious CPU pipe underruns
5890 	 * between eDP port A enable and vdd enable. Also, PCH port
5891 	 * enable seems to result in the occasional CPU pipe underrun.
5892 *
5893 * Spurious PCH underruns also occur during PCH enabling.
5894 */
5895 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5896 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
5897
5898 if (pipe_config->has_pch_encoder)
5899 intel_prepare_shared_dpll(pipe_config);
5900
5901 if (intel_crtc_has_dp_encoder(pipe_config))
5902 intel_dp_set_m_n(pipe_config, M1_N1);
5903
5904 intel_set_pipe_timings(pipe_config);
5905 intel_set_pipe_src_size(pipe_config);
5906
5907 if (pipe_config->has_pch_encoder) {
5908 intel_cpu_transcoder_set_m_n(pipe_config,
5909 &pipe_config->fdi_m_n, NULL);
5910 }
5911
5912 ironlake_set_pipeconf(pipe_config);
5913
5914 intel_crtc->active = true;
5915
5916 intel_encoders_pre_enable(crtc, pipe_config, old_state);
5917
5918 if (pipe_config->has_pch_encoder) {
5919 /* Note: FDI PLL enabling _must_ be done before we enable the
5920 * cpu pipes, hence this is separate from all the other fdi/pch
5921 * enabling. */
5922 ironlake_fdi_pll_enable(pipe_config);
5923 } else {
5924 assert_fdi_tx_disabled(dev_priv, pipe);
5925 assert_fdi_rx_disabled(dev_priv, pipe);
5926 }
5927
5928 ironlake_pfit_enable(pipe_config);
5929
5930 /*
5931 	 * On ILK+ the LUT must be loaded before the pipe is running, but with
5932 	 * clocks enabled
5933 */
5934 intel_color_load_luts(pipe_config);
5935 intel_color_commit(pipe_config);
5936 /* update DSPCNTR to configure gamma for pipe bottom color */
5937 intel_disable_primary_plane(pipe_config);
5938
5939 if (dev_priv->display.initial_watermarks != NULL)
5940 dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
5941 intel_enable_pipe(pipe_config);
5942
5943 if (pipe_config->has_pch_encoder)
5944 ironlake_pch_enable(old_intel_state, pipe_config);
5945
5946 assert_vblank_disabled(crtc);
5947 intel_crtc_vblank_on(pipe_config);
5948
5949 intel_encoders_enable(crtc, pipe_config, old_state);
5950
5951 if (HAS_PCH_CPT(dev_priv))
5952 cpt_verify_modeset(dev, intel_crtc->pipe);
5953
5954 /*
5955 * Must wait for vblank to avoid spurious PCH FIFO underruns.
5956 * And a second vblank wait is needed at least on ILK with
5957 * some interlaced HDMI modes. Let's do the double wait always
5958 * in case there are more corner cases we don't know about.
5959 */
5960 if (pipe_config->has_pch_encoder) {
5961 intel_wait_for_vblank(dev_priv, pipe);
5962 intel_wait_for_vblank(dev_priv, pipe);
5963 }
5964 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5965 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
5966 }
5967
5968 /* IPS only exists on ULT machines and is tied to pipe A. */
5969 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
5970 {
5971 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
5972 }
5973
5974 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
5975 enum pipe pipe, bool apply)
5976 {
5977 u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
5978 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
5979
5980 if (apply)
5981 val |= mask;
5982 else
5983 val &= ~mask;
5984
5985 I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
5986 }
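/*
 * A note on the write pattern above (illustrative): the workaround is a
 * plain read-modify-write, so only the three scaler clock gating disable
 * bits toggle with @apply, while every other bit already set in
 * CLKGATE_DIS_PSL is preserved across the enable/disable sequence.
 */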
5987
5988 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
5989 {
5990 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5991 enum pipe pipe = crtc->pipe;
5992 u32 val;
5993
5994 val = MBUS_DBOX_A_CREDIT(2);
5995 val |= MBUS_DBOX_BW_CREDIT(1);
5996 val |= MBUS_DBOX_B_CREDIT(8);
5997
5998 I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
5999 }
6000
6001 static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
6002 struct drm_atomic_state *old_state)
6003 {
6004 struct drm_crtc *crtc = pipe_config->base.crtc;
6005 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6006 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6007 int pipe = intel_crtc->pipe, hsw_workaround_pipe;
6008 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
6009 struct intel_atomic_state *old_intel_state =
6010 to_intel_atomic_state(old_state);
6011 bool psl_clkgate_wa;
6012
6013 if (WARN_ON(intel_crtc->active))
6014 return;
6015
6016 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
6017
6018 if (pipe_config->shared_dpll)
6019 intel_enable_shared_dpll(pipe_config);
6020
6021 intel_encoders_pre_enable(crtc, pipe_config, old_state);
6022
6023 if (intel_crtc_has_dp_encoder(pipe_config))
6024 intel_dp_set_m_n(pipe_config, M1_N1);
6025
6026 if (!transcoder_is_dsi(cpu_transcoder))
6027 intel_set_pipe_timings(pipe_config);
6028
6029 intel_set_pipe_src_size(pipe_config);
6030
6031 if (cpu_transcoder != TRANSCODER_EDP &&
6032 !transcoder_is_dsi(cpu_transcoder)) {
6033 I915_WRITE(PIPE_MULT(cpu_transcoder),
6034 pipe_config->pixel_multiplier - 1);
6035 }
6036
6037 if (pipe_config->has_pch_encoder) {
6038 intel_cpu_transcoder_set_m_n(pipe_config,
6039 &pipe_config->fdi_m_n, NULL);
6040 }
6041
6042 if (!transcoder_is_dsi(cpu_transcoder))
6043 haswell_set_pipeconf(pipe_config);
6044
6045 haswell_set_pipemisc(pipe_config);
6046
6047 intel_crtc->active = true;
6048
6049 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
6050 psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
6051 pipe_config->pch_pfit.enabled;
6052 if (psl_clkgate_wa)
6053 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
6054
6055 if (INTEL_GEN(dev_priv) >= 9)
6056 skylake_pfit_enable(pipe_config);
6057 else
6058 ironlake_pfit_enable(pipe_config);
6059
6060 /*
6061 	 * On ILK+ the LUT must be loaded before the pipe is running, but with
6062 	 * clocks enabled
6063 */
6064 intel_color_load_luts(pipe_config);
6065 intel_color_commit(pipe_config);
6066 /* update DSPCNTR to configure gamma/csc for pipe bottom color */
6067 if (INTEL_GEN(dev_priv) < 9)
6068 intel_disable_primary_plane(pipe_config);
6069
6070 if (INTEL_GEN(dev_priv) >= 11)
6071 icl_set_pipe_chicken(intel_crtc);
6072
6073 intel_ddi_set_pipe_settings(pipe_config);
6074 if (!transcoder_is_dsi(cpu_transcoder))
6075 intel_ddi_enable_transcoder_func(pipe_config);
6076
6077 if (dev_priv->display.initial_watermarks != NULL)
6078 dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
6079
6080 if (INTEL_GEN(dev_priv) >= 11)
6081 icl_pipe_mbus_enable(intel_crtc);
6082
6083 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
6084 if (!transcoder_is_dsi(cpu_transcoder))
6085 intel_enable_pipe(pipe_config);
6086
6087 if (pipe_config->has_pch_encoder)
6088 lpt_pch_enable(old_intel_state, pipe_config);
6089
6090 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
6091 intel_ddi_set_vc_payload_alloc(pipe_config, true);
6092
6093 assert_vblank_disabled(crtc);
6094 intel_crtc_vblank_on(pipe_config);
6095
6096 intel_encoders_enable(crtc, pipe_config, old_state);
6097
6098 if (psl_clkgate_wa) {
6099 intel_wait_for_vblank(dev_priv, pipe);
6100 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
6101 }
6102
6103 /* If we change the relative order between pipe/planes enabling, we need
6104 * to change the workaround. */
6105 hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
6106 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
6107 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
6108 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
6109 }
6110 }
6111
6112 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6113 {
6114 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6115 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6116 enum pipe pipe = crtc->pipe;
6117
6118 	/* To avoid upsetting the power well on Haswell, only disable the pfit if
6119 	 * it's in use. The hw state readout code will make sure we get this right. */
6120 if (old_crtc_state->pch_pfit.enabled) {
6121 I915_WRITE(PF_CTL(pipe), 0);
6122 I915_WRITE(PF_WIN_POS(pipe), 0);
6123 I915_WRITE(PF_WIN_SZ(pipe), 0);
6124 }
6125 }
6126
6127 static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
6128 struct drm_atomic_state *old_state)
6129 {
6130 struct drm_crtc *crtc = old_crtc_state->base.crtc;
6131 struct drm_device *dev = crtc->dev;
6132 struct drm_i915_private *dev_priv = to_i915(dev);
6133 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6134 int pipe = intel_crtc->pipe;
6135
6136 /*
6137 * Sometimes spurious CPU pipe underruns happen when the
6138 * pipe is already disabled, but FDI RX/TX is still enabled.
6139 * Happens at least with VGA+HDMI cloning. Suppress them.
6140 */
6141 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6142 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
6143
6144 intel_encoders_disable(crtc, old_crtc_state, old_state);
6145
6146 drm_crtc_vblank_off(crtc);
6147 assert_vblank_disabled(crtc);
6148
6149 intel_disable_pipe(old_crtc_state);
6150
6151 ironlake_pfit_disable(old_crtc_state);
6152
6153 if (old_crtc_state->has_pch_encoder)
6154 ironlake_fdi_disable(crtc);
6155
6156 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
6157
6158 if (old_crtc_state->has_pch_encoder) {
6159 ironlake_disable_pch_transcoder(dev_priv, pipe);
6160
6161 if (HAS_PCH_CPT(dev_priv)) {
6162 i915_reg_t reg;
6163 u32 temp;
6164
6165 /* disable TRANS_DP_CTL */
6166 reg = TRANS_DP_CTL(pipe);
6167 temp = I915_READ(reg);
6168 temp &= ~(TRANS_DP_OUTPUT_ENABLE |
6169 TRANS_DP_PORT_SEL_MASK);
6170 temp |= TRANS_DP_PORT_SEL_NONE;
6171 I915_WRITE(reg, temp);
6172
6173 /* disable DPLL_SEL */
6174 temp = I915_READ(PCH_DPLL_SEL);
6175 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
6176 I915_WRITE(PCH_DPLL_SEL, temp);
6177 }
6178
6179 ironlake_fdi_pll_disable(intel_crtc);
6180 }
6181
6182 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6183 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
6184 }
6185
6186 static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
6187 struct drm_atomic_state *old_state)
6188 {
6189 struct drm_crtc *crtc = old_crtc_state->base.crtc;
6190 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6191 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6192 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
6193
6194 intel_encoders_disable(crtc, old_crtc_state, old_state);
6195
6196 drm_crtc_vblank_off(crtc);
6197 assert_vblank_disabled(crtc);
6198
6199 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
6200 if (!transcoder_is_dsi(cpu_transcoder))
6201 intel_disable_pipe(old_crtc_state);
6202
6203 if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
6204 intel_ddi_set_vc_payload_alloc(old_crtc_state, false);
6205
6206 if (!transcoder_is_dsi(cpu_transcoder))
6207 intel_ddi_disable_transcoder_func(old_crtc_state);
6208
6209 intel_dsc_disable(old_crtc_state);
6210
6211 if (INTEL_GEN(dev_priv) >= 9)
6212 skylake_scaler_disable(intel_crtc);
6213 else
6214 ironlake_pfit_disable(old_crtc_state);
6215
6216 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
6217
6218 intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
6219 }
6220
6221 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
6222 {
6223 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6224 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6225
6226 if (!crtc_state->gmch_pfit.control)
6227 return;
6228
6229 /*
6230 	 * The panel fitter should only be adjusted while the pipe is disabled,
6231 	 * according to the register description and the PRM.
6232 */
6233 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
6234 assert_pipe_disabled(dev_priv, crtc->pipe);
6235
6236 I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
6237 I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
6238
6239 /* Border color in case we don't scale up to the full screen. Black by
6240 * default, change to something else for debugging. */
6241 I915_WRITE(BCLRPAT(crtc->pipe), 0);
6242 }
6243
6244 bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
6245 {
6246 if (port == PORT_NONE)
6247 return false;
6248
6249 if (IS_ELKHARTLAKE(dev_priv))
6250 return port <= PORT_C;
6251
6252 if (INTEL_GEN(dev_priv) >= 11)
6253 return port <= PORT_B;
6254
6255 return false;
6256 }
6257
6258 bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
6259 {
6260 if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
6261 return port >= PORT_C && port <= PORT_F;
6262
6263 return false;
6264 }
6265
6266 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
6267 {
6268 if (!intel_port_is_tc(dev_priv, port))
6269 return PORT_TC_NONE;
6270
6271 return port - PORT_C;
6272 }
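/*
 * Illustration (assuming PORT_TC1 == 0 in enum tc_port): the port values
 * are consecutive, so the subtraction above maps PORT_C -> PORT_TC1,
 * PORT_D -> PORT_TC2, ..., PORT_F -> PORT_TC4 on ICL.
 */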
6273
6274 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
6275 {
6276 switch (port) {
6277 case PORT_A:
6278 return POWER_DOMAIN_PORT_DDI_A_LANES;
6279 case PORT_B:
6280 return POWER_DOMAIN_PORT_DDI_B_LANES;
6281 case PORT_C:
6282 return POWER_DOMAIN_PORT_DDI_C_LANES;
6283 case PORT_D:
6284 return POWER_DOMAIN_PORT_DDI_D_LANES;
6285 case PORT_E:
6286 return POWER_DOMAIN_PORT_DDI_E_LANES;
6287 case PORT_F:
6288 return POWER_DOMAIN_PORT_DDI_F_LANES;
6289 default:
6290 MISSING_CASE(port);
6291 return POWER_DOMAIN_PORT_OTHER;
6292 }
6293 }
6294
6295 enum intel_display_power_domain
6296 intel_aux_power_domain(struct intel_digital_port *dig_port)
6297 {
6298 switch (dig_port->aux_ch) {
6299 case AUX_CH_A:
6300 return POWER_DOMAIN_AUX_A;
6301 case AUX_CH_B:
6302 return POWER_DOMAIN_AUX_B;
6303 case AUX_CH_C:
6304 return POWER_DOMAIN_AUX_C;
6305 case AUX_CH_D:
6306 return POWER_DOMAIN_AUX_D;
6307 case AUX_CH_E:
6308 return POWER_DOMAIN_AUX_E;
6309 case AUX_CH_F:
6310 return POWER_DOMAIN_AUX_F;
6311 default:
6312 MISSING_CASE(dig_port->aux_ch);
6313 return POWER_DOMAIN_AUX_A;
6314 }
6315 }
6316
6317 static u64 get_crtc_power_domains(struct drm_crtc *crtc,
6318 struct intel_crtc_state *crtc_state)
6319 {
6320 struct drm_device *dev = crtc->dev;
6321 struct drm_i915_private *dev_priv = to_i915(dev);
6322 struct drm_encoder *encoder;
6323 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6324 enum pipe pipe = intel_crtc->pipe;
6325 u64 mask;
6326 enum transcoder transcoder = crtc_state->cpu_transcoder;
6327
6328 if (!crtc_state->base.active)
6329 return 0;
6330
6331 mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
6332 mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
6333 if (crtc_state->pch_pfit.enabled ||
6334 crtc_state->pch_pfit.force_thru)
6335 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
6336
6337 drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
6338 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6339
6340 mask |= BIT_ULL(intel_encoder->power_domain);
6341 }
6342
6343 if (HAS_DDI(dev_priv) && crtc_state->has_audio)
6344 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
6345
6346 if (crtc_state->shared_dpll)
6347 mask |= BIT_ULL(POWER_DOMAIN_PLLS);
6348
6349 return mask;
6350 }
6351
6352 static u64
6353 modeset_get_crtc_power_domains(struct drm_crtc *crtc,
6354 struct intel_crtc_state *crtc_state)
6355 {
6356 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6357 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6358 enum intel_display_power_domain domain;
6359 u64 domains, new_domains, old_domains;
6360
6361 old_domains = intel_crtc->enabled_power_domains;
6362 intel_crtc->enabled_power_domains = new_domains =
6363 get_crtc_power_domains(crtc, crtc_state);
6364
6365 domains = new_domains & ~old_domains;
6366
6367 for_each_power_domain(domain, domains)
6368 intel_display_power_get(dev_priv, domain);
6369
6370 return old_domains & ~new_domains;
6371 }
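/*
 * Worked example of the set arithmetic above (hypothetical domains):
 * with old = {PIPE_A, PLLS} and new = {PIPE_A, AUDIO}, we grab a
 * reference on AUDIO (new & ~old) and hand {PLLS} (old & ~new) back to
 * the caller, which releases it via modeset_put_power_domains() once
 * the modeset no longer needs it.
 */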
6372
6373 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
6374 u64 domains)
6375 {
6376 enum intel_display_power_domain domain;
6377
6378 for_each_power_domain(domain, domains)
6379 intel_display_power_put_unchecked(dev_priv, domain);
6380 }
6381
6382 static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
6383 struct drm_atomic_state *old_state)
6384 {
6385 struct intel_atomic_state *old_intel_state =
6386 to_intel_atomic_state(old_state);
6387 struct drm_crtc *crtc = pipe_config->base.crtc;
6388 struct drm_device *dev = crtc->dev;
6389 struct drm_i915_private *dev_priv = to_i915(dev);
6390 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6391 int pipe = intel_crtc->pipe;
6392
6393 if (WARN_ON(intel_crtc->active))
6394 return;
6395
6396 if (intel_crtc_has_dp_encoder(pipe_config))
6397 intel_dp_set_m_n(pipe_config, M1_N1);
6398
6399 intel_set_pipe_timings(pipe_config);
6400 intel_set_pipe_src_size(pipe_config);
6401
6402 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
6403 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6404 I915_WRITE(CHV_CANVAS(pipe), 0);
6405 }
6406
6407 i9xx_set_pipeconf(pipe_config);
6408
6409 intel_crtc->active = true;
6410
6411 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6412
6413 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
6414
6415 if (IS_CHERRYVIEW(dev_priv)) {
6416 chv_prepare_pll(intel_crtc, pipe_config);
6417 chv_enable_pll(intel_crtc, pipe_config);
6418 } else {
6419 vlv_prepare_pll(intel_crtc, pipe_config);
6420 vlv_enable_pll(intel_crtc, pipe_config);
6421 }
6422
6423 intel_encoders_pre_enable(crtc, pipe_config, old_state);
6424
6425 i9xx_pfit_enable(pipe_config);
6426
6427 intel_color_load_luts(pipe_config);
6428 intel_color_commit(pipe_config);
6429 /* update DSPCNTR to configure gamma for pipe bottom color */
6430 intel_disable_primary_plane(pipe_config);
6431
6432 dev_priv->display.initial_watermarks(old_intel_state,
6433 pipe_config);
6434 intel_enable_pipe(pipe_config);
6435
6436 assert_vblank_disabled(crtc);
6437 intel_crtc_vblank_on(pipe_config);
6438
6439 intel_encoders_enable(crtc, pipe_config, old_state);
6440 }
6441
6442 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
6443 {
6444 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6445 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6446
6447 I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
6448 I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
6449 }
6450
6451 static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
6452 struct drm_atomic_state *old_state)
6453 {
6454 struct intel_atomic_state *old_intel_state =
6455 to_intel_atomic_state(old_state);
6456 struct drm_crtc *crtc = pipe_config->base.crtc;
6457 struct drm_device *dev = crtc->dev;
6458 struct drm_i915_private *dev_priv = to_i915(dev);
6459 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6460 enum pipe pipe = intel_crtc->pipe;
6461
6462 if (WARN_ON(intel_crtc->active))
6463 return;
6464
6465 i9xx_set_pll_dividers(pipe_config);
6466
6467 if (intel_crtc_has_dp_encoder(pipe_config))
6468 intel_dp_set_m_n(pipe_config, M1_N1);
6469
6470 intel_set_pipe_timings(pipe_config);
6471 intel_set_pipe_src_size(pipe_config);
6472
6473 i9xx_set_pipeconf(pipe_config);
6474
6475 intel_crtc->active = true;
6476
6477 if (!IS_GEN(dev_priv, 2))
6478 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6479
6480 intel_encoders_pre_enable(crtc, pipe_config, old_state);
6481
6482 i9xx_enable_pll(intel_crtc, pipe_config);
6483
6484 i9xx_pfit_enable(pipe_config);
6485
6486 intel_color_load_luts(pipe_config);
6487 intel_color_commit(pipe_config);
6488 /* update DSPCNTR to configure gamma for pipe bottom color */
6489 intel_disable_primary_plane(pipe_config);
6490
6491 if (dev_priv->display.initial_watermarks != NULL)
6492 dev_priv->display.initial_watermarks(old_intel_state,
6493 pipe_config);
6494 else
6495 intel_update_watermarks(intel_crtc);
6496 intel_enable_pipe(pipe_config);
6497
6498 assert_vblank_disabled(crtc);
6499 intel_crtc_vblank_on(pipe_config);
6500
6501 intel_encoders_enable(crtc, pipe_config, old_state);
6502 }
6503
6504 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6505 {
6506 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6507 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6508
6509 if (!old_crtc_state->gmch_pfit.control)
6510 return;
6511
6512 assert_pipe_disabled(dev_priv, crtc->pipe);
6513
6514 DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
6515 I915_READ(PFIT_CONTROL));
6516 I915_WRITE(PFIT_CONTROL, 0);
6517 }
6518
6519 static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
6520 struct drm_atomic_state *old_state)
6521 {
6522 struct drm_crtc *crtc = old_crtc_state->base.crtc;
6523 struct drm_device *dev = crtc->dev;
6524 struct drm_i915_private *dev_priv = to_i915(dev);
6525 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6526 int pipe = intel_crtc->pipe;
6527
6528 /*
6529 * On gen2 planes are double buffered but the pipe isn't, so we must
6530 * wait for planes to fully turn off before disabling the pipe.
6531 */
6532 if (IS_GEN(dev_priv, 2))
6533 intel_wait_for_vblank(dev_priv, pipe);
6534
6535 intel_encoders_disable(crtc, old_crtc_state, old_state);
6536
6537 drm_crtc_vblank_off(crtc);
6538 assert_vblank_disabled(crtc);
6539
6540 intel_disable_pipe(old_crtc_state);
6541
6542 i9xx_pfit_disable(old_crtc_state);
6543
6544 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
6545
6546 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
6547 if (IS_CHERRYVIEW(dev_priv))
6548 chv_disable_pll(dev_priv, pipe);
6549 else if (IS_VALLEYVIEW(dev_priv))
6550 vlv_disable_pll(dev_priv, pipe);
6551 else
6552 i9xx_disable_pll(old_crtc_state);
6553 }
6554
6555 intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
6556
6557 if (!IS_GEN(dev_priv, 2))
6558 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6559
6560 if (!dev_priv->display.initial_watermarks)
6561 intel_update_watermarks(intel_crtc);
6562
6563 /* clock the pipe down to 640x480@60 to potentially save power */
6564 if (IS_I830(dev_priv))
6565 i830_enable_pipe(dev_priv, pipe);
6566 }
6567
6568 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
6569 struct drm_modeset_acquire_ctx *ctx)
6570 {
6571 struct intel_encoder *encoder;
6572 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6573 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6574 enum intel_display_power_domain domain;
6575 struct intel_plane *plane;
6576 u64 domains;
6577 struct drm_atomic_state *state;
6578 struct intel_crtc_state *crtc_state;
6579 int ret;
6580
6581 if (!intel_crtc->active)
6582 return;
6583
6584 for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
6585 const struct intel_plane_state *plane_state =
6586 to_intel_plane_state(plane->base.state);
6587
6588 if (plane_state->base.visible)
6589 intel_plane_disable_noatomic(intel_crtc, plane);
6590 }
6591
6592 state = drm_atomic_state_alloc(crtc->dev);
6593 if (!state) {
6594 DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
6595 crtc->base.id, crtc->name);
6596 return;
6597 }
6598
6599 state->acquire_ctx = ctx;
6600
6601 /* Everything's already locked, -EDEADLK can't happen. */
6602 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
6603 ret = drm_atomic_add_affected_connectors(state, crtc);
6604
6605 WARN_ON(IS_ERR(crtc_state) || ret);
6606
6607 dev_priv->display.crtc_disable(crtc_state, state);
6608
6609 drm_atomic_state_put(state);
6610
6611 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
6612 crtc->base.id, crtc->name);
6613
6614 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
6615 crtc->state->active = false;
6616 intel_crtc->active = false;
6617 crtc->enabled = false;
6618 crtc->state->connector_mask = 0;
6619 crtc->state->encoder_mask = 0;
6620
6621 for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
6622 encoder->base.crtc = NULL;
6623
6624 intel_fbc_disable(intel_crtc);
6625 intel_update_watermarks(intel_crtc);
6626 intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));
6627
6628 domains = intel_crtc->enabled_power_domains;
6629 for_each_power_domain(domain, domains)
6630 intel_display_power_put_unchecked(dev_priv, domain);
6631 intel_crtc->enabled_power_domains = 0;
6632
6633 dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
6634 dev_priv->min_cdclk[intel_crtc->pipe] = 0;
6635 dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
6636 }
6637
6638 /*
6639  * Turn all CRTCs off, but do not adjust state.
6640 * This has to be paired with a call to intel_modeset_setup_hw_state.
6641 */
6642 int intel_display_suspend(struct drm_device *dev)
6643 {
6644 struct drm_i915_private *dev_priv = to_i915(dev);
6645 struct drm_atomic_state *state;
6646 int ret;
6647
6648 state = drm_atomic_helper_suspend(dev);
6649 ret = PTR_ERR_OR_ZERO(state);
6650 if (ret)
6651 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
6652 else
6653 dev_priv->modeset_restore_state = state;
6654 return ret;
6655 }
6656
6657 void intel_encoder_destroy(struct drm_encoder *encoder)
6658 {
6659 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6660
6661 drm_encoder_cleanup(encoder);
6662 kfree(intel_encoder);
6663 }
6664
6665 /* Cross check the actual hw state with our own modeset state tracking (and its
6666 * internal consistency). */
6667 static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
6668 struct drm_connector_state *conn_state)
6669 {
6670 struct intel_connector *connector = to_intel_connector(conn_state->connector);
6671
6672 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
6673 connector->base.base.id,
6674 connector->base.name);
6675
6676 if (connector->get_hw_state(connector)) {
6677 struct intel_encoder *encoder = connector->encoder;
6678
6679 I915_STATE_WARN(!crtc_state,
6680 "connector enabled without attached crtc\n");
6681
6682 if (!crtc_state)
6683 return;
6684
6685 I915_STATE_WARN(!crtc_state->active,
6686 "connector is active, but attached crtc isn't\n");
6687
6688 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
6689 return;
6690
6691 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
6692 "atomic encoder doesn't match attached encoder\n");
6693
6694 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
6695 "attached encoder crtc differs from connector crtc\n");
6696 } else {
6697 I915_STATE_WARN(crtc_state && crtc_state->active,
6698 "attached crtc is active, but connector isn't\n");
6699 I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
6700 "best encoder set without crtc!\n");
6701 }
6702 }
6703
6704 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
6705 {
6706 if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6707 return crtc_state->fdi_lanes;
6708
6709 return 0;
6710 }
6711
6712 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
6713 struct intel_crtc_state *pipe_config)
6714 {
6715 struct drm_i915_private *dev_priv = to_i915(dev);
6716 struct drm_atomic_state *state = pipe_config->base.state;
6717 struct intel_crtc *other_crtc;
6718 struct intel_crtc_state *other_crtc_state;
6719
6720 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
6721 pipe_name(pipe), pipe_config->fdi_lanes);
6722 if (pipe_config->fdi_lanes > 4) {
6723 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
6724 pipe_name(pipe), pipe_config->fdi_lanes);
6725 return -EINVAL;
6726 }
6727
6728 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
6729 if (pipe_config->fdi_lanes > 2) {
6730 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
6731 pipe_config->fdi_lanes);
6732 return -EINVAL;
6733 } else {
6734 return 0;
6735 }
6736 }
6737
6738 if (INTEL_INFO(dev_priv)->num_pipes == 2)
6739 return 0;
6740
6741 	/* The Ivybridge 3-pipe case is really complicated */
6742 switch (pipe) {
6743 case PIPE_A:
6744 return 0;
6745 case PIPE_B:
6746 if (pipe_config->fdi_lanes <= 2)
6747 return 0;
6748
6749 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
6750 other_crtc_state =
6751 intel_atomic_get_crtc_state(state, other_crtc);
6752 if (IS_ERR(other_crtc_state))
6753 return PTR_ERR(other_crtc_state);
6754
6755 if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
6756 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
6757 pipe_name(pipe), pipe_config->fdi_lanes);
6758 return -EINVAL;
6759 }
6760 return 0;
6761 case PIPE_C:
6762 if (pipe_config->fdi_lanes > 2) {
6763 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
6764 pipe_name(pipe), pipe_config->fdi_lanes);
6765 return -EINVAL;
6766 }
6767
6768 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
6769 other_crtc_state =
6770 intel_atomic_get_crtc_state(state, other_crtc);
6771 if (IS_ERR(other_crtc_state))
6772 return PTR_ERR(other_crtc_state);
6773
6774 if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
6775 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
6776 return -EINVAL;
6777 }
6778 return 0;
6779 default:
6780 BUG();
6781 }
6782 }
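/*
 * Worked example of the IVB checks above: pipes B and C share the FDI
 * lanes, so pipe B using 3 or 4 lanes leaves nothing for pipe C and the
 * check fails; with pipe B at 2 lanes or fewer, pipe C may still use up
 * to 2 lanes of its own. Pipe A has dedicated lanes and always passes.
 */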
6783
6784 #define RETRY 1
6785 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
6786 struct intel_crtc_state *pipe_config)
6787 {
6788 struct drm_device *dev = intel_crtc->base.dev;
6789 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6790 int lane, link_bw, fdi_dotclock, ret;
6791 bool needs_recompute = false;
6792
6793 retry:
6794 	/* FDI is a binary signal running at ~2.7 GHz, encoding
6795 	 * each output octet as 10 bits. The actual frequency
6796 	 * is stored as a divider into a 100 MHz clock, and the
6797 	 * mode pixel clock is stored in units of 1 kHz.
6798 	 * Hence the bandwidth of each lane in terms of the mode signal
6799 * is:
6800 */
6801 link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
6802
6803 fdi_dotclock = adjusted_mode->crtc_clock;
6804
6805 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
6806 pipe_config->pipe_bpp);
6807
6808 pipe_config->fdi_lanes = lane;
6809
6810 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
6811 link_bw, &pipe_config->fdi_m_n, false);
6812
6813 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
6814 if (ret == -EDEADLK)
6815 return ret;
6816
6817 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
6818 pipe_config->pipe_bpp -= 2*3;
6819 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
6820 pipe_config->pipe_bpp);
6821 needs_recompute = true;
6822 pipe_config->bw_constrained = true;
6823
6824 goto retry;
6825 }
6826
6827 if (needs_recompute)
6828 return RETRY;
6829
6830 return ret;
6831 }
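/*
 * Rough numbers for the scheme above (illustrative): a 148500 kHz mode
 * at 24 bpp carries 148500 * 24 / 8 = 445500 kB/s of pixel payload, and
 * with roughly 270000 kB/s of usable bandwidth per FDI lane (2.7 GHz,
 * 10 bits per octet) two lanes suffice; the lane helper also budgets
 * some protocol overhead on top of the raw payload.
 */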
6832
6833 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
6834 {
6835 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6836 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6837
6838 /* IPS only exists on ULT machines and is tied to pipe A. */
6839 if (!hsw_crtc_supports_ips(crtc))
6840 return false;
6841
6842 if (!i915_modparams.enable_ips)
6843 return false;
6844
6845 if (crtc_state->pipe_bpp > 24)
6846 return false;
6847
6848 /*
6849 * We compare against max which means we must take
6850 * the increased cdclk requirement into account when
6851 * calculating the new cdclk.
6852 *
6853 	 * It may be worth measuring whether a lower cdclk without IPS would suffice.
6854 */
6855 if (IS_BROADWELL(dev_priv) &&
6856 crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
6857 return false;
6858
6859 return true;
6860 }
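/*
 * Illustration of the 95% rule above (hypothetical numbers): with
 * max_cdclk_freq = 675000 kHz the BDW cutoff is 675000 * 95 / 100 =
 * 641250 kHz, so e.g. a 594000 kHz pixel rate keeps IPS available,
 * while anything above 641250 kHz rules it out.
 */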
6861
6862 static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
6863 {
6864 struct drm_i915_private *dev_priv =
6865 to_i915(crtc_state->base.crtc->dev);
6866 struct intel_atomic_state *intel_state =
6867 to_intel_atomic_state(crtc_state->base.state);
6868
6869 if (!hsw_crtc_state_ips_capable(crtc_state))
6870 return false;
6871
6872 /*
6873 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
6874 * enabled and disabled dynamically based on package C states,
6875 * user space can't make reliable use of the CRCs, so let's just
6876 * completely disable it.
6877 */
6878 if (crtc_state->crc_enabled)
6879 return false;
6880
6881 /* IPS should be fine as long as at least one plane is enabled. */
6882 if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
6883 return false;
6884
6885 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
6886 if (IS_BROADWELL(dev_priv) &&
6887 crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
6888 return false;
6889
6890 return true;
6891 }
6892
6893 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6894 {
6895 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6896
6897 /* GDG double wide on either pipe, otherwise pipe A only */
6898 return INTEL_GEN(dev_priv) < 4 &&
6899 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6900 }
6901
6902 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
6903 {
6904 u32 pixel_rate;
6905
6906 pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
6907
6908 /*
6909 * We only use IF-ID interlacing. If we ever use
6910 * PF-ID we'll need to adjust the pixel_rate here.
6911 */
6912
6913 if (pipe_config->pch_pfit.enabled) {
6914 u64 pipe_w, pipe_h, pfit_w, pfit_h;
6915 u32 pfit_size = pipe_config->pch_pfit.size;
6916
6917 pipe_w = pipe_config->pipe_src_w;
6918 pipe_h = pipe_config->pipe_src_h;
6919
6920 pfit_w = (pfit_size >> 16) & 0xFFFF;
6921 pfit_h = pfit_size & 0xFFFF;
6922 if (pipe_w < pfit_w)
6923 pipe_w = pfit_w;
6924 if (pipe_h < pfit_h)
6925 pipe_h = pfit_h;
6926
6927 if (WARN_ON(!pfit_w || !pfit_h))
6928 return pixel_rate;
6929
6930 pixel_rate = div_u64((u64)pixel_rate * pipe_w * pipe_h,
6931 pfit_w * pfit_h);
6932 }
6933
6934 return pixel_rate;
6935 }
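/*
 * Worked example for the pfit scaling above (hypothetical numbers): a
 * 3840x2160 pipe source downscaled to a 1920x1080 pfit window at
 * crtc_clock = 148500 kHz yields
 * 148500 * (3840 * 2160) / (1920 * 1080) = 594000 kHz, i.e. 4x the
 * nominal rate, since four source pixels are fetched per output pixel.
 */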
6936
6937 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
6938 {
6939 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
6940
6941 if (HAS_GMCH(dev_priv))
6942 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
6943 crtc_state->pixel_rate =
6944 crtc_state->base.adjusted_mode.crtc_clock;
6945 else
6946 crtc_state->pixel_rate =
6947 ilk_pipe_pixel_rate(crtc_state);
6948 }
6949
6950 static int intel_crtc_compute_config(struct intel_crtc *crtc,
6951 struct intel_crtc_state *pipe_config)
6952 {
6953 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6954 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6955 int clock_limit = dev_priv->max_dotclk_freq;
6956
6957 if (INTEL_GEN(dev_priv) < 4) {
6958 clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
6959
6960 /*
6961 * Enable double wide mode when the dot clock
6962 * is > 90% of the (display) core speed.
6963 */
6964 if (intel_crtc_supports_double_wide(crtc) &&
6965 adjusted_mode->crtc_clock > clock_limit) {
6966 clock_limit = dev_priv->max_dotclk_freq;
6967 pipe_config->double_wide = true;
6968 }
6969 }
6970
6971 if (adjusted_mode->crtc_clock > clock_limit) {
6972 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
6973 adjusted_mode->crtc_clock, clock_limit,
6974 yesno(pipe_config->double_wide));
6975 return -EINVAL;
6976 }
6977
6978 if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
6979 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
6980 pipe_config->base.ctm) {
6981 /*
6982 * There is only one pipe CSC unit per pipe, and we need that
6983 * for output conversion from RGB->YCBCR. So if CTM is already
6984 * applied we can't support YCBCR420 output.
6985 */
6986 DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
6987 return -EINVAL;
6988 }
6989
6990 /*
6991 * Pipe horizontal size must be even in:
6992 * - DVO ganged mode
6993 * - LVDS dual channel mode
6994 * - Double wide pipe
6995 */
6996 if (pipe_config->pipe_src_w & 1) {
6997 if (pipe_config->double_wide) {
6998 DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
6999 return -EINVAL;
7000 }
7001
7002 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
7003 intel_is_dual_link_lvds(dev_priv)) {
7004 DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
7005 return -EINVAL;
7006 }
7007 }
7008
7009 	/* Cantiga+ cannot handle modes with an hsync front porch of 0.
7010 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
7011 */
7012 if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
7013 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
7014 return -EINVAL;
7015
7016 intel_crtc_compute_pixel_rate(pipe_config);
7017
7018 if (pipe_config->has_pch_encoder)
7019 return ironlake_fdi_compute_config(crtc, pipe_config);
7020
7021 return 0;
7022 }
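/*
 * Illustration of the double wide threshold above (hypothetical
 * numbers): on a gen3 part with max_cdclk_freq = 200000 kHz the single
 * wide limit is 200000 * 9 / 10 = 180000 kHz, so a 190000 kHz mode
 * flips to double wide (where supported) and is then validated against
 * max_dotclk_freq instead.
 */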
7023
7024 static void
7025 intel_reduce_m_n_ratio(u32 *num, u32 *den)
7026 {
7027 while (*num > DATA_LINK_M_N_MASK ||
7028 *den > DATA_LINK_M_N_MASK) {
7029 *num >>= 1;
7030 *den >>= 1;
7031 }
7032 }
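/*
 * Worked example (assuming 24-bit M/N fields, i.e. DATA_LINK_M_N_MASK
 * == 0xffffff): num = 0x3000000 and den = 0x1800000 shift down together
 * to 0x1800000/0xc00000 and then 0xc00000/0x600000, at which point both
 * fit and the 2:1 ratio is preserved.
 */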
7033
7034 static void compute_m_n(unsigned int m, unsigned int n,
7035 u32 *ret_m, u32 *ret_n,
7036 bool constant_n)
7037 {
7038 /*
7039 	 * Several DP dongles in particular seem to be fussy about
7040 	 * too large link M/N values. Use an N value of 0x8000, which
7041 	 * such devices should accept: 0x8000 is the specified fixed N
7042 	 * value for asynchronous clock mode, and these devices expect
7043 	 * it in synchronous clock mode as well.
7044 */
7045 if (constant_n)
7046 *ret_n = 0x8000;
7047 else
7048 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7049
7050 *ret_m = div_u64((u64)m * *ret_n, n);
7051 intel_reduce_m_n_ratio(ret_m, ret_n);
7052 }
7053
7054 void
7055 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
7056 int pixel_clock, int link_clock,
7057 struct intel_link_m_n *m_n,
7058 bool constant_n)
7059 {
7060 m_n->tu = 64;
7061
7062 compute_m_n(bits_per_pixel * pixel_clock,
7063 link_clock * nlanes * 8,
7064 &m_n->gmch_m, &m_n->gmch_n,
7065 constant_n);
7066
7067 compute_m_n(pixel_clock, link_clock,
7068 &m_n->link_m, &m_n->link_n,
7069 constant_n);
7070 }
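/*
 * Worked example (hypothetical link): 24 bpp on 4 lanes at link_clock =
 * 270000 driving pixel_clock = 148500 with constant_n gives
 * data M/N = (24 * 148500) : (270000 * 4 * 8) with N forced to 0x8000,
 * i.e. gmch_m = 3564000 * 32768 / 8640000 = 13516, gmch_n = 32768, and
 * link M/N = 148500 : 270000, i.e. link_m = 18022, link_n = 32768
 * (both divisions truncate), with tu always 64.
 */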
7071
7072 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7073 {
7074 if (i915_modparams.panel_use_ssc >= 0)
7075 return i915_modparams.panel_use_ssc != 0;
7076 return dev_priv->vbt.lvds_use_ssc
7077 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7078 }
7079
7080 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
7081 {
7082 return (1 << dpll->n) << 16 | dpll->m2;
7083 }
7084
7085 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
7086 {
7087 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7088 }
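/*
 * Illustration of the FP bit layouts above (hypothetical dividers):
 * n = 2, m1 = 14, m2 = 8 packs as (2 << 16) | (14 << 8) | 8 = 0x20e08
 * on i9xx, while the Pineview variant drops m1 and stores 1 << n in the
 * upper field: ((1 << 2) << 16) | 8 = 0x40008.
 */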
7089
7090 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7091 struct intel_crtc_state *crtc_state,
7092 struct dpll *reduced_clock)
7093 {
7094 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7095 u32 fp, fp2 = 0;
7096
7097 if (IS_PINEVIEW(dev_priv)) {
7098 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7099 if (reduced_clock)
7100 fp2 = pnv_dpll_compute_fp(reduced_clock);
7101 } else {
7102 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7103 if (reduced_clock)
7104 fp2 = i9xx_dpll_compute_fp(reduced_clock);
7105 }
7106
7107 crtc_state->dpll_hw_state.fp0 = fp;
7108
7109 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7110 reduced_clock) {
7111 crtc_state->dpll_hw_state.fp1 = fp2;
7112 } else {
7113 crtc_state->dpll_hw_state.fp1 = fp;
7114 }
7115 }
7116
7117 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
7118 pipe)
7119 {
7120 u32 reg_val;
7121
7122 /*
7123 * The PLLB opamp always calibrates to the max value of 0x3f; force
7124 * enable it and set it to a reasonable value instead.
7125 */
7126 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7127 reg_val &= 0xffffff00;
7128 reg_val |= 0x00000030;
7129 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7130
7131 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7132 reg_val &= 0x00ffffff;
7133 reg_val |= 0x8c000000;
7134 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7135
7136 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7137 reg_val &= 0xffffff00;
7138 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7139
7140 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7141 reg_val &= 0x00ffffff;
7142 reg_val |= 0xb0000000;
7143 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7144 }
7145
7146 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
7147 const struct intel_link_m_n *m_n)
7148 {
7149 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7150 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7151 enum pipe pipe = crtc->pipe;
7152
7153 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7154 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
7155 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
7156 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
7157 }
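/*
 * Note on the packing above: TU_SIZE() places the transfer unit size
 * in the high bits of the DATA_M register (TU_SIZE(64) == 63 << 25,
 * assuming the usual ((x) - 1) << 25 definition from i915_reg.h), so a
 * single write carries both the TU size and the M divider.
 */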
7158
7159 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
7160 enum transcoder transcoder)
7161 {
7162 if (IS_HASWELL(dev_priv))
7163 return transcoder == TRANSCODER_EDP;
7164
7165 /*
7166 * Strictly speaking, some registers are available before
7167 * gen7, but we only support DRRS on gen7+.
7168 */
7169 return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
7170 }
7171
7172 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
7173 const struct intel_link_m_n *m_n,
7174 const struct intel_link_m_n *m2_n2)
7175 {
7176 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7177 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7178 enum pipe pipe = crtc->pipe;
7179 enum transcoder transcoder = crtc_state->cpu_transcoder;
7180
7181 if (INTEL_GEN(dev_priv) >= 5) {
7182 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
7183 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
7184 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
7185 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
7186 /*
7187 * M2_N2 registers are set only if DRRS is supported
7188 * (to make sure the registers are not unnecessarily accessed).
7189 */
7190 if (m2_n2 && crtc_state->has_drrs &&
7191 transcoder_has_m2_n2(dev_priv, transcoder)) {
7192 I915_WRITE(PIPE_DATA_M2(transcoder),
7193 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
7194 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
7195 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
7196 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
7197 }
7198 } else {
7199 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7200 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
7201 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
7202 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
7203 }
7204 }
7205
7206 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
7207 {
7208 const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7209
7210 if (m_n == M1_N1) {
7211 dp_m_n = &crtc_state->dp_m_n;
7212 dp_m2_n2 = &crtc_state->dp_m2_n2;
7213 } else if (m_n == M2_N2) {
7214
7215 /*
7216 * M2_N2 registers are not supported, so the m2_n2 divider value
7217 * needs to be programmed into M1_N1 instead.
7218 */
7219 dp_m_n = &crtc_state->dp_m2_n2;
7220 } else {
7221 DRM_ERROR("Unsupported divider value\n");
7222 return;
7223 }
7224
7225 if (crtc_state->has_pch_encoder)
7226 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
7227 else
7228 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
7229 }
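/*
 * Typical (hypothetical) DRRS flow for the function above: M1_N1 is
 * programmed for the high refresh rate at modeset time, and M2_N2 is
 * passed in when dropping to the low refresh rate. On transcoders
 * without M2_N2 registers the m2_n2 values are simply rewritten into
 * M1_N1, as handled in the M2_N2 branch above.
 */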
7230
7231 static void vlv_compute_dpll(struct intel_crtc *crtc,
7232 struct intel_crtc_state *pipe_config)
7233 {
7234 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
7235 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7236 if (crtc->pipe != PIPE_A)
7237 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7238
7239 /* DPLL not used with DSI, but still need the rest set up */
7240 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7241 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7242 DPLL_EXT_BUFFER_ENABLE_VLV;
7243
7244 pipe_config->dpll_hw_state.dpll_md =
7245 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7246 }
7247
7248 static void chv_compute_dpll(struct intel_crtc *crtc,
7249 struct intel_crtc_state *pipe_config)
7250 {
7251 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7252 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7253 if (crtc->pipe != PIPE_A)
7254 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7255
7256 /* DPLL not used with DSI, but still need the rest set up */
7257 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7258 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7259
7260 pipe_config->dpll_hw_state.dpll_md =
7261 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7262 }
7263
7264 static void vlv_prepare_pll(struct intel_crtc *crtc,
7265 const struct intel_crtc_state *pipe_config)
7266 {
7267 struct drm_device *dev = crtc->base.dev;
7268 struct drm_i915_private *dev_priv = to_i915(dev);
7269 enum pipe pipe = crtc->pipe;
7270 u32 mdiv;
7271 u32 bestn, bestm1, bestm2, bestp1, bestp2;
7272 u32 coreclk, reg_val;
7273
7274 /* Enable Refclk */
7275 I915_WRITE(DPLL(pipe),
7276 pipe_config->dpll_hw_state.dpll &
7277 ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
7278
7279 /* No need to actually set up the DPLL with DSI */
7280 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7281 return;
7282
7283 vlv_dpio_get(dev_priv);
7284
7285 bestn = pipe_config->dpll.n;
7286 bestm1 = pipe_config->dpll.m1;
7287 bestm2 = pipe_config->dpll.m2;
7288 bestp1 = pipe_config->dpll.p1;
7289 bestp2 = pipe_config->dpll.p2;
7290
7291 /* See the eDP/HDMI DPIO driver and vBIOS notes documentation */
7292
7293 /* PLL B needs special handling */
7294 if (pipe == PIPE_B)
7295 vlv_pllb_recal_opamp(dev_priv, pipe);
7296
7297 /* Set up Tx target for periodic Rcomp update */
7298 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
7299
7300 /* Disable target IRef on PLL */
7301 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
7302 reg_val &= 0x00ffffff;
7303 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
7304
7305 /* Disable fast lock */
7306 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
7307
7308 /* Set idtafcrecal before PLL is enabled */
7309 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
7310 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
7311 mdiv |= ((bestn << DPIO_N_SHIFT));
7312 mdiv |= (1 << DPIO_K_SHIFT);
7313
7314 /*
7315 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
7316 * but we don't support that).
7317 * Note: don't use the DAC post divider as it seems unstable.
7318 */
7319 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
7320 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7321
7322 mdiv |= DPIO_ENABLE_CALIBRATION;
7323 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7324
7325 /* Set HBR and RBR LPF coefficients */
7326 if (pipe_config->port_clock == 162000 ||
7327 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
7328 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
7329 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7330 0x009f0003);
7331 else
7332 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7333 0x00d0000f);
7334
7335 if (intel_crtc_has_dp_encoder(pipe_config)) {
7336 /* Use SSC source */
7337 if (pipe == PIPE_A)
7338 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7339 0x0df40000);
7340 else
7341 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7342 0x0df70000);
7343 } else { /* HDMI or VGA */
7344 /* Use bend source */
7345 if (pipe == PIPE_A)
7346 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7347 0x0df70000);
7348 else
7349 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7350 0x0df40000);
7351 }
7352
7353 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
7354 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
7355 if (intel_crtc_has_dp_encoder(pipe_config))
7356 coreclk |= 0x01000000;
7357 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
7358
7359 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
7360
7361 vlv_dpio_put(dev_priv);
7362 }
7363
7364 static void chv_prepare_pll(struct intel_crtc *crtc,
7365 const struct intel_crtc_state *pipe_config)
7366 {
7367 struct drm_device *dev = crtc->base.dev;
7368 struct drm_i915_private *dev_priv = to_i915(dev);
7369 enum pipe pipe = crtc->pipe;
7370 enum dpio_channel port = vlv_pipe_to_channel(pipe);
7371 u32 loopfilter, tribuf_calcntr;
7372 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
7373 u32 dpio_val;
7374 int vco;
7375
7376 /* Enable Refclk and SSC */
7377 I915_WRITE(DPLL(pipe),
7378 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
7379
7380 /* No need to actually set up the DPLL with DSI */
7381 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7382 return;
7383
7384 bestn = pipe_config->dpll.n;
7385 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
7386 bestm1 = pipe_config->dpll.m1;
7387 bestm2 = pipe_config->dpll.m2 >> 22;
7388 bestp1 = pipe_config->dpll.p1;
7389 bestp2 = pipe_config->dpll.p2;
7390 vco = pipe_config->dpll.vco;
7391 dpio_val = 0;
7392 loopfilter = 0;
7393
7394 vlv_dpio_get(dev_priv);
7395
7396 /* p1 and p2 divider */
7397 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
7398 5 << DPIO_CHV_S1_DIV_SHIFT |
7399 bestp1 << DPIO_CHV_P1_DIV_SHIFT |
7400 bestp2 << DPIO_CHV_P2_DIV_SHIFT |
7401 1 << DPIO_CHV_K_DIV_SHIFT);
7402
7403 /* Feedback post-divider - m2 */
7404 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
7405
7406 /* Feedback refclk divider - n and m1 */
7407 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
7408 DPIO_CHV_M1_DIV_BY_2 |
7409 1 << DPIO_CHV_N_DIV_SHIFT);
7410
7411 /* M2 fraction division */
7412 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
7413
7414 /* M2 fraction division enable */
7415 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
7416 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
7417 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
7418 if (bestm2_frac)
7419 dpio_val |= DPIO_CHV_FRAC_DIV_EN;
7420 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
7421
7422 /* Program digital lock detect threshold */
7423 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
7424 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
7425 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
7426 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
7427 if (!bestm2_frac)
7428 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
7429 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
7430
7431 /* Loop filter */
7432 if (vco == 5400000) {
7433 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
7434 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
7435 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
7436 tribuf_calcntr = 0x9;
7437 } else if (vco <= 6200000) {
7438 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
7439 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
7440 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7441 tribuf_calcntr = 0x9;
7442 } else if (vco <= 6480000) {
7443 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7444 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7445 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7446 tribuf_calcntr = 0x8;
7447 } else {
7448 /* Not supported. Apply the same limits as in the max case */
7449 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7450 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7451 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7452 tribuf_calcntr = 0;
7453 }
7454 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
7455
7456 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
7457 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
7458 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
7459 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
7460
7461 /* AFC Recal */
7462 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
7463 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
7464 DPIO_AFC_RECAL);
7465
7466 vlv_dpio_put(dev_priv);
7467 }
7468
7469 /**
7470 * vlv_force_pll_on - forcibly enable just the PLL
7471 * @dev_priv: i915 private structure
7472 * @pipe: pipe PLL to enable
7473 * @dpll: PLL configuration
7474 *
7475 * Enable the PLL for @pipe using the supplied @dpll config. To be used
7476 * in cases where we need the PLL enabled even when @pipe is not going to
7477 * be enabled.
7478 */
7479 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
7480 const struct dpll *dpll)
7481 {
7482 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
7483 struct intel_crtc_state *pipe_config;
7484
7485 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7486 if (!pipe_config)
7487 return -ENOMEM;
7488
7489 pipe_config->base.crtc = &crtc->base;
7490 pipe_config->pixel_multiplier = 1;
7491 pipe_config->dpll = *dpll;
7492
7493 if (IS_CHERRYVIEW(dev_priv)) {
7494 chv_compute_dpll(crtc, pipe_config);
7495 chv_prepare_pll(crtc, pipe_config);
7496 chv_enable_pll(crtc, pipe_config);
7497 } else {
7498 vlv_compute_dpll(crtc, pipe_config);
7499 vlv_prepare_pll(crtc, pipe_config);
7500 vlv_enable_pll(crtc, pipe_config);
7501 }
7502
7503 kfree(pipe_config);
7504
7505 return 0;
7506 }
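/*
 * Usage sketch (illustrative only; the divider variable name is made
 * up): a caller that needs the PLL running without a full modeset,
 * e.g. to kick the panel power sequencer, can do
 *
 *	vlv_force_pll_on(dev_priv, pipe, &known_good_dpll);
 *	... poke the hardware ...
 *	vlv_force_pll_off(dev_priv, pipe);
 *
 * where known_good_dpll is whatever pre-validated divider set the
 * caller has at hand.
 */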
7507
7508 /**
7509 * vlv_force_pll_off - forcibly disable just the PLL
7510 * @dev_priv: i915 private structure
7511 * @pipe: pipe PLL to disable
7512 *
7513 * Disable the PLL for @pipe. To be used in cases where we need
7514 * the PLL enabled even when @pipe is not going to be enabled.
7515 */
7516 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
7517 {
7518 if (IS_CHERRYVIEW(dev_priv))
7519 chv_disable_pll(dev_priv, pipe);
7520 else
7521 vlv_disable_pll(dev_priv, pipe);
7522 }
7523
7524 static void i9xx_compute_dpll(struct intel_crtc *crtc,
7525 struct intel_crtc_state *crtc_state,
7526 struct dpll *reduced_clock)
7527 {
7528 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7529 u32 dpll;
7530 struct dpll *clock = &crtc_state->dpll;
7531
7532 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7533
7534 dpll = DPLL_VGA_MODE_DIS;
7535
7536 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
7537 dpll |= DPLLB_MODE_LVDS;
7538 else
7539 dpll |= DPLLB_MODE_DAC_SERIAL;
7540
7541 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
7542 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
7543 dpll |= (crtc_state->pixel_multiplier - 1)
7544 << SDVO_MULTIPLIER_SHIFT_HIRES;
7545 }
7546
7547 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7548 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
7549 dpll |= DPLL_SDVO_HIGH_SPEED;
7550
7551 if (intel_crtc_has_dp_encoder(crtc_state))
7552 dpll |= DPLL_SDVO_HIGH_SPEED;
7553
7554 /* compute bitmask from p1 value */
7555 if (IS_PINEVIEW(dev_priv))
7556 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
7557 else {
7558 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7559 if (IS_G4X(dev_priv) && reduced_clock)
7560 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7561 }
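/*
 * Example: p1 == 3 sets bit 2 of the P1 field, i.e.
 * (1 << 2) << DPLL_FPA01_P1_POST_DIV_SHIFT.
 */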
7562 switch (clock->p2) {
7563 case 5:
7564 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7565 break;
7566 case 7:
7567 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7568 break;
7569 case 10:
7570 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7571 break;
7572 case 14:
7573 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7574 break;
7575 }
7576 if (INTEL_GEN(dev_priv) >= 4)
7577 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
7578
7579 if (crtc_state->sdvo_tv_clock)
7580 dpll |= PLL_REF_INPUT_TVCLKINBC;
7581 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7582 intel_panel_use_ssc(dev_priv))
7583 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7584 else
7585 dpll |= PLL_REF_INPUT_DREFCLK;
7586
7587 dpll |= DPLL_VCO_ENABLE;
7588 crtc_state->dpll_hw_state.dpll = dpll;
7589
7590 if (INTEL_GEN(dev_priv) >= 4) {
7591 u32 dpll_md = (crtc_state->pixel_multiplier - 1)
7592 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7593 crtc_state->dpll_hw_state.dpll_md = dpll_md;
7594 }
7595 }
7596
7597 static void i8xx_compute_dpll(struct intel_crtc *crtc,
7598 struct intel_crtc_state *crtc_state,
7599 struct dpll *reduced_clock)
7600 {
7601 struct drm_device *dev = crtc->base.dev;
7602 struct drm_i915_private *dev_priv = to_i915(dev);
7603 u32 dpll;
7604 struct dpll *clock = &crtc_state->dpll;
7605
7606 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7607
7608 dpll = DPLL_VGA_MODE_DIS;
7609
7610 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7611 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7612 } else {
7613 if (clock->p1 == 2)
7614 dpll |= PLL_P1_DIVIDE_BY_TWO;
7615 else
7616 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7617 if (clock->p2 == 4)
7618 dpll |= PLL_P2_DIVIDE_BY_4;
7619 }
7620
7621 /*
7622 * Bspec:
7623 * "[Almador Errata]: For the correct operation of the muxed DVO pins
7624 * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
7625 * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
7626 * Enable) must be set to “1” in both the DPLL A Control Register
7627 * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
7628 *
7629 * For simplicity we keep both bits always enabled in
7630 * both DPLLs. The spec says we should disable the DVO 2X clock
7631 * when not needed, but this seems to work fine in practice.
7632 */
7633 if (IS_I830(dev_priv) ||
7634 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
7635 dpll |= DPLL_DVO_2X_MODE;
7636
7637 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7638 intel_panel_use_ssc(dev_priv))
7639 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7640 else
7641 dpll |= PLL_REF_INPUT_DREFCLK;
7642
7643 dpll |= DPLL_VCO_ENABLE;
7644 crtc_state->dpll_hw_state.dpll = dpll;
7645 }
7646
7647 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
7648 {
7649 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7650 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7651 enum pipe pipe = crtc->pipe;
7652 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
7653 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
7654 u32 crtc_vtotal, crtc_vblank_end;
7655 int vsyncshift = 0;
7656
7657 /* We need to be careful not to change the adjusted mode, for otherwise
7658 * the hw state checker will get angry at the mismatch. */
7659 crtc_vtotal = adjusted_mode->crtc_vtotal;
7660 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
7661
7662 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
7663 /* the chip adds 2 halflines automatically */
7664 crtc_vtotal -= 1;
7665 crtc_vblank_end -= 1;
7666
7667 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
7668 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
7669 else
7670 vsyncshift = adjusted_mode->crtc_hsync_start -
7671 adjusted_mode->crtc_htotal / 2;
7672 if (vsyncshift < 0)
7673 vsyncshift += adjusted_mode->crtc_htotal;
7674 }
7675
7676 if (INTEL_GEN(dev_priv) > 3)
7677 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
7678
7679 I915_WRITE(HTOTAL(cpu_transcoder),
7680 (adjusted_mode->crtc_hdisplay - 1) |
7681 ((adjusted_mode->crtc_htotal - 1) << 16));
7682 I915_WRITE(HBLANK(cpu_transcoder),
7683 (adjusted_mode->crtc_hblank_start - 1) |
7684 ((adjusted_mode->crtc_hblank_end - 1) << 16));
7685 I915_WRITE(HSYNC(cpu_transcoder),
7686 (adjusted_mode->crtc_hsync_start - 1) |
7687 ((adjusted_mode->crtc_hsync_end - 1) << 16));
7688
7689 I915_WRITE(VTOTAL(cpu_transcoder),
7690 (adjusted_mode->crtc_vdisplay - 1) |
7691 ((crtc_vtotal - 1) << 16));
7692 I915_WRITE(VBLANK(cpu_transcoder),
7693 (adjusted_mode->crtc_vblank_start - 1) |
7694 ((crtc_vblank_end - 1) << 16));
7695 I915_WRITE(VSYNC(cpu_transcoder),
7696 (adjusted_mode->crtc_vsync_start - 1) |
7697 ((adjusted_mode->crtc_vsync_end - 1) << 16));
7698
7699 /* Workaround: when the EDP input selection is B, VTOTAL_B must be
7700 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
7701 * documented in the DDI_FUNC_CTL register description, EDP Input Select
7702 * bits. */
7703 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
7704 (pipe == PIPE_B || pipe == PIPE_C))
7705 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
7706
7707 }
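/*
 * Example of the register packing used above (made-up timings): a
 * 1920-wide mode with htotal 2200 writes HTOTAL as
 * (1920 - 1) | ((2200 - 1) << 16) = 0x0897077f, i.e. the active size
 * in the low half and the total in the high half, both minus one.
 */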
7708
7709 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
7710 {
7711 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7712 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7713 enum pipe pipe = crtc->pipe;
7714
7715 /* pipesrc controls the size that is scaled from, which should
7716 * always be the user's requested size.
7717 */
7718 I915_WRITE(PIPESRC(pipe),
7719 ((crtc_state->pipe_src_w - 1) << 16) |
7720 (crtc_state->pipe_src_h - 1));
7721 }
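/* Example: a 1920x1080 source is written as (1919 << 16) | 1079 = 0x077f0437. */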
7722
7723 static void intel_get_pipe_timings(struct intel_crtc *crtc,
7724 struct intel_crtc_state *pipe_config)
7725 {
7726 struct drm_device *dev = crtc->base.dev;
7727 struct drm_i915_private *dev_priv = to_i915(dev);
7728 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7729 u32 tmp;
7730
7731 tmp = I915_READ(HTOTAL(cpu_transcoder));
7732 pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
7733 pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
7734 tmp = I915_READ(HBLANK(cpu_transcoder));
7735 pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
7736 pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
7737 tmp = I915_READ(HSYNC(cpu_transcoder));
7738 pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
7739 pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
7740
7741 tmp = I915_READ(VTOTAL(cpu_transcoder));
7742 pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
7743 pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
7744 tmp = I915_READ(VBLANK(cpu_transcoder));
7745 pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
7746 pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
7747 tmp = I915_READ(VSYNC(cpu_transcoder));
7748 pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
7749 pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
7750
7751 if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
7752 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
7753 pipe_config->base.adjusted_mode.crtc_vtotal += 1;
7754 pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
7755 }
7756 }
7757
7758 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
7759 struct intel_crtc_state *pipe_config)
7760 {
7761 struct drm_device *dev = crtc->base.dev;
7762 struct drm_i915_private *dev_priv = to_i915(dev);
7763 u32 tmp;
7764
7765 tmp = I915_READ(PIPESRC(crtc->pipe));
7766 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
7767 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
7768
7769 pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
7770 pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
7771 }
7772
7773 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
7774 struct intel_crtc_state *pipe_config)
7775 {
7776 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7777 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7778 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7779 mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
7780
7781 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7782 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7783 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7784 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
7785
7786 mode->flags = pipe_config->base.adjusted_mode.flags;
7787 mode->type = DRM_MODE_TYPE_DRIVER;
7788
7789 mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7790
7791 mode->hsync = drm_mode_hsync(mode);
7792 mode->vrefresh = drm_mode_vrefresh(mode);
7793 drm_mode_set_name(mode);
7794 }
7795
7796 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
7797 {
7798 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7799 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7800 u32 pipeconf;
7801
7802 pipeconf = 0;
7803
7804 /* we keep both pipes enabled on 830 */
7805 if (IS_I830(dev_priv))
7806 pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
7807
7808 if (crtc_state->double_wide)
7809 pipeconf |= PIPECONF_DOUBLE_WIDE;
7810
7811 /* only g4x and later have fancy bpc/dither controls */
7812 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7813 IS_CHERRYVIEW(dev_priv)) {
7814 /* Bspec claims that we can't use dithering for 30bpp pipes. */
7815 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
7816 pipeconf |= PIPECONF_DITHER_EN |
7817 PIPECONF_DITHER_TYPE_SP;
7818
7819 switch (crtc_state->pipe_bpp) {
7820 case 18:
7821 pipeconf |= PIPECONF_6BPC;
7822 break;
7823 case 24:
7824 pipeconf |= PIPECONF_8BPC;
7825 break;
7826 case 30:
7827 pipeconf |= PIPECONF_10BPC;
7828 break;
7829 default:
7830 /* Case prevented by intel_choose_pipe_bpp_dither. */
7831 BUG();
7832 }
7833 }
7834
7835 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
7836 if (INTEL_GEN(dev_priv) < 4 ||
7837 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
7838 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
7839 else
7840 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
7841 } else {
7842 pipeconf |= PIPECONF_PROGRESSIVE;
7843 }
7844
7845 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
7846 crtc_state->limited_color_range)
7847 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
7848
7849 pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
7850
7851 I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
7852 POSTING_READ(PIPECONF(crtc->pipe));
7853 }
7854
7855 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7856 struct intel_crtc_state *crtc_state)
7857 {
7858 struct drm_device *dev = crtc->base.dev;
7859 struct drm_i915_private *dev_priv = to_i915(dev);
7860 const struct intel_limit *limit;
7861 int refclk = 48000;
7862
7863 memset(&crtc_state->dpll_hw_state, 0,
7864 sizeof(crtc_state->dpll_hw_state));
7865
7866 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7867 if (intel_panel_use_ssc(dev_priv)) {
7868 refclk = dev_priv->vbt.lvds_ssc_freq;
7869 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7870 }
7871
7872 limit = &intel_limits_i8xx_lvds;
7873 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
7874 limit = &intel_limits_i8xx_dvo;
7875 } else {
7876 limit = &intel_limits_i8xx_dac;
7877 }
7878
7879 if (!crtc_state->clock_set &&
7880 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7881 refclk, NULL, &crtc_state->dpll)) {
7882 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7883 return -EINVAL;
7884 }
7885
7886 i8xx_compute_dpll(crtc, crtc_state, NULL);
7887
7888 return 0;
7889 }
7890
7891 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
7892 struct intel_crtc_state *crtc_state)
7893 {
7894 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7895 const struct intel_limit *limit;
7896 int refclk = 96000;
7897
7898 memset(&crtc_state->dpll_hw_state, 0,
7899 sizeof(crtc_state->dpll_hw_state));
7900
7901 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7902 if (intel_panel_use_ssc(dev_priv)) {
7903 refclk = dev_priv->vbt.lvds_ssc_freq;
7904 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7905 }
7906
7907 if (intel_is_dual_link_lvds(dev_priv))
7908 limit = &intel_limits_g4x_dual_channel_lvds;
7909 else
7910 limit = &intel_limits_g4x_single_channel_lvds;
7911 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
7912 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
7913 limit = &intel_limits_g4x_hdmi;
7914 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
7915 limit = &intel_limits_g4x_sdvo;
7916 } else {
7917 /* Use the i9xx SDVO limits for all other output types */
7918 limit = &intel_limits_i9xx_sdvo;
7919 }
7920
7921 if (!crtc_state->clock_set &&
7922 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7923 refclk, NULL, &crtc_state->dpll)) {
7924 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7925 return -EINVAL;
7926 }
7927
7928 i9xx_compute_dpll(crtc, crtc_state, NULL);
7929
7930 return 0;
7931 }
7932
7933 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
7934 struct intel_crtc_state *crtc_state)
7935 {
7936 struct drm_device *dev = crtc->base.dev;
7937 struct drm_i915_private *dev_priv = to_i915(dev);
7938 const struct intel_limit *limit;
7939 int refclk = 96000;
7940
7941 memset(&crtc_state->dpll_hw_state, 0,
7942 sizeof(crtc_state->dpll_hw_state));
7943
7944 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7945 if (intel_panel_use_ssc(dev_priv)) {
7946 refclk = dev_priv->vbt.lvds_ssc_freq;
7947 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7948 }
7949
7950 limit = &intel_limits_pineview_lvds;
7951 } else {
7952 limit = &intel_limits_pineview_sdvo;
7953 }
7954
7955 if (!crtc_state->clock_set &&
7956 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7957 refclk, NULL, &crtc_state->dpll)) {
7958 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7959 return -EINVAL;
7960 }
7961
7962 i9xx_compute_dpll(crtc, crtc_state, NULL);
7963
7964 return 0;
7965 }
7966
7967 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7968 struct intel_crtc_state *crtc_state)
7969 {
7970 struct drm_device *dev = crtc->base.dev;
7971 struct drm_i915_private *dev_priv = to_i915(dev);
7972 const struct intel_limit *limit;
7973 int refclk = 96000;
7974
7975 memset(&crtc_state->dpll_hw_state, 0,
7976 sizeof(crtc_state->dpll_hw_state));
7977
7978 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7979 if (intel_panel_use_ssc(dev_priv)) {
7980 refclk = dev_priv->vbt.lvds_ssc_freq;
7981 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7982 }
7983
7984 limit = &intel_limits_i9xx_lvds;
7985 } else {
7986 limit = &intel_limits_i9xx_sdvo;
7987 }
7988
7989 if (!crtc_state->clock_set &&
7990 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7991 refclk, NULL, &crtc_state->dpll)) {
7992 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7993 return -EINVAL;
7994 }
7995
7996 i9xx_compute_dpll(crtc, crtc_state, NULL);
7997
7998 return 0;
7999 }
8000
8001 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
8002 struct intel_crtc_state *crtc_state)
8003 {
8004 int refclk = 100000;
8005 const struct intel_limit *limit = &intel_limits_chv;
8006
8007 memset(&crtc_state->dpll_hw_state, 0,
8008 sizeof(crtc_state->dpll_hw_state));
8009
8010 if (!crtc_state->clock_set &&
8011 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8012 refclk, NULL, &crtc_state->dpll)) {
8013 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8014 return -EINVAL;
8015 }
8016
8017 chv_compute_dpll(crtc, crtc_state);
8018
8019 return 0;
8020 }
8021
8022 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
8023 struct intel_crtc_state *crtc_state)
8024 {
8025 int refclk = 100000;
8026 const struct intel_limit *limit = &intel_limits_vlv;
8027
8028 memset(&crtc_state->dpll_hw_state, 0,
8029 sizeof(crtc_state->dpll_hw_state));
8030
8031 if (!crtc_state->clock_set &&
8032 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8033 refclk, NULL, &crtc_state->dpll)) {
8034 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8035 return -EINVAL;
8036 }
8037
8038 vlv_compute_dpll(crtc, crtc_state);
8039
8040 return 0;
8041 }
8042
8043 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
8044 {
8045 if (IS_I830(dev_priv))
8046 return false;
8047
8048 return INTEL_GEN(dev_priv) >= 4 ||
8049 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
8050 }
8051
8052 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
8053 struct intel_crtc_state *pipe_config)
8054 {
8055 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8056 u32 tmp;
8057
8058 if (!i9xx_has_pfit(dev_priv))
8059 return;
8060
8061 tmp = I915_READ(PFIT_CONTROL);
8062 if (!(tmp & PFIT_ENABLE))
8063 return;
8064
8065 /* Check whether the pfit is attached to our pipe. */
8066 if (INTEL_GEN(dev_priv) < 4) {
8067 if (crtc->pipe != PIPE_B)
8068 return;
8069 } else {
8070 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
8071 return;
8072 }
8073
8074 pipe_config->gmch_pfit.control = tmp;
8075 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
8076 }
8077
8078 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
8079 struct intel_crtc_state *pipe_config)
8080 {
8081 struct drm_device *dev = crtc->base.dev;
8082 struct drm_i915_private *dev_priv = to_i915(dev);
8083 int pipe = pipe_config->cpu_transcoder;
8084 struct dpll clock;
8085 u32 mdiv;
8086 int refclk = 100000;
8087
8088 /* In case of DSI, DPLL will not be used */
8089 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8090 return;
8091
8092 vlv_dpio_get(dev_priv);
8093 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
8094 vlv_dpio_put(dev_priv);
8095
8096 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8097 clock.m2 = mdiv & DPIO_M2DIV_MASK;
8098 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8099 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8100 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8101
8102 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
8103 }
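/*
 * Worked example (assumed divider values within the VLV limits):
 * m1 = 2, m2 = 27, n = 1, p1 = 2, p2 = 2 gives
 * vco = 100000 * (2 * 27) / 1 = 5400000 kHz, and with p = 4 and the
 * 5x relation between PLL dot clock and port clock applied by
 * vlv_calc_dpll_params(), port_clock = 5400000 / 4 / 5 = 270000,
 * i.e. an HBR DP link.
 */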
8104
8105 static void
8106 i9xx_get_initial_plane_config(struct intel_crtc *crtc,
8107 struct intel_initial_plane_config *plane_config)
8108 {
8109 struct drm_device *dev = crtc->base.dev;
8110 struct drm_i915_private *dev_priv = to_i915(dev);
8111 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8112 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
8113 enum pipe pipe;
8114 u32 val, base, offset;
8115 int fourcc, pixel_format;
8116 unsigned int aligned_height;
8117 struct drm_framebuffer *fb;
8118 struct intel_framebuffer *intel_fb;
8119
8120 if (!plane->get_hw_state(plane, &pipe))
8121 return;
8122
8123 WARN_ON(pipe != crtc->pipe);
8124
8125 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8126 if (!intel_fb) {
8127 DRM_DEBUG_KMS("failed to alloc fb\n");
8128 return;
8129 }
8130
8131 fb = &intel_fb->base;
8132
8133 fb->dev = dev;
8134
8135 val = I915_READ(DSPCNTR(i9xx_plane));
8136
8137 if (INTEL_GEN(dev_priv) >= 4) {
8138 if (val & DISPPLANE_TILED) {
8139 plane_config->tiling = I915_TILING_X;
8140 fb->modifier = I915_FORMAT_MOD_X_TILED;
8141 }
8142
8143 if (val & DISPPLANE_ROTATE_180)
8144 plane_config->rotation = DRM_MODE_ROTATE_180;
8145 }
8146
8147 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
8148 val & DISPPLANE_MIRROR)
8149 plane_config->rotation |= DRM_MODE_REFLECT_X;
8150
8151 pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
8152 fourcc = i9xx_format_to_fourcc(pixel_format);
8153 fb->format = drm_format_info(fourcc);
8154
8155 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
8156 offset = I915_READ(DSPOFFSET(i9xx_plane));
8157 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
8158 } else if (INTEL_GEN(dev_priv) >= 4) {
8159 if (plane_config->tiling)
8160 offset = I915_READ(DSPTILEOFF(i9xx_plane));
8161 else
8162 offset = I915_READ(DSPLINOFF(i9xx_plane));
8163 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
8164 } else {
8165 base = I915_READ(DSPADDR(i9xx_plane));
8166 }
8167 plane_config->base = base;
8168
8169 val = I915_READ(PIPESRC(pipe));
8170 fb->width = ((val >> 16) & 0xfff) + 1;
8171 fb->height = ((val >> 0) & 0xfff) + 1;
8172
8173 val = I915_READ(DSPSTRIDE(i9xx_plane));
8174 fb->pitches[0] = val & 0xffffffc0;
8175
8176 aligned_height = intel_fb_align_height(fb, 0, fb->height);
8177
8178 plane_config->size = fb->pitches[0] * aligned_height;
8179
8180 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8181 crtc->base.name, plane->base.name, fb->width, fb->height,
8182 fb->format->cpp[0] * 8, base, fb->pitches[0],
8183 plane_config->size);
8184
8185 plane_config->fb = intel_fb;
8186 }
8187
8188 static void chv_crtc_clock_get(struct intel_crtc *crtc,
8189 struct intel_crtc_state *pipe_config)
8190 {
8191 struct drm_device *dev = crtc->base.dev;
8192 struct drm_i915_private *dev_priv = to_i915(dev);
8193 int pipe = pipe_config->cpu_transcoder;
8194 enum dpio_channel port = vlv_pipe_to_channel(pipe);
8195 struct dpll clock;
8196 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
8197 int refclk = 100000;
8198
8199 /* In case of DSI, DPLL will not be used */
8200 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8201 return;
8202
8203 vlv_dpio_get(dev_priv);
8204 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
8205 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
8206 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
8207 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
8208 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
8209 vlv_dpio_put(dev_priv);
8210
8211 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
8212 clock.m2 = (pll_dw0 & 0xff) << 22;
8213 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
8214 clock.m2 |= pll_dw2 & 0x3fffff;
8215 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
8216 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
8217 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
8218
8219 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
8220 }
8221
8222 static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
8223 struct intel_crtc_state *pipe_config)
8224 {
8225 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8226 enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;
8227
8228 pipe_config->lspcon_downsampling = false;
8229
8230 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
8231 u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
8232
8233 if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
8234 bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
8235 bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;
8236
8237 if (ycbcr420_enabled) {
8238 /* We support 4:2:0 in full blend mode only */
8239 if (!blend)
8240 output = INTEL_OUTPUT_FORMAT_INVALID;
8241 else if (!(IS_GEMINILAKE(dev_priv) ||
8242 INTEL_GEN(dev_priv) >= 10))
8243 output = INTEL_OUTPUT_FORMAT_INVALID;
8244 else
8245 output = INTEL_OUTPUT_FORMAT_YCBCR420;
8246 } else {
8247 /*
8248 * Currently there is no interface defined to
8249 * check the user's preference between RGB/YCBCR444
8250 * and YCBCR420. So the only possible case for
8251 * YCBCR444 usage is driving YCBCR420 output
8252 * with LSPCON, when the pipe is configured for
8253 * YCBCR444 output and the LSPCON takes care of
8254 * downsampling it.
8255 */
8256 pipe_config->lspcon_downsampling = true;
8257 output = INTEL_OUTPUT_FORMAT_YCBCR444;
8258 }
8259 }
8260 }
8261
8262 pipe_config->output_format = output;
8263 }
8264
8265 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
8266 {
8267 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8268 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8269 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8270 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
8271 u32 tmp;
8272
8273 tmp = I915_READ(DSPCNTR(i9xx_plane));
8274
8275 if (tmp & DISPPLANE_GAMMA_ENABLE)
8276 crtc_state->gamma_enable = true;
8277
8278 if (!HAS_GMCH(dev_priv) &&
8279 tmp & DISPPLANE_PIPE_CSC_ENABLE)
8280 crtc_state->csc_enable = true;
8281 }
8282
8283 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8284 struct intel_crtc_state *pipe_config)
8285 {
8286 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8287 enum intel_display_power_domain power_domain;
8288 intel_wakeref_t wakeref;
8289 u32 tmp;
8290 bool ret;
8291
8292 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8293 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
8294 if (!wakeref)
8295 return false;
8296
8297 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
8298 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8299 pipe_config->shared_dpll = NULL;
8300
8301 ret = false;
8302
8303 tmp = I915_READ(PIPECONF(crtc->pipe));
8304 if (!(tmp & PIPECONF_ENABLE))
8305 goto out;
8306
8307 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8308 IS_CHERRYVIEW(dev_priv)) {
8309 switch (tmp & PIPECONF_BPC_MASK) {
8310 case PIPECONF_6BPC:
8311 pipe_config->pipe_bpp = 18;
8312 break;
8313 case PIPECONF_8BPC:
8314 pipe_config->pipe_bpp = 24;
8315 break;
8316 case PIPECONF_10BPC:
8317 pipe_config->pipe_bpp = 30;
8318 break;
8319 default:
8320 break;
8321 }
8322 }
8323
8324 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
8325 (tmp & PIPECONF_COLOR_RANGE_SELECT))
8326 pipe_config->limited_color_range = true;
8327
8328 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
8329 PIPECONF_GAMMA_MODE_SHIFT;
8330
8331 if (IS_CHERRYVIEW(dev_priv))
8332 pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));
8333
8334 i9xx_get_pipe_color_config(pipe_config);
8335
8336 if (INTEL_GEN(dev_priv) < 4)
8337 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
8338
8339 intel_get_pipe_timings(crtc, pipe_config);
8340 intel_get_pipe_src_size(crtc, pipe_config);
8341
8342 i9xx_get_pfit_config(crtc, pipe_config);
8343
8344 if (INTEL_GEN(dev_priv) >= 4) {
8345 /* No way to read it out on pipes B and C */
8346 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
8347 tmp = dev_priv->chv_dpll_md[crtc->pipe];
8348 else
8349 tmp = I915_READ(DPLL_MD(crtc->pipe));
8350 pipe_config->pixel_multiplier =
8351 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
8352 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
8353 pipe_config->dpll_hw_state.dpll_md = tmp;
8354 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
8355 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
8356 tmp = I915_READ(DPLL(crtc->pipe));
8357 pipe_config->pixel_multiplier =
8358 ((tmp & SDVO_MULTIPLIER_MASK)
8359 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
8360 } else {
8361 /* Note that on i915G/GM the pixel multiplier is in the SDVO
8362 * port and will be fixed up by the encoder's ->get_config()
8363 * function. */
8364 pipe_config->pixel_multiplier = 1;
8365 }
8366 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
8367 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
8368 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
8369 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
8370 } else {
8371 /* Mask out read-only status bits. */
8372 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
8373 DPLL_PORTC_READY_MASK |
8374 DPLL_PORTB_READY_MASK);
8375 }
8376
8377 if (IS_CHERRYVIEW(dev_priv))
8378 chv_crtc_clock_get(crtc, pipe_config);
8379 else if (IS_VALLEYVIEW(dev_priv))
8380 vlv_crtc_clock_get(crtc, pipe_config);
8381 else
8382 i9xx_crtc_clock_get(crtc, pipe_config);
8383
8384 /*
8385 * Normally the dotclock is filled in by the encoder's .get_config(),
8386 * but in case the pipe is enabled without any ports we need a sane
8387 * default.
8388 */
8389 pipe_config->base.adjusted_mode.crtc_clock =
8390 pipe_config->port_clock / pipe_config->pixel_multiplier;
8391
8392 ret = true;
8393
8394 out:
8395 intel_display_power_put(dev_priv, power_domain, wakeref);
8396
8397 return ret;
8398 }
8399
8400 static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
8401 {
8402 struct intel_encoder *encoder;
8403 int i;
8404 u32 val, final;
8405 bool has_lvds = false;
8406 bool has_cpu_edp = false;
8407 bool has_panel = false;
8408 bool has_ck505 = false;
8409 bool can_ssc = false;
8410 bool using_ssc_source = false;
8411
8412 /* We need to take the global config into account */
8413 for_each_intel_encoder(&dev_priv->drm, encoder) {
8414 switch (encoder->type) {
8415 case INTEL_OUTPUT_LVDS:
8416 has_panel = true;
8417 has_lvds = true;
8418 break;
8419 case INTEL_OUTPUT_EDP:
8420 has_panel = true;
8421 if (encoder->port == PORT_A)
8422 has_cpu_edp = true;
8423 break;
8424 default:
8425 break;
8426 }
8427 }
8428
8429 if (HAS_PCH_IBX(dev_priv)) {
8430 has_ck505 = dev_priv->vbt.display_clock_mode;
8431 can_ssc = has_ck505;
8432 } else {
8433 has_ck505 = false;
8434 can_ssc = true;
8435 }
8436
8437 /* Check if any DPLLs are using the SSC source */
8438 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
8439 u32 temp = I915_READ(PCH_DPLL(i));
8440
8441 if (!(temp & DPLL_VCO_ENABLE))
8442 continue;
8443
8444 if ((temp & PLL_REF_INPUT_MASK) ==
8445 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
8446 using_ssc_source = true;
8447 break;
8448 }
8449 }
8450
8451 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
8452 has_panel, has_lvds, has_ck505, using_ssc_source);
8453
8454 /* Ironlake: try to set up the display reference clock before DPLL
8455 * enabling. This is only under the driver's control after the
8456 * PCH B stepping; previous chipset steppings should ignore
8457 * this setting.
8458 */
8459 val = I915_READ(PCH_DREF_CONTROL);
8460
8461 /* As we must carefully and slowly disable/enable each source in turn,
8462 * compute the final state we want first and check if we need to
8463 * make any changes at all.
8464 */
8465 final = val;
8466 final &= ~DREF_NONSPREAD_SOURCE_MASK;
8467 if (has_ck505)
8468 final |= DREF_NONSPREAD_CK505_ENABLE;
8469 else
8470 final |= DREF_NONSPREAD_SOURCE_ENABLE;
8471
8472 final &= ~DREF_SSC_SOURCE_MASK;
8473 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8474 final &= ~DREF_SSC1_ENABLE;
8475
8476 if (has_panel) {
8477 final |= DREF_SSC_SOURCE_ENABLE;
8478
8479 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8480 final |= DREF_SSC1_ENABLE;
8481
8482 if (has_cpu_edp) {
8483 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8484 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8485 else
8486 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8487 } else
8488 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8489 } else if (using_ssc_source) {
8490 final |= DREF_SSC_SOURCE_ENABLE;
8491 final |= DREF_SSC1_ENABLE;
8492 }
8493
8494 if (final == val)
8495 return;
8496
8497 /* Always enable nonspread source */
8498 val &= ~DREF_NONSPREAD_SOURCE_MASK;
8499
8500 if (has_ck505)
8501 val |= DREF_NONSPREAD_CK505_ENABLE;
8502 else
8503 val |= DREF_NONSPREAD_SOURCE_ENABLE;
8504
8505 if (has_panel) {
8506 val &= ~DREF_SSC_SOURCE_MASK;
8507 val |= DREF_SSC_SOURCE_ENABLE;
8508
8509 /* SSC must be turned on before enabling the CPU output */
8510 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8511 DRM_DEBUG_KMS("Using SSC on panel\n");
8512 val |= DREF_SSC1_ENABLE;
8513 } else
8514 val &= ~DREF_SSC1_ENABLE;
8515
8516 /* Get SSC going before enabling the outputs */
8517 I915_WRITE(PCH_DREF_CONTROL, val);
8518 POSTING_READ(PCH_DREF_CONTROL);
8519 udelay(200);
8520
8521 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8522
8523 /* Enable CPU source on CPU attached eDP */
8524 if (has_cpu_edp) {
8525 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8526 DRM_DEBUG_KMS("Using SSC on eDP\n");
8527 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8528 } else
8529 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8530 } else
8531 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8532
8533 I915_WRITE(PCH_DREF_CONTROL, val);
8534 POSTING_READ(PCH_DREF_CONTROL);
8535 udelay(200);
8536 } else {
8537 DRM_DEBUG_KMS("Disabling CPU source output\n");
8538
8539 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8540
8541 /* Turn off CPU output */
8542 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8543
8544 I915_WRITE(PCH_DREF_CONTROL, val);
8545 POSTING_READ(PCH_DREF_CONTROL);
8546 udelay(200);
8547
8548 if (!using_ssc_source) {
8549 DRM_DEBUG_KMS("Disabling SSC source\n");
8550
8551 /* Turn off the SSC source */
8552 val &= ~DREF_SSC_SOURCE_MASK;
8553 val |= DREF_SSC_SOURCE_DISABLE;
8554
8555 /* Turn off SSC1 */
8556 val &= ~DREF_SSC1_ENABLE;
8557
8558 I915_WRITE(PCH_DREF_CONTROL, val);
8559 POSTING_READ(PCH_DREF_CONTROL);
8560 udelay(200);
8561 }
8562 }
8563
8564 BUG_ON(val != final);
8565 }
8566
8567 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
8568 {
8569 u32 tmp;
8570
8571 tmp = I915_READ(SOUTH_CHICKEN2);
8572 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
8573 I915_WRITE(SOUTH_CHICKEN2, tmp);
8574
8575 if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
8576 FDI_MPHY_IOSFSB_RESET_STATUS, 100))
8577 DRM_ERROR("FDI mPHY reset assert timeout\n");
8578
8579 tmp = I915_READ(SOUTH_CHICKEN2);
8580 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
8581 I915_WRITE(SOUTH_CHICKEN2, tmp);
8582
8583 if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
8584 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
8585 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
8586 }
8587
8588 /* WaMPhyProgramming:hsw */
8589 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
8590 {
8591 u32 tmp;
8592
8593 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
8594 tmp &= ~(0xFF << 24);
8595 tmp |= (0x12 << 24);
8596 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
8597
8598 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
8599 tmp |= (1 << 11);
8600 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
8601
8602 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
8603 tmp |= (1 << 11);
8604 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
8605
8606 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
8607 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8608 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
8609
8610 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
8611 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8612 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
8613
8614 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
8615 tmp &= ~(7 << 13);
8616 tmp |= (5 << 13);
8617 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
8618
8619 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
8620 tmp &= ~(7 << 13);
8621 tmp |= (5 << 13);
8622 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
8623
8624 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
8625 tmp &= ~0xFF;
8626 tmp |= 0x1C;
8627 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
8628
8629 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
8630 tmp &= ~0xFF;
8631 tmp |= 0x1C;
8632 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
8633
8634 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
8635 tmp &= ~(0xFF << 16);
8636 tmp |= (0x1C << 16);
8637 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
8638
8639 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
8640 tmp &= ~(0xFF << 16);
8641 tmp |= (0x1C << 16);
8642 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
8643
8644 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
8645 tmp |= (1 << 27);
8646 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
8647
8648 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
8649 tmp |= (1 << 27);
8650 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
8651
8652 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
8653 tmp &= ~(0xF << 28);
8654 tmp |= (4 << 28);
8655 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
8656
8657 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
8658 tmp &= ~(0xF << 28);
8659 tmp |= (4 << 28);
8660 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
8661 }
8662
8663 /* Implements 3 different sequences from BSpec chapter "Display iCLK
8664 * Programming" based on the parameters passed:
8665 * - Sequence to enable CLKOUT_DP
8666 * - Sequence to enable CLKOUT_DP without spread
8667 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
8668 */
8669 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
8670 bool with_spread, bool with_fdi)
8671 {
8672 u32 reg, tmp;
8673
8674 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
8675 with_spread = true;
8676 if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
8677 with_fdi, "LP PCH doesn't have FDI\n"))
8678 with_fdi = false;
8679
8680 mutex_lock(&dev_priv->sb_lock);
8681
8682 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8683 tmp &= ~SBI_SSCCTL_DISABLE;
8684 tmp |= SBI_SSCCTL_PATHALT;
8685 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8686
8687 udelay(24);
8688
8689 if (with_spread) {
8690 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8691 tmp &= ~SBI_SSCCTL_PATHALT;
8692 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8693
8694 if (with_fdi) {
8695 lpt_reset_fdi_mphy(dev_priv);
8696 lpt_program_fdi_mphy(dev_priv);
8697 }
8698 }
8699
8700 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
8701 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8702 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8703 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8704
8705 mutex_unlock(&dev_priv->sb_lock);
8706 }
8707
8708 /* Sequence to disable CLKOUT_DP */
8709 static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
8710 {
8711 u32 reg, tmp;
8712
8713 mutex_lock(&dev_priv->sb_lock);
8714
8715 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
8716 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8717 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8718 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8719
8720 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8721 if (!(tmp & SBI_SSCCTL_DISABLE)) {
8722 if (!(tmp & SBI_SSCCTL_PATHALT)) {
8723 tmp |= SBI_SSCCTL_PATHALT;
8724 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8725 udelay(32);
8726 }
8727 tmp |= SBI_SSCCTL_DISABLE;
8728 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8729 }
8730
8731 mutex_unlock(&dev_priv->sb_lock);
8732 }
8733
8734 #define BEND_IDX(steps) ((50 + (steps)) / 5)
8735
8736 static const u16 sscdivintphase[] = {
8737 [BEND_IDX( 50)] = 0x3B23,
8738 [BEND_IDX( 45)] = 0x3B23,
8739 [BEND_IDX( 40)] = 0x3C23,
8740 [BEND_IDX( 35)] = 0x3C23,
8741 [BEND_IDX( 30)] = 0x3D23,
8742 [BEND_IDX( 25)] = 0x3D23,
8743 [BEND_IDX( 20)] = 0x3E23,
8744 [BEND_IDX( 15)] = 0x3E23,
8745 [BEND_IDX( 10)] = 0x3F23,
8746 [BEND_IDX( 5)] = 0x3F23,
8747 [BEND_IDX( 0)] = 0x0025,
8748 [BEND_IDX( -5)] = 0x0025,
8749 [BEND_IDX(-10)] = 0x0125,
8750 [BEND_IDX(-15)] = 0x0125,
8751 [BEND_IDX(-20)] = 0x0225,
8752 [BEND_IDX(-25)] = 0x0225,
8753 [BEND_IDX(-30)] = 0x0325,
8754 [BEND_IDX(-35)] = 0x0325,
8755 [BEND_IDX(-40)] = 0x0425,
8756 [BEND_IDX(-45)] = 0x0425,
8757 [BEND_IDX(-50)] = 0x0525,
8758 };
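/* Example: BEND_IDX(-20) == (50 - 20) / 5 == 6, selecting 0x0225 above. */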
8759
8760 /*
8761 * Bend CLKOUT_DP
8762 * steps -50 to 50 inclusive, in steps of 5
8763 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
8764 * change in clock period = -(steps / 10) * 5.787 ps
8765 */
8766 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
8767 {
8768 u32 tmp;
8769 int idx = BEND_IDX(steps);
8770
8771 if (WARN_ON(steps % 5 != 0))
8772 return;
8773
8774 if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
8775 return;
8776
8777 mutex_lock(&dev_priv->sb_lock);
8778
8779 if (steps % 10 != 0)
8780 tmp = 0xAAAAAAAB;
8781 else
8782 tmp = 0x00000000;
8783 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
8784
8785 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
8786 tmp &= 0xffff0000;
8787 tmp |= sscdivintphase[idx];
8788 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
8789
8790 mutex_unlock(&dev_priv->sb_lock);
8791 }
8792
8793 #undef BEND_IDX
8794
8795 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
8796 {
8797 struct intel_encoder *encoder;
8798 bool has_vga = false;
8799
8800 for_each_intel_encoder(&dev_priv->drm, encoder) {
8801 switch (encoder->type) {
8802 case INTEL_OUTPUT_ANALOG:
8803 has_vga = true;
8804 break;
8805 default:
8806 break;
8807 }
8808 }
8809
8810 if (has_vga) {
8811 lpt_bend_clkout_dp(dev_priv, 0);
8812 lpt_enable_clkout_dp(dev_priv, true, true);
8813 } else {
8814 lpt_disable_clkout_dp(dev_priv);
8815 }
8816 }
8817
8818 /*
8819 * Initialize reference clocks when the driver loads
8820 */
8821 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
8822 {
8823 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
8824 ironlake_init_pch_refclk(dev_priv);
8825 else if (HAS_PCH_LPT(dev_priv))
8826 lpt_init_pch_refclk(dev_priv);
8827 }
8828
8829 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
8830 {
8831 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8832 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8833 enum pipe pipe = crtc->pipe;
8834 u32 val;
8835
8836 val = 0;
8837
8838 switch (crtc_state->pipe_bpp) {
8839 case 18:
8840 val |= PIPECONF_6BPC;
8841 break;
8842 case 24:
8843 val |= PIPECONF_8BPC;
8844 break;
8845 case 30:
8846 val |= PIPECONF_10BPC;
8847 break;
8848 case 36:
8849 val |= PIPECONF_12BPC;
8850 break;
8851 default:
8852 /* Case prevented by intel_choose_pipe_bpp_dither. */
8853 BUG();
8854 }
8855
8856 if (crtc_state->dither)
8857 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8858
8859 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8860 val |= PIPECONF_INTERLACED_ILK;
8861 else
8862 val |= PIPECONF_PROGRESSIVE;
8863
8864 if (crtc_state->limited_color_range)
8865 val |= PIPECONF_COLOR_RANGE_SELECT;
8866
8867 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
8868
8869 I915_WRITE(PIPECONF(pipe), val);
8870 POSTING_READ(PIPECONF(pipe));
8871 }
8872
8873 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
8874 {
8875 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8876 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8877 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8878 u32 val = 0;
8879
8880 if (IS_HASWELL(dev_priv) && crtc_state->dither)
8881 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8882
8883 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8884 val |= PIPECONF_INTERLACED_ILK;
8885 else
8886 val |= PIPECONF_PROGRESSIVE;
8887
8888 I915_WRITE(PIPECONF(cpu_transcoder), val);
8889 POSTING_READ(PIPECONF(cpu_transcoder));
8890 }
8891
8892 static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state)
8893 {
8894 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
8895 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
8896
8897 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
8898 u32 val = 0;
8899
8900 switch (crtc_state->pipe_bpp) {
8901 case 18:
8902 val |= PIPEMISC_DITHER_6_BPC;
8903 break;
8904 case 24:
8905 val |= PIPEMISC_DITHER_8_BPC;
8906 break;
8907 case 30:
8908 val |= PIPEMISC_DITHER_10_BPC;
8909 break;
8910 case 36:
8911 val |= PIPEMISC_DITHER_12_BPC;
8912 break;
8913 default:
8914 /* Case prevented by pipe_config_set_bpp. */
8915 BUG();
8916 }
8917
8918 if (crtc_state->dither)
8919 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
8920
8921 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
8922 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
8923 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
8924
8925 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
8926 val |= PIPEMISC_YUV420_ENABLE |
8927 PIPEMISC_YUV420_MODE_FULL_BLEND;
8928
8929 I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
8930 }
8931 }
8932
8933 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
8934 {
8935 /*
8936 * Account for spread spectrum to avoid
8937 * oversubscribing the link. Max center spread
8938 * is 2.5%; use 5% for safety's sake.
8939 */
8940 u32 bps = target_clock * bpp * 21 / 20;
8941 return DIV_ROUND_UP(bps, link_bw * 8);
8942 }
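/*
 * Worked example (illustrative, typical values assumed): a 148500 kHz
 * pixel clock at 24 bpp over a 270000 kHz (2.7 GHz symbol rate) link
 * gives bps == 148500 * 24 * 21 / 20 == 3742200, and
 * DIV_ROUND_UP(3742200, 270000 * 8) == 2 lanes.
 */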
8943
8944 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
8945 {
8946 return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
8947 }
8948
8949 static void ironlake_compute_dpll(struct intel_crtc *crtc,
8950 struct intel_crtc_state *crtc_state,
8951 struct dpll *reduced_clock)
8952 {
8953 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8954 u32 dpll, fp, fp2;
8955 int factor;
8956
8957 /* Enable autotuning of the PLL clock (if permissible) */
8958 factor = 21;
8959 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8960 if ((intel_panel_use_ssc(dev_priv) &&
8961 dev_priv->vbt.lvds_ssc_freq == 100000) ||
8962 (HAS_PCH_IBX(dev_priv) &&
8963 intel_is_dual_link_lvds(dev_priv)))
8964 factor = 25;
8965 } else if (crtc_state->sdvo_tv_clock) {
8966 factor = 20;
8967 }
8968
8969 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
8970
8971 if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
8972 fp |= FP_CB_TUNE;
8973
8974 if (reduced_clock) {
8975 fp2 = i9xx_dpll_compute_fp(reduced_clock);
8976
8977 if (reduced_clock->m < factor * reduced_clock->n)
8978 fp2 |= FP_CB_TUNE;
8979 } else {
8980 fp2 = fp;
8981 }
8982
8983 dpll = 0;
8984
8985 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
8986 dpll |= DPLLB_MODE_LVDS;
8987 else
8988 dpll |= DPLLB_MODE_DAC_SERIAL;
8989
8990 dpll |= (crtc_state->pixel_multiplier - 1)
8991 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
8992
8993 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
8994 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
8995 dpll |= DPLL_SDVO_HIGH_SPEED;
8996
8997 if (intel_crtc_has_dp_encoder(crtc_state))
8998 dpll |= DPLL_SDVO_HIGH_SPEED;
8999
9000 /*
9001 * The high speed IO clock is only really required for
9002 * SDVO/HDMI/DP, but we also enable it for CRT to make it
9003 * possible to share the DPLL between CRT and HDMI. Enabling
9004 * the clock needlessly does no real harm, except use up a
9005 * bit of power potentially.
9006 *
9007 * We'll limit this to IVB with 3 pipes, since it has only two
9008 * DPLLs and so DPLL sharing is the only way to get three pipes
9009 * driving PCH ports at the same time. On SNB we could do this,
9010 * and potentially avoid enabling the second DPLL, but it's not
9011 clear if it's a win or loss power-wise. No point in doing
9012 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
9013 */
9014 if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
9015 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
9016 dpll |= DPLL_SDVO_HIGH_SPEED;
9017
9018 /* compute bitmask from p1 value */
9019 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
9020 /* also FPA1 */
9021 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
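/* Illustration: p1 == 3 yields the mask 1 << 2 == 0x4 in both P1 fields. */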
9022
9023 switch (crtc_state->dpll.p2) {
9024 case 5:
9025 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
9026 break;
9027 case 7:
9028 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
9029 break;
9030 case 10:
9031 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
9032 break;
9033 case 14:
9034 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
9035 break;
9036 }
9037
9038 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
9039 intel_panel_use_ssc(dev_priv))
9040 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
9041 else
9042 dpll |= PLL_REF_INPUT_DREFCLK;
9043
9044 dpll |= DPLL_VCO_ENABLE;
9045
9046 crtc_state->dpll_hw_state.dpll = dpll;
9047 crtc_state->dpll_hw_state.fp0 = fp;
9048 crtc_state->dpll_hw_state.fp1 = fp2;
9049 }
9050
9051 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
9052 struct intel_crtc_state *crtc_state)
9053 {
9054 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9055 const struct intel_limit *limit;
9056 int refclk = 120000;
9057
9058 memset(&crtc_state->dpll_hw_state, 0,
9059 sizeof(crtc_state->dpll_hw_state));
9060
9061 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
9062 if (!crtc_state->has_pch_encoder)
9063 return 0;
9064
9065 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9066 if (intel_panel_use_ssc(dev_priv)) {
9067 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
9068 dev_priv->vbt.lvds_ssc_freq);
9069 refclk = dev_priv->vbt.lvds_ssc_freq;
9070 }
9071
9072 if (intel_is_dual_link_lvds(dev_priv)) {
9073 if (refclk == 100000)
9074 limit = &intel_limits_ironlake_dual_lvds_100m;
9075 else
9076 limit = &intel_limits_ironlake_dual_lvds;
9077 } else {
9078 if (refclk == 100000)
9079 limit = &intel_limits_ironlake_single_lvds_100m;
9080 else
9081 limit = &intel_limits_ironlake_single_lvds;
9082 }
9083 } else {
9084 limit = &intel_limits_ironlake_dac;
9085 }
9086
9087 if (!crtc_state->clock_set &&
9088 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9089 refclk, NULL, &crtc_state->dpll)) {
9090 DRM_ERROR("Couldn't find PLL settings for mode!\n");
9091 return -EINVAL;
9092 }
9093
9094 ironlake_compute_dpll(crtc, crtc_state, NULL);
9095
9096 if (!intel_get_shared_dpll(crtc_state, NULL)) {
9097 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9098 pipe_name(crtc->pipe));
9099 return -EINVAL;
9100 }
9101
9102 return 0;
9103 }
9104
9105 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
9106 struct intel_link_m_n *m_n)
9107 {
9108 struct drm_device *dev = crtc->base.dev;
9109 struct drm_i915_private *dev_priv = to_i915(dev);
9110 enum pipe pipe = crtc->pipe;
9111
9112 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
9113 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
9114 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
9115 & ~TU_SIZE_MASK;
9116 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
9117 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
9118 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9119 }
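/*
 * Note (illustrative): the hardware field stores the TU size minus
 * one, hence the "+ 1" above; e.g. a raw field value of 63 decodes
 * to a 64-symbol transfer unit.
 */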
9120
9121 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
9122 enum transcoder transcoder,
9123 struct intel_link_m_n *m_n,
9124 struct intel_link_m_n *m2_n2)
9125 {
9126 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9127 enum pipe pipe = crtc->pipe;
9128
9129 if (INTEL_GEN(dev_priv) >= 5) {
9130 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
9131 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
9132 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
9133 & ~TU_SIZE_MASK;
9134 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
9135 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
9136 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9137
9138 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
9139 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
9140 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
9141 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
9142 & ~TU_SIZE_MASK;
9143 m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
9144 m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
9145 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9146 }
9147 } else {
9148 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
9149 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
9150 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
9151 & ~TU_SIZE_MASK;
9152 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
9153 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
9154 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9155 }
9156 }
9157
9158 void intel_dp_get_m_n(struct intel_crtc *crtc,
9159 struct intel_crtc_state *pipe_config)
9160 {
9161 if (pipe_config->has_pch_encoder)
9162 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9163 else
9164 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9165 &pipe_config->dp_m_n,
9166 &pipe_config->dp_m2_n2);
9167 }
9168
9169 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
9170 struct intel_crtc_state *pipe_config)
9171 {
9172 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9173 &pipe_config->fdi_m_n, NULL);
9174 }
9175
9176 static void skylake_get_pfit_config(struct intel_crtc *crtc,
9177 struct intel_crtc_state *pipe_config)
9178 {
9179 struct drm_device *dev = crtc->base.dev;
9180 struct drm_i915_private *dev_priv = to_i915(dev);
9181 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9182 u32 ps_ctrl = 0;
9183 int id = -1;
9184 int i;
9185
9186 /* find scaler attached to this pipe */
9187 for (i = 0; i < crtc->num_scalers; i++) {
9188 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9189 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9190 id = i;
9191 pipe_config->pch_pfit.enabled = true;
9192 pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9193 pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9194 scaler_state->scalers[i].in_use = true;
9195 break;
9196 }
9197 }
9198
9199 scaler_state->scaler_id = id;
9200 if (id >= 0) {
9201 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9202 } else {
9203 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
9204 }
9205 }
9206
9207 static void
9208 skylake_get_initial_plane_config(struct intel_crtc *crtc,
9209 struct intel_initial_plane_config *plane_config)
9210 {
9211 struct drm_device *dev = crtc->base.dev;
9212 struct drm_i915_private *dev_priv = to_i915(dev);
9213 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
9214 enum plane_id plane_id = plane->id;
9215 enum pipe pipe;
9216 u32 val, base, offset, stride_mult, tiling, alpha;
9217 int fourcc, pixel_format;
9218 unsigned int aligned_height;
9219 struct drm_framebuffer *fb;
9220 struct intel_framebuffer *intel_fb;
9221
9222 if (!plane->get_hw_state(plane, &pipe))
9223 return;
9224
9225 WARN_ON(pipe != crtc->pipe);
9226
9227 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9228 if (!intel_fb) {
9229 DRM_DEBUG_KMS("failed to alloc fb\n");
9230 return;
9231 }
9232
9233 fb = &intel_fb->base;
9234
9235 fb->dev = dev;
9236
9237 val = I915_READ(PLANE_CTL(pipe, plane_id));
9238
9239 if (INTEL_GEN(dev_priv) >= 11)
9240 pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
9241 else
9242 pixel_format = val & PLANE_CTL_FORMAT_MASK;
9243
9244 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
9245 alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
9246 alpha &= PLANE_COLOR_ALPHA_MASK;
9247 } else {
9248 alpha = val & PLANE_CTL_ALPHA_MASK;
9249 }
9250
9251 fourcc = skl_format_to_fourcc(pixel_format,
9252 val & PLANE_CTL_ORDER_RGBX, alpha);
9253 fb->format = drm_format_info(fourcc);
9254
9255 tiling = val & PLANE_CTL_TILED_MASK;
9256 switch (tiling) {
9257 case PLANE_CTL_TILED_LINEAR:
9258 fb->modifier = DRM_FORMAT_MOD_LINEAR;
9259 break;
9260 case PLANE_CTL_TILED_X:
9261 plane_config->tiling = I915_TILING_X;
9262 fb->modifier = I915_FORMAT_MOD_X_TILED;
9263 break;
9264 case PLANE_CTL_TILED_Y:
9265 plane_config->tiling = I915_TILING_Y;
9266 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
9267 fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
9268 else
9269 fb->modifier = I915_FORMAT_MOD_Y_TILED;
9270 break;
9271 case PLANE_CTL_TILED_YF:
9272 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
9273 fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
9274 else
9275 fb->modifier = I915_FORMAT_MOD_Yf_TILED;
9276 break;
9277 default:
9278 MISSING_CASE(tiling);
9279 goto error;
9280 }
9281
9282 /*
9283 * DRM_MODE_ROTATE_ is counterclockwise to stay compatible with Xrandr,
9284 * while i915 HW rotation is clockwise; that's why we swap here.
9285 */
9286 switch (val & PLANE_CTL_ROTATE_MASK) {
9287 case PLANE_CTL_ROTATE_0:
9288 plane_config->rotation = DRM_MODE_ROTATE_0;
9289 break;
9290 case PLANE_CTL_ROTATE_90:
9291 plane_config->rotation = DRM_MODE_ROTATE_270;
9292 break;
9293 case PLANE_CTL_ROTATE_180:
9294 plane_config->rotation = DRM_MODE_ROTATE_180;
9295 break;
9296 case PLANE_CTL_ROTATE_270:
9297 plane_config->rotation = DRM_MODE_ROTATE_90;
9298 break;
9299 }
9300
9301 if (INTEL_GEN(dev_priv) >= 10 &&
9302 val & PLANE_CTL_FLIP_HORIZONTAL)
9303 plane_config->rotation |= DRM_MODE_REFLECT_X;
9304
9305 base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
9306 plane_config->base = base;
9307
9308 offset = I915_READ(PLANE_OFFSET(pipe, plane_id));
9309
9310 val = I915_READ(PLANE_SIZE(pipe, plane_id));
9311 fb->height = ((val >> 16) & 0xfff) + 1;
9312 fb->width = ((val >> 0) & 0x1fff) + 1;
9313
9314 val = I915_READ(PLANE_STRIDE(pipe, plane_id));
9315 stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
9316 fb->pitches[0] = (val & 0x3ff) * stride_mult;
9317
9318 aligned_height = intel_fb_align_height(fb, 0, fb->height);
9319
9320 plane_config->size = fb->pitches[0] * aligned_height;
9321
9322 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9323 crtc->base.name, plane->base.name, fb->width, fb->height,
9324 fb->format->cpp[0] * 8, base, fb->pitches[0],
9325 plane_config->size);
9326
9327 plane_config->fb = intel_fb;
9328 return;
9329
9330 error:
9331 kfree(intel_fb);
9332 }
9333
9334 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
9335 struct intel_crtc_state *pipe_config)
9336 {
9337 struct drm_device *dev = crtc->base.dev;
9338 struct drm_i915_private *dev_priv = to_i915(dev);
9339 u32 tmp;
9340
9341 tmp = I915_READ(PF_CTL(crtc->pipe));
9342
9343 if (tmp & PF_ENABLE) {
9344 pipe_config->pch_pfit.enabled = true;
9345 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9346 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
9347
9348 /* We currently do not free assignments of panel fitters on
9349 * ivb/hsw (since we don't use the higher upscaling modes which
9350 * differentiate them), so just WARN about this case for now. */
9351 if (IS_GEN(dev_priv, 7)) {
9352 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9353 PF_PIPE_SEL_IVB(crtc->pipe));
9354 }
9355 }
9356 }
9357
9358 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9359 struct intel_crtc_state *pipe_config)
9360 {
9361 struct drm_device *dev = crtc->base.dev;
9362 struct drm_i915_private *dev_priv = to_i915(dev);
9363 enum intel_display_power_domain power_domain;
9364 intel_wakeref_t wakeref;
9365 u32 tmp;
9366 bool ret;
9367
9368 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9369 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
9370 if (!wakeref)
9371 return false;
9372
9373 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
9374 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9375 pipe_config->shared_dpll = NULL;
9376
9377 ret = false;
9378 tmp = I915_READ(PIPECONF(crtc->pipe));
9379 if (!(tmp & PIPECONF_ENABLE))
9380 goto out;
9381
9382 switch (tmp & PIPECONF_BPC_MASK) {
9383 case PIPECONF_6BPC:
9384 pipe_config->pipe_bpp = 18;
9385 break;
9386 case PIPECONF_8BPC:
9387 pipe_config->pipe_bpp = 24;
9388 break;
9389 case PIPECONF_10BPC:
9390 pipe_config->pipe_bpp = 30;
9391 break;
9392 case PIPECONF_12BPC:
9393 pipe_config->pipe_bpp = 36;
9394 break;
9395 default:
9396 break;
9397 }
9398
9399 if (tmp & PIPECONF_COLOR_RANGE_SELECT)
9400 pipe_config->limited_color_range = true;
9401
9402 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
9403 PIPECONF_GAMMA_MODE_SHIFT;
9404
9405 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
9406
9407 i9xx_get_pipe_color_config(pipe_config);
9408
9409 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
9410 struct intel_shared_dpll *pll;
9411 enum intel_dpll_id pll_id;
9412
9413 pipe_config->has_pch_encoder = true;
9414
9415 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
9416 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9417 FDI_DP_PORT_WIDTH_SHIFT) + 1;
9418
9419 ironlake_get_fdi_m_n_config(crtc, pipe_config);
9420
9421 if (HAS_PCH_IBX(dev_priv)) {
9422 /*
9423 * The pipe->pch transcoder and pch transcoder->pll
9424 * mapping is fixed.
9425 */
9426 pll_id = (enum intel_dpll_id) crtc->pipe;
9427 } else {
9428 tmp = I915_READ(PCH_DPLL_SEL);
9429 if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
9430 pll_id = DPLL_ID_PCH_PLL_B;
9431 else
9432 pll_id = DPLL_ID_PCH_PLL_A;
9433 }
9434
9435 pipe_config->shared_dpll =
9436 intel_get_shared_dpll_by_id(dev_priv, pll_id);
9437 pll = pipe_config->shared_dpll;
9438
9439 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
9440 &pipe_config->dpll_hw_state));
9441
9442 tmp = pipe_config->dpll_hw_state.dpll;
9443 pipe_config->pixel_multiplier =
9444 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
9445 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
9446
9447 ironlake_pch_clock_get(crtc, pipe_config);
9448 } else {
9449 pipe_config->pixel_multiplier = 1;
9450 }
9451
9452 intel_get_pipe_timings(crtc, pipe_config);
9453 intel_get_pipe_src_size(crtc, pipe_config);
9454
9455 ironlake_get_pfit_config(crtc, pipe_config);
9456
9457 ret = true;
9458
9459 out:
9460 intel_display_power_put(dev_priv, power_domain, wakeref);
9461
9462 return ret;
9463 }
9464
9465 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
9466 {
9467 struct drm_device *dev = &dev_priv->drm;
9468 struct intel_crtc *crtc;
9469
9470 for_each_intel_crtc(dev, crtc)
9471 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
9472 pipe_name(crtc->pipe));
9473
9474 I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
9475 "Display power well on\n");
9476 I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
9477 I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
9478 I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
9479 I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
9480 I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
9481 "CPU PWM1 enabled\n");
9482 if (IS_HASWELL(dev_priv))
9483 I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
9484 "CPU PWM2 enabled\n");
9485 I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
9486 "PCH PWM1 enabled\n");
9487 I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
9488 "Utility pin enabled\n");
9489 I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
9490
9491 /*
9492 * In theory we can still leave IRQs enabled, as long as only the HPD
9493 * interrupts remain enabled. We used to check for that, but since it's
9494 * gen-specific and since we only disable LCPLL after we fully disable
9495 * the interrupts, the check below should be enough.
9496 */
9497 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
9498 }
9499
9500 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
9501 {
9502 if (IS_HASWELL(dev_priv))
9503 return I915_READ(D_COMP_HSW);
9504 else
9505 return I915_READ(D_COMP_BDW);
9506 }
9507
9508 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
9509 {
9510 if (IS_HASWELL(dev_priv)) {
9511 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
9512 val))
9513 DRM_DEBUG_KMS("Failed to write to D_COMP\n");
9514 } else {
9515 I915_WRITE(D_COMP_BDW, val);
9516 POSTING_READ(D_COMP_BDW);
9517 }
9518 }
9519
9520 /*
9521 * This function implements pieces of two sequences from BSpec:
9522 * - Sequence for display software to disable LCPLL
9523 * - Sequence for display software to allow package C8+
9524 * The steps implemented here are just the steps that actually touch the LCPLL
9525 * register. Callers should take care of disabling all the display engine
9526 * functions, doing the mode unset, fixing interrupts, etc.
9527 */
9528 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
9529 bool switch_to_fclk, bool allow_power_down)
9530 {
9531 u32 val;
9532
9533 assert_can_disable_lcpll(dev_priv);
9534
9535 val = I915_READ(LCPLL_CTL);
9536
9537 if (switch_to_fclk) {
9538 val |= LCPLL_CD_SOURCE_FCLK;
9539 I915_WRITE(LCPLL_CTL, val);
9540
9541 if (wait_for_us(I915_READ(LCPLL_CTL) &
9542 LCPLL_CD_SOURCE_FCLK_DONE, 1))
9543 DRM_ERROR("Switching to FCLK failed\n");
9544
9545 val = I915_READ(LCPLL_CTL);
9546 }
9547
9548 val |= LCPLL_PLL_DISABLE;
9549 I915_WRITE(LCPLL_CTL, val);
9550 POSTING_READ(LCPLL_CTL);
9551
9552 if (intel_wait_for_register(&dev_priv->uncore,
9553 LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
9554 DRM_ERROR("LCPLL still locked\n");
9555
9556 val = hsw_read_dcomp(dev_priv);
9557 val |= D_COMP_COMP_DISABLE;
9558 hsw_write_dcomp(dev_priv, val);
9559 ndelay(100);
9560
9561 if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
9562 1))
9563 DRM_ERROR("D_COMP RCOMP still in progress\n");
9564
9565 if (allow_power_down) {
9566 val = I915_READ(LCPLL_CTL);
9567 val |= LCPLL_POWER_DOWN_ALLOW;
9568 I915_WRITE(LCPLL_CTL, val);
9569 POSTING_READ(LCPLL_CTL);
9570 }
9571 }
9572
9573 /*
9574 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
9575 * source.
9576 */
9577 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
9578 {
9579 u32 val;
9580
9581 val = I915_READ(LCPLL_CTL);
9582
9583 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
9584 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
9585 return;
9586
9587 /*
9588 * Make sure we're not in PC8 state before disabling PC8, otherwise
9589 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
9590 */
9591 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
9592
9593 if (val & LCPLL_POWER_DOWN_ALLOW) {
9594 val &= ~LCPLL_POWER_DOWN_ALLOW;
9595 I915_WRITE(LCPLL_CTL, val);
9596 POSTING_READ(LCPLL_CTL);
9597 }
9598
9599 val = hsw_read_dcomp(dev_priv);
9600 val |= D_COMP_COMP_FORCE;
9601 val &= ~D_COMP_COMP_DISABLE;
9602 hsw_write_dcomp(dev_priv, val);
9603
9604 val = I915_READ(LCPLL_CTL);
9605 val &= ~LCPLL_PLL_DISABLE;
9606 I915_WRITE(LCPLL_CTL, val);
9607
9608 if (intel_wait_for_register(&dev_priv->uncore,
9609 LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
9610 5))
9611 DRM_ERROR("LCPLL not locked yet\n");
9612
9613 if (val & LCPLL_CD_SOURCE_FCLK) {
9614 val = I915_READ(LCPLL_CTL);
9615 val &= ~LCPLL_CD_SOURCE_FCLK;
9616 I915_WRITE(LCPLL_CTL, val);
9617
9618 if (wait_for_us((I915_READ(LCPLL_CTL) &
9619 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9620 DRM_ERROR("Switching back to LCPLL failed\n");
9621 }
9622
9623 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
9624
9625 intel_update_cdclk(dev_priv);
9626 intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
9627 }
9628
9629 /*
9630 * Package states C8 and deeper are really deep PC states that can only be
9631 * reached when all the devices on the system allow it, so even if the graphics
9632 * device allows PC8+, it doesn't mean the system will actually get to these
9633 * states. Our driver only allows PC8+ when going into runtime PM.
9634 *
9635 * The requirements for PC8+ are that all the outputs are disabled, the power
9636 * well is disabled and most interrupts are disabled, and these are also
9637 * requirements for runtime PM. When these conditions are met, we manually do
9638 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
9639 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
9640 * hang the machine.
9641 *
9642 * When we really reach PC8 or deeper states (not just when we allow it) we lose
9643 * the state of some registers, so when we come back from PC8+ we need to
9644 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
9645 * need to take care of the registers kept by RC6. Notice that this happens even
9646 * if we don't put the device in PCI D3 state (which is what currently happens
9647 * because of the runtime PM support).
9648 *
9649 * For more, read "Display Sequences for Package C8" on the hardware
9650 * documentation.
9651 */
9652 void hsw_enable_pc8(struct drm_i915_private *dev_priv)
9653 {
9654 u32 val;
9655
9656 DRM_DEBUG_KMS("Enabling package C8+\n");
9657
9658 if (HAS_PCH_LPT_LP(dev_priv)) {
9659 val = I915_READ(SOUTH_DSPCLK_GATE_D);
9660 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
9661 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9662 }
9663
9664 lpt_disable_clkout_dp(dev_priv);
9665 hsw_disable_lcpll(dev_priv, true, true);
9666 }
9667
9668 void hsw_disable_pc8(struct drm_i915_private *dev_priv)
9669 {
9670 u32 val;
9671
9672 DRM_DEBUG_KMS("Disabling package C8+\n");
9673
9674 hsw_restore_lcpll(dev_priv);
9675 lpt_init_pch_refclk(dev_priv);
9676
9677 if (HAS_PCH_LPT_LP(dev_priv)) {
9678 val = I915_READ(SOUTH_DSPCLK_GATE_D);
9679 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
9680 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9681 }
9682 }
9683
9684 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9685 struct intel_crtc_state *crtc_state)
9686 {
9687 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9688 struct intel_atomic_state *state =
9689 to_intel_atomic_state(crtc_state->base.state);
9690
9691 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
9692 INTEL_GEN(dev_priv) >= 11) {
9693 struct intel_encoder *encoder =
9694 intel_get_crtc_new_encoder(state, crtc_state);
9695
9696 if (!intel_get_shared_dpll(crtc_state, encoder)) {
9697 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9698 pipe_name(crtc->pipe));
9699 return -EINVAL;
9700 }
9701 }
9702
9703 return 0;
9704 }
9705
9706 static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
9707 enum port port,
9708 struct intel_crtc_state *pipe_config)
9709 {
9710 enum intel_dpll_id id;
9711 u32 temp;
9712
9713 temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
9714 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
9715
9716 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
9717 return;
9718
9719 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9720 }
9721
9722 static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
9723 enum port port,
9724 struct intel_crtc_state *pipe_config)
9725 {
9726 enum intel_dpll_id id;
9727 u32 temp;
9728
9729 /* TODO: TBT pll not implemented. */
9730 if (intel_port_is_combophy(dev_priv, port)) {
9731 temp = I915_READ(DPCLKA_CFGCR0_ICL) &
9732 DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
9733 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
9734 } else if (intel_port_is_tc(dev_priv, port)) {
9735 id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port));
9736 } else {
9737 WARN(1, "Invalid port %x\n", port);
9738 return;
9739 }
9740
9741 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9742 }
9743
9744 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9745 enum port port,
9746 struct intel_crtc_state *pipe_config)
9747 {
9748 enum intel_dpll_id id;
9749
9750 switch (port) {
9751 case PORT_A:
9752 id = DPLL_ID_SKL_DPLL0;
9753 break;
9754 case PORT_B:
9755 id = DPLL_ID_SKL_DPLL1;
9756 break;
9757 case PORT_C:
9758 id = DPLL_ID_SKL_DPLL2;
9759 break;
9760 default:
9761 DRM_ERROR("Incorrect port type\n");
9762 return;
9763 }
9764
9765 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9766 }
9767
9768 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
9769 enum port port,
9770 struct intel_crtc_state *pipe_config)
9771 {
9772 enum intel_dpll_id id;
9773 u32 temp;
9774
9775 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
9776 id = temp >> (port * 3 + 1);
9777
9778 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
9779 return;
9780
9781 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9782 }
9783
9784 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
9785 enum port port,
9786 struct intel_crtc_state *pipe_config)
9787 {
9788 enum intel_dpll_id id;
9789 u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
9790
9791 switch (ddi_pll_sel) {
9792 case PORT_CLK_SEL_WRPLL1:
9793 id = DPLL_ID_WRPLL1;
9794 break;
9795 case PORT_CLK_SEL_WRPLL2:
9796 id = DPLL_ID_WRPLL2;
9797 break;
9798 case PORT_CLK_SEL_SPLL:
9799 id = DPLL_ID_SPLL;
9800 break;
9801 case PORT_CLK_SEL_LCPLL_810:
9802 id = DPLL_ID_LCPLL_810;
9803 break;
9804 case PORT_CLK_SEL_LCPLL_1350:
9805 id = DPLL_ID_LCPLL_1350;
9806 break;
9807 case PORT_CLK_SEL_LCPLL_2700:
9808 id = DPLL_ID_LCPLL_2700;
9809 break;
9810 default:
9811 MISSING_CASE(ddi_pll_sel);
9812 /* fall through */
9813 case PORT_CLK_SEL_NONE:
9814 return;
9815 }
9816
9817 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9818 }
9819
9820 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
9821 struct intel_crtc_state *pipe_config,
9822 u64 *power_domain_mask,
9823 intel_wakeref_t *wakerefs)
9824 {
9825 struct drm_device *dev = crtc->base.dev;
9826 struct drm_i915_private *dev_priv = to_i915(dev);
9827 enum intel_display_power_domain power_domain;
9828 unsigned long panel_transcoder_mask = 0;
9829 unsigned long enabled_panel_transcoders = 0;
9830 enum transcoder panel_transcoder;
9831 intel_wakeref_t wf;
9832 u32 tmp;
9833
9834 if (INTEL_GEN(dev_priv) >= 11)
9835 panel_transcoder_mask |=
9836 BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
9837
9838 if (HAS_TRANSCODER_EDP(dev_priv))
9839 panel_transcoder_mask |= BIT(TRANSCODER_EDP);
9840
9841 /*
9842 * The pipe->transcoder mapping is fixed with the exception of the eDP
9843 * and DSI transcoders handled below.
9844 */
9845 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9846
9847 /*
9848 * XXX: Do intel_display_power_get_if_enabled before reading this (for
9849 * consistency and less surprising code; it's in always-on power).
9850 */
9851 for_each_set_bit(panel_transcoder,
9852 &panel_transcoder_mask,
9853 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
9854 enum pipe trans_pipe;
9855
9856 tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
9857 if (!(tmp & TRANS_DDI_FUNC_ENABLE))
9858 continue;
9859
9860 /*
9861 * Log all enabled ones, only use the first one.
9862 *
9863 * FIXME: This won't work for two separate DSI displays.
9864 */
9865 enabled_panel_transcoders |= BIT(panel_transcoder);
9866 if (enabled_panel_transcoders != BIT(panel_transcoder))
9867 continue;
9868
9869 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
9870 default:
9871 WARN(1, "unknown pipe linked to transcoder %s\n",
9872 transcoder_name(panel_transcoder));
9873 /* fall through */
9874 case TRANS_DDI_EDP_INPUT_A_ONOFF:
9875 case TRANS_DDI_EDP_INPUT_A_ON:
9876 trans_pipe = PIPE_A;
9877 break;
9878 case TRANS_DDI_EDP_INPUT_B_ONOFF:
9879 trans_pipe = PIPE_B;
9880 break;
9881 case TRANS_DDI_EDP_INPUT_C_ONOFF:
9882 trans_pipe = PIPE_C;
9883 break;
9884 }
9885
9886 if (trans_pipe == crtc->pipe)
9887 pipe_config->cpu_transcoder = panel_transcoder;
9888 }
9889
9890 /*
9891 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
9892 */
9893 WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
9894 enabled_panel_transcoders != BIT(TRANSCODER_EDP));
9895
9896 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
9897 WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
9898
9899 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
9900 if (!wf)
9901 return false;
9902
9903 wakerefs[power_domain] = wf;
9904 *power_domain_mask |= BIT_ULL(power_domain);
9905
9906 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
9907
9908 return tmp & PIPECONF_ENABLE;
9909 }
9910
9911 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
9912 struct intel_crtc_state *pipe_config,
9913 u64 *power_domain_mask,
9914 intel_wakeref_t *wakerefs)
9915 {
9916 struct drm_device *dev = crtc->base.dev;
9917 struct drm_i915_private *dev_priv = to_i915(dev);
9918 enum intel_display_power_domain power_domain;
9919 enum transcoder cpu_transcoder;
9920 intel_wakeref_t wf;
9921 enum port port;
9922 u32 tmp;
9923
9924 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
9925 if (port == PORT_A)
9926 cpu_transcoder = TRANSCODER_DSI_A;
9927 else
9928 cpu_transcoder = TRANSCODER_DSI_C;
9929
9930 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
9931 WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
9932
9933 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
9934 if (!wf)
9935 continue;
9936
9937 wakerefs[power_domain] = wf;
9938 *power_domain_mask |= BIT_ULL(power_domain);
9939
9940 /*
9941 * The PLL needs to be enabled with a valid divider
9942 * configuration, otherwise accessing DSI registers will hang
9943 * the machine. See BSpec North Display Engine
9944 * registers/MIPI[BXT]. We can break out here early, since we
9945 * need the same DSI PLL to be enabled for both DSI ports.
9946 */
9947 if (!bxt_dsi_pll_is_enabled(dev_priv))
9948 break;
9949
9950 /* XXX: this works for video mode only */
9951 tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
9952 if (!(tmp & DPI_ENABLE))
9953 continue;
9954
9955 tmp = I915_READ(MIPI_CTRL(port));
9956 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
9957 continue;
9958
9959 pipe_config->cpu_transcoder = cpu_transcoder;
9960 break;
9961 }
9962
9963 return transcoder_is_dsi(pipe_config->cpu_transcoder);
9964 }
9965
9966 static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
9967 struct intel_crtc_state *pipe_config)
9968 {
9969 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9970 struct intel_shared_dpll *pll;
9971 enum port port;
9972 u32 tmp;
9973
9974 tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
9975
9976 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
9977
9978 if (INTEL_GEN(dev_priv) >= 11)
9979 icelake_get_ddi_pll(dev_priv, port, pipe_config);
9980 else if (IS_CANNONLAKE(dev_priv))
9981 cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
9982 else if (IS_GEN9_BC(dev_priv))
9983 skylake_get_ddi_pll(dev_priv, port, pipe_config);
9984 else if (IS_GEN9_LP(dev_priv))
9985 bxt_get_ddi_pll(dev_priv, port, pipe_config);
9986 else
9987 haswell_get_ddi_pll(dev_priv, port, pipe_config);
9988
9989 pll = pipe_config->shared_dpll;
9990 if (pll) {
9991 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
9992 &pipe_config->dpll_hw_state));
9993 }
9994
9995 /*
9996 * Haswell has only FDI/PCH transcoder A, which is connected to
9997 * DDI E. So just check whether this pipe is wired to DDI E and whether
9998 * the PCH transcoder is on.
9999 */
10000 if (INTEL_GEN(dev_priv) < 9 &&
10001 (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
10002 pipe_config->has_pch_encoder = true;
10003
10004 tmp = I915_READ(FDI_RX_CTL(PIPE_A));
10005 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10006 FDI_DP_PORT_WIDTH_SHIFT) + 1;
10007
10008 ironlake_get_fdi_m_n_config(crtc, pipe_config);
10009 }
10010 }
10011
10012 static bool haswell_get_pipe_config(struct intel_crtc *crtc,
10013 struct intel_crtc_state *pipe_config)
10014 {
10015 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10016 intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
10017 enum intel_display_power_domain power_domain;
10018 u64 power_domain_mask;
10019 bool active;
10020
10021 intel_crtc_init_scalers(crtc, pipe_config);
10022
10023 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10024 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10025 if (!wf)
10026 return false;
10027
10028 wakerefs[power_domain] = wf;
10029 power_domain_mask = BIT_ULL(power_domain);
10030
10031 pipe_config->shared_dpll = NULL;
10032
10033 active = hsw_get_transcoder_state(crtc, pipe_config,
10034 &power_domain_mask, wakerefs);
10035
10036 if (IS_GEN9_LP(dev_priv) &&
10037 bxt_get_dsi_transcoder_state(crtc, pipe_config,
10038 &power_domain_mask, wakerefs)) {
10039 WARN_ON(active);
10040 active = true;
10041 }
10042
10043 if (!active)
10044 goto out;
10045
10046 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
10047 INTEL_GEN(dev_priv) >= 11) {
10048 haswell_get_ddi_port_state(crtc, pipe_config);
10049 intel_get_pipe_timings(crtc, pipe_config);
10050 }
10051
10052 intel_get_pipe_src_size(crtc, pipe_config);
10053 intel_get_crtc_ycbcr_config(crtc, pipe_config);
10054
10055 pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));
10056
10057 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
10058
10059 if (INTEL_GEN(dev_priv) >= 9) {
10060 u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));
10061
10062 if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
10063 pipe_config->gamma_enable = true;
10064
10065 if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
10066 pipe_config->csc_enable = true;
10067 } else {
10068 i9xx_get_pipe_color_config(pipe_config);
10069 }
10070
10071 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10072 WARN_ON(power_domain_mask & BIT_ULL(power_domain));
10073
10074 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10075 if (wf) {
10076 wakerefs[power_domain] = wf;
10077 power_domain_mask |= BIT_ULL(power_domain);
10078
10079 if (INTEL_GEN(dev_priv) >= 9)
10080 skylake_get_pfit_config(crtc, pipe_config);
10081 else
10082 ironlake_get_pfit_config(crtc, pipe_config);
10083 }
10084
10085 if (hsw_crtc_supports_ips(crtc)) {
10086 if (IS_HASWELL(dev_priv))
10087 pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
10088 else {
10089 /*
10090 * We cannot read out the IPS state on Broadwell, so set it to
10091 * true here so that the first commit brings it to a defined
10092 * state.
10093 */
10094 pipe_config->ips_enabled = true;
10095 }
10096 }
10097
10098 if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
10099 !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
10100 pipe_config->pixel_multiplier =
10101 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
10102 } else {
10103 pipe_config->pixel_multiplier = 1;
10104 }
10105
10106 out:
10107 for_each_power_domain(power_domain, power_domain_mask)
10108 intel_display_power_put(dev_priv,
10109 power_domain, wakerefs[power_domain]);
10110
10111 return active;
10112 }
10113
10114 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
10115 {
10116 struct drm_i915_private *dev_priv =
10117 to_i915(plane_state->base.plane->dev);
10118 const struct drm_framebuffer *fb = plane_state->base.fb;
10119 const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
10120 u32 base;
10121
10122 if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
10123 base = obj->phys_handle->busaddr;
10124 else
10125 base = intel_plane_ggtt_offset(plane_state);
10126
10127 base += plane_state->color_plane[0].offset;
10128
10129 /* ILK+ do this automagically */
10130 if (HAS_GMCH(dev_priv) &&
10131 plane_state->base.rotation & DRM_MODE_ROTATE_180)
10132 base += (plane_state->base.crtc_h *
10133 plane_state->base.crtc_w - 1) * fb->format->cpp[0];
10134
10135 return base;
10136 }
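/*
 * Illustration (assuming a 64x64 ARGB cursor, cpp == 4): with 180
 * degree rotation on a GMCH platform, the adjustment above adds
 * (64 * 64 - 1) * 4 == 16380 bytes, i.e. base points at the last
 * pixel of the cursor image.
 */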
10137
10138 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
10139 {
10140 int x = plane_state->base.crtc_x;
10141 int y = plane_state->base.crtc_y;
10142 u32 pos = 0;
10143
10144 if (x < 0) {
10145 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10146 x = -x;
10147 }
10148 pos |= x << CURSOR_X_SHIFT;
10149
10150 if (y < 0) {
10151 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10152 y = -y;
10153 }
10154 pos |= y << CURSOR_Y_SHIFT;
10155
10156 return pos;
10157 }
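/*
 * Worked example (illustrative): a cursor at crtc position (-10, 20)
 * encodes each axis as sign/magnitude, i.e.
 * pos == ((CURSOR_POS_SIGN | 10) << CURSOR_X_SHIFT) | (20 << CURSOR_Y_SHIFT).
 */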
10158
10159 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
10160 {
10161 const struct drm_mode_config *config =
10162 &plane_state->base.plane->dev->mode_config;
10163 int width = plane_state->base.crtc_w;
10164 int height = plane_state->base.crtc_h;
10165
10166 return width > 0 && width <= config->cursor_width &&
10167 height > 0 && height <= config->cursor_height;
10168 }
10169
10170 static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
10171 {
10172 const struct drm_framebuffer *fb = plane_state->base.fb;
10173 unsigned int rotation = plane_state->base.rotation;
10174 int src_x, src_y;
10175 u32 offset;
10176 int ret;
10177
10178 intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
10179 plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
10180
10181 ret = intel_plane_check_stride(plane_state);
10182 if (ret)
10183 return ret;
10184
10185 src_x = plane_state->base.src_x >> 16;
10186 src_y = plane_state->base.src_y >> 16;
10187
10188 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
10189 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
10190 plane_state, 0);
10191
10192 if (src_x != 0 || src_y != 0) {
10193 DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
10194 return -EINVAL;
10195 }
10196
10197 plane_state->color_plane[0].offset = offset;
10198
10199 return 0;
10200 }
10201
10202 static int intel_check_cursor(struct intel_crtc_state *crtc_state,
10203 struct intel_plane_state *plane_state)
10204 {
10205 const struct drm_framebuffer *fb = plane_state->base.fb;
10206 int ret;
10207
10208 if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
10209 DRM_DEBUG_KMS("cursor cannot be tiled\n");
10210 return -EINVAL;
10211 }
10212
10213 ret = drm_atomic_helper_check_plane_state(&plane_state->base,
10214 &crtc_state->base,
10215 DRM_PLANE_HELPER_NO_SCALING,
10216 DRM_PLANE_HELPER_NO_SCALING,
10217 true, true);
10218 if (ret)
10219 return ret;
10220
10221 if (!plane_state->base.visible)
10222 return 0;
10223
10224 ret = intel_plane_check_src_coordinates(plane_state);
10225 if (ret)
10226 return ret;
10227
10228 ret = intel_cursor_check_surface(plane_state);
10229 if (ret)
10230 return ret;
10231
10232 return 0;
10233 }
10234
10235 static unsigned int
10236 i845_cursor_max_stride(struct intel_plane *plane,
10237 u32 pixel_format, u64 modifier,
10238 unsigned int rotation)
10239 {
10240 return 2048;
10241 }
10242
10243 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10244 {
10245 u32 cntl = 0;
10246
10247 if (crtc_state->gamma_enable)
10248 cntl |= CURSOR_GAMMA_ENABLE;
10249
10250 return cntl;
10251 }
10252
10253 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
10254 const struct intel_plane_state *plane_state)
10255 {
10256 return CURSOR_ENABLE |
10257 CURSOR_FORMAT_ARGB |
10258 CURSOR_STRIDE(plane_state->color_plane[0].stride);
10259 }
10260
10261 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
10262 {
10263 int width = plane_state->base.crtc_w;
10264
10265 /*
10266 * 845g/865g are only limited by the width of their cursors;
10267 * the height is arbitrary up to the precision of the register.
10268 */
10269 return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
10270 }
10271
10272 static int i845_check_cursor(struct intel_crtc_state *crtc_state,
10273 struct intel_plane_state *plane_state)
10274 {
10275 const struct drm_framebuffer *fb = plane_state->base.fb;
10276 int ret;
10277
10278 ret = intel_check_cursor(crtc_state, plane_state);
10279 if (ret)
10280 return ret;
10281
10282 /* if we want to turn off the cursor, ignore width and height */
10283 if (!fb)
10284 return 0;
10285
10286 /* Check for which cursor types we support */
10287 if (!i845_cursor_size_ok(plane_state)) {
10288 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
10289 plane_state->base.crtc_w,
10290 plane_state->base.crtc_h);
10291 return -EINVAL;
10292 }
10293
10294 WARN_ON(plane_state->base.visible &&
10295 plane_state->color_plane[0].stride != fb->pitches[0]);
10296
10297 switch (fb->pitches[0]) {
10298 case 256:
10299 case 512:
10300 case 1024:
10301 case 2048:
10302 break;
10303 default:
10304 DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
10305 fb->pitches[0]);
10306 return -EINVAL;
10307 }
10308
10309 plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
10310
10311 return 0;
10312 }
10313
10314 static void i845_update_cursor(struct intel_plane *plane,
10315 const struct intel_crtc_state *crtc_state,
10316 const struct intel_plane_state *plane_state)
10317 {
10318 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10319 u32 cntl = 0, base = 0, pos = 0, size = 0;
10320 unsigned long irqflags;
10321
10322 if (plane_state && plane_state->base.visible) {
10323 unsigned int width = plane_state->base.crtc_w;
10324 unsigned int height = plane_state->base.crtc_h;
10325
10326 cntl = plane_state->ctl |
10327 i845_cursor_ctl_crtc(crtc_state);
10328
10329 size = (height << 12) | width;
10330
10331 base = intel_cursor_base(plane_state);
10332 pos = intel_cursor_position(plane_state);
10333 }
10334
10335 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
10336
10337 /* On these chipsets we can only modify the base/size/stride
10338 * whilst the cursor is disabled.
10339 */
10340 if (plane->cursor.base != base ||
10341 plane->cursor.size != size ||
10342 plane->cursor.cntl != cntl) {
10343 I915_WRITE_FW(CURCNTR(PIPE_A), 0);
10344 I915_WRITE_FW(CURBASE(PIPE_A), base);
10345 I915_WRITE_FW(CURSIZE, size);
10346 I915_WRITE_FW(CURPOS(PIPE_A), pos);
10347 I915_WRITE_FW(CURCNTR(PIPE_A), cntl);
10348
10349 plane->cursor.base = base;
10350 plane->cursor.size = size;
10351 plane->cursor.cntl = cntl;
10352 } else {
10353 I915_WRITE_FW(CURPOS(PIPE_A), pos);
10354 }
10355
10356 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
10357 }
10358
10359 static void i845_disable_cursor(struct intel_plane *plane,
10360 const struct intel_crtc_state *crtc_state)
10361 {
10362 i845_update_cursor(plane, crtc_state, NULL);
10363 }
10364
10365 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
10366 enum pipe *pipe)
10367 {
10368 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10369 enum intel_display_power_domain power_domain;
10370 intel_wakeref_t wakeref;
10371 bool ret;
10372
10373 power_domain = POWER_DOMAIN_PIPE(PIPE_A);
10374 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10375 if (!wakeref)
10376 return false;
10377
10378 ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
10379
10380 *pipe = PIPE_A;
10381
10382 intel_display_power_put(dev_priv, power_domain, wakeref);
10383
10384 return ret;
10385 }
10386
10387 static unsigned int
10388 i9xx_cursor_max_stride(struct intel_plane *plane,
10389 u32 pixel_format, u64 modifier,
10390 unsigned int rotation)
10391 {
10392 return plane->base.dev->mode_config.cursor_width * 4;
10393 }
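/*
 * Illustration (assuming the common 256 pixel cursor_width limit):
 * 256 * 4 == 1024 bytes, i.e. one ARGB8888 row at the maximum width.
 */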
10394
10395 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10396 {
10397 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
10398 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10399 u32 cntl = 0;
10400
10401 if (INTEL_GEN(dev_priv) >= 11)
10402 return cntl;
10403
10404 if (crtc_state->gamma_enable)
10405 cntl = MCURSOR_GAMMA_ENABLE;
10406
10407 if (crtc_state->csc_enable)
10408 cntl |= MCURSOR_PIPE_CSC_ENABLE;
10409
10410 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
10411 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
10412
10413 return cntl;
10414 }
10415
10416 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
10417 const struct intel_plane_state *plane_state)
10418 {
10419 struct drm_i915_private *dev_priv =
10420 to_i915(plane_state->base.plane->dev);
10421 u32 cntl = 0;
10422
10423 if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
10424 cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
10425
10426 switch (plane_state->base.crtc_w) {
10427 case 64:
10428 cntl |= MCURSOR_MODE_64_ARGB_AX;
10429 break;
10430 case 128:
10431 cntl |= MCURSOR_MODE_128_ARGB_AX;
10432 break;
10433 case 256:
10434 cntl |= MCURSOR_MODE_256_ARGB_AX;
10435 break;
10436 default:
10437 MISSING_CASE(plane_state->base.crtc_w);
10438 return 0;
10439 }
10440
10441 if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
10442 cntl |= MCURSOR_ROTATE_180;
10443
10444 return cntl;
10445 }
10446
10447 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
10448 {
10449 struct drm_i915_private *dev_priv =
10450 to_i915(plane_state->base.plane->dev);
10451 int width = plane_state->base.crtc_w;
10452 int height = plane_state->base.crtc_h;
10453
10454 if (!intel_cursor_size_ok(plane_state))
10455 return false;
10456
10457 /* Cursor width is limited to a few power-of-two sizes */
10458 switch (width) {
10459 case 256:
10460 case 128:
10461 case 64:
10462 break;
10463 default:
10464 return false;
10465 }
10466
10467 /*
10468 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
10469 * height from 8 lines up to the cursor width, when the
10470 * cursor is not rotated. Everything else requires square
10471 * cursors.
10472 */
10473 if (HAS_CUR_FBC(dev_priv) &&
10474 plane_state->base.rotation & DRM_MODE_ROTATE_0) {
10475 if (height < 8 || height > width)
10476 return false;
10477 } else {
10478 if (height != width)
10479 return false;
10480 }
10481
10482 return true;
10483 }
10484
10485 static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
10486 struct intel_plane_state *plane_state)
10487 {
10488 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
10489 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10490 const struct drm_framebuffer *fb = plane_state->base.fb;
10491 enum pipe pipe = plane->pipe;
10492 int ret;
10493
10494 ret = intel_check_cursor(crtc_state, plane_state);
10495 if (ret)
10496 return ret;
10497
10498 /* if we want to turn off the cursor, ignore width and height */
10499 if (!fb)
10500 return 0;
10501
10502 /* Check for which cursor types we support */
10503 if (!i9xx_cursor_size_ok(plane_state)) {
10504 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
10505 plane_state->base.crtc_w,
10506 plane_state->base.crtc_h);
10507 return -EINVAL;
10508 }
10509
10510 WARN_ON(plane_state->base.visible &&
10511 plane_state->color_plane[0].stride != fb->pitches[0]);
10512
10513 if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
10514 DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
10515 fb->pitches[0], plane_state->base.crtc_w);
10516 return -EINVAL;
10517 }
10518
10519 /*
10520 * There's something wrong with the cursor on CHV pipe C.
10521 * If it straddles the left edge of the screen then
10522 * moving it away from the edge or disabling it often
10523 * results in a pipe underrun, and often that can lead to
10524 * a dead pipe (constant underrun reported, and it scans
10525 * out just a solid color). To recover from that, the
10526 * display power well must be turned off and on again.
10527 * Refuse to put the cursor into that compromised position.
10528 */
10529 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
10530 plane_state->base.visible && plane_state->base.crtc_x < 0) {
10531 DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
10532 return -EINVAL;
10533 }
10534
10535 plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
10536
10537 return 0;
10538 }
10539
10540 static void i9xx_update_cursor(struct intel_plane *plane,
10541 const struct intel_crtc_state *crtc_state,
10542 const struct intel_plane_state *plane_state)
10543 {
10544 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10545 enum pipe pipe = plane->pipe;
10546 u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
10547 unsigned long irqflags;
10548
10549 if (plane_state && plane_state->base.visible) {
10550 cntl = plane_state->ctl |
10551 i9xx_cursor_ctl_crtc(crtc_state);
10552
10553 if (plane_state->base.crtc_h != plane_state->base.crtc_w)
10554 fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);
10555
10556 base = intel_cursor_base(plane_state);
10557 pos = intel_cursor_position(plane_state);
10558 }
10559
10560 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
10561
10562 /*
10563 * On some platforms writing CURCNTR first will also
10564 * cause CURPOS to be armed by the CURBASE write.
10565 * Without the CURCNTR write the CURPOS write would
10566 * arm itself. Thus we always update CURCNTR before
10567 * CURPOS.
10568 *
10569 * On other platforms CURPOS always requires the
10570 * CURBASE write to arm the update. Additionally,
10571 * a write to any of the cursor registers will cancel
10572 * an already armed cursor update. Thus leaving out
10573 * the CURBASE write after CURPOS could lead to a
10574 * cursor that doesn't appear to move, or even change
10575 * shape. Thus we always write CURBASE.
10576 *
10577 * The other registers are armed by the CURBASE write
10578 * except when the plane is getting enabled at which time
10579 * the CURCNTR write arms the update.
10580 */
10581
10582 if (INTEL_GEN(dev_priv) >= 9)
10583 skl_write_cursor_wm(plane, crtc_state);
10584
10585 if (plane->cursor.base != base ||
10586 plane->cursor.size != fbc_ctl ||
10587 plane->cursor.cntl != cntl) {
10588 if (HAS_CUR_FBC(dev_priv))
10589 I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
10590 I915_WRITE_FW(CURCNTR(pipe), cntl);
10591 I915_WRITE_FW(CURPOS(pipe), pos);
10592 I915_WRITE_FW(CURBASE(pipe), base);
10593
10594 plane->cursor.base = base;
10595 plane->cursor.size = fbc_ctl;
10596 plane->cursor.cntl = cntl;
10597 } else {
10598 I915_WRITE_FW(CURPOS(pipe), pos);
10599 I915_WRITE_FW(CURBASE(pipe), base);
10600 }
10601
10602 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
10603 }
10604
10605 static void i9xx_disable_cursor(struct intel_plane *plane,
10606 const struct intel_crtc_state *crtc_state)
10607 {
10608 i9xx_update_cursor(plane, crtc_state, NULL);
10609 }
10610
10611 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
10612 enum pipe *pipe)
10613 {
10614 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10615 enum intel_display_power_domain power_domain;
10616 intel_wakeref_t wakeref;
10617 bool ret;
10618 u32 val;
10619
10620 /*
10621 * Not 100% correct for planes that can move between pipes,
10622 * but that's only the case for gen2-3 which don't have any
10623 * display power wells.
10624 */
10625 power_domain = POWER_DOMAIN_PIPE(plane->pipe);
10626 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10627 if (!wakeref)
10628 return false;
10629
10630 val = I915_READ(CURCNTR(plane->pipe));
10631
10632 ret = val & MCURSOR_MODE;
10633
10634 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
10635 *pipe = plane->pipe;
10636 else
10637 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
10638 MCURSOR_PIPE_SELECT_SHIFT;
10639
10640 intel_display_power_put(dev_priv, power_domain, wakeref);
10641
10642 return ret;
10643 }
10644
10645 /* VESA 640x480x72Hz mode to set on the pipe */
10646 static const struct drm_display_mode load_detect_mode = {
10647 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
10648 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
10649 };
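/*
 * Sanity check on the timings above: 31500 kHz / (htotal 832 *
 * vtotal 520) = 31500000 / 432640 ~= 72.8 Hz, matching the
 * advertised VESA 640x480@72 mode.
 */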
10650
10651 struct drm_framebuffer *
10652 intel_framebuffer_create(struct drm_i915_gem_object *obj,
10653 struct drm_mode_fb_cmd2 *mode_cmd)
10654 {
10655 struct intel_framebuffer *intel_fb;
10656 int ret;
10657
10658 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10659 if (!intel_fb)
10660 return ERR_PTR(-ENOMEM);
10661
10662 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
10663 if (ret)
10664 goto err;
10665
10666 return &intel_fb->base;
10667
10668 err:
10669 kfree(intel_fb);
10670 return ERR_PTR(ret);
10671 }
10672
10673 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
10674 struct drm_crtc *crtc)
10675 {
10676 struct drm_plane *plane;
10677 struct drm_plane_state *plane_state;
10678 int ret, i;
10679
10680 ret = drm_atomic_add_affected_planes(state, crtc);
10681 if (ret)
10682 return ret;
10683
10684 for_each_new_plane_in_state(state, plane, plane_state, i) {
10685 if (plane_state->crtc != crtc)
10686 continue;
10687
10688 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
10689 if (ret)
10690 return ret;
10691
10692 drm_atomic_set_fb_for_plane(plane_state, NULL);
10693 }
10694
10695 return 0;
10696 }
10697
10698 int intel_get_load_detect_pipe(struct drm_connector *connector,
10699 const struct drm_display_mode *mode,
10700 struct intel_load_detect_pipe *old,
10701 struct drm_modeset_acquire_ctx *ctx)
10702 {
10703 struct intel_crtc *intel_crtc;
10704 struct intel_encoder *intel_encoder =
10705 intel_attached_encoder(connector);
10706 struct drm_crtc *possible_crtc;
10707 struct drm_encoder *encoder = &intel_encoder->base;
10708 struct drm_crtc *crtc = NULL;
10709 struct drm_device *dev = encoder->dev;
10710 struct drm_i915_private *dev_priv = to_i915(dev);
10711 struct drm_mode_config *config = &dev->mode_config;
10712 struct drm_atomic_state *state = NULL, *restore_state = NULL;
10713 struct drm_connector_state *connector_state;
10714 struct intel_crtc_state *crtc_state;
10715 int ret, i = -1;
10716
10717 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10718 connector->base.id, connector->name,
10719 encoder->base.id, encoder->name);
10720
10721 old->restore_state = NULL;
10722
10723 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
10724
10725 /*
10726 * Algorithm gets a little messy:
10727 *
10728 * - if the connector already has an assigned crtc, use it (but make
10729 * sure it's on first)
10730 *
10731 * - try to find the first unused crtc that can drive this connector,
10732 * and use that if we find one
10733 */
10734
10735 /* See if we already have a CRTC for this connector */
10736 if (connector->state->crtc) {
10737 crtc = connector->state->crtc;
10738
10739 ret = drm_modeset_lock(&crtc->mutex, ctx);
10740 if (ret)
10741 goto fail;
10742
10743 /* Make sure the crtc and connector are running */
10744 goto found;
10745 }
10746
10747 /* Find an unused one (if possible) */
10748 for_each_crtc(dev, possible_crtc) {
10749 i++;
10750 if (!(encoder->possible_crtcs & (1 << i)))
10751 continue;
10752
10753 ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
10754 if (ret)
10755 goto fail;
10756
10757 if (possible_crtc->state->enable) {
10758 drm_modeset_unlock(&possible_crtc->mutex);
10759 continue;
10760 }
10761
10762 crtc = possible_crtc;
10763 break;
10764 }
10765
10766 /*
10767 * If we didn't find an unused CRTC, don't use any.
10768 */
10769 if (!crtc) {
10770 DRM_DEBUG_KMS("no pipe available for load-detect\n");
10771 ret = -ENODEV;
10772 goto fail;
10773 }
10774
10775 found:
10776 intel_crtc = to_intel_crtc(crtc);
10777
10778 state = drm_atomic_state_alloc(dev);
10779 restore_state = drm_atomic_state_alloc(dev);
10780 if (!state || !restore_state) {
10781 ret = -ENOMEM;
10782 goto fail;
10783 }
10784
10785 state->acquire_ctx = ctx;
10786 restore_state->acquire_ctx = ctx;
10787
10788 connector_state = drm_atomic_get_connector_state(state, connector);
10789 if (IS_ERR(connector_state)) {
10790 ret = PTR_ERR(connector_state);
10791 goto fail;
10792 }
10793
10794 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
10795 if (ret)
10796 goto fail;
10797
10798 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
10799 if (IS_ERR(crtc_state)) {
10800 ret = PTR_ERR(crtc_state);
10801 goto fail;
10802 }
10803
10804 crtc_state->base.active = crtc_state->base.enable = true;
10805
10806 if (!mode)
10807 mode = &load_detect_mode;
10808
10809 ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
10810 if (ret)
10811 goto fail;
10812
10813 ret = intel_modeset_disable_planes(state, crtc);
10814 if (ret)
10815 goto fail;
10816
10817 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
10818 if (!ret)
10819 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
10820 if (!ret)
10821 ret = drm_atomic_add_affected_planes(restore_state, crtc);
10822 if (ret) {
10823 DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
10824 goto fail;
10825 }
10826
10827 ret = drm_atomic_commit(state);
10828 if (ret) {
10829 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
10830 goto fail;
10831 }
10832
10833 old->restore_state = restore_state;
10834 drm_atomic_state_put(state);
10835
10836 /* let the connector get through one full cycle before testing */
10837 intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
10838 return true;
10839
10840 fail:
10841 if (state) {
10842 drm_atomic_state_put(state);
10843 state = NULL;
10844 }
10845 if (restore_state) {
10846 drm_atomic_state_put(restore_state);
10847 restore_state = NULL;
10848 }
10849
10850 if (ret == -EDEADLK)
10851 return ret;
10852
10853 return false;
10854 }
10855
10856 void intel_release_load_detect_pipe(struct drm_connector *connector,
10857 struct intel_load_detect_pipe *old,
10858 struct drm_modeset_acquire_ctx *ctx)
10859 {
10860 struct intel_encoder *intel_encoder =
10861 intel_attached_encoder(connector);
10862 struct drm_encoder *encoder = &intel_encoder->base;
10863 struct drm_atomic_state *state = old->restore_state;
10864 int ret;
10865
10866 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10867 connector->base.id, connector->name,
10868 encoder->base.id, encoder->name);
10869
10870 if (!state)
10871 return;
10872
10873 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
10874 if (ret)
10875 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
10876 drm_atomic_state_put(state);
10877 }
10878
10879 static int i9xx_pll_refclk(struct drm_device *dev,
10880 const struct intel_crtc_state *pipe_config)
10881 {
10882 struct drm_i915_private *dev_priv = to_i915(dev);
10883 u32 dpll = pipe_config->dpll_hw_state.dpll;
10884
10885 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
10886 return dev_priv->vbt.lvds_ssc_freq;
10887 else if (HAS_PCH_SPLIT(dev_priv))
10888 return 120000;
10889 else if (!IS_GEN(dev_priv, 2))
10890 return 96000;
10891 else
10892 return 48000;
10893 }
10894
10895 /* Returns the clock of the currently programmed mode of the given pipe. */
10896 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
10897 struct intel_crtc_state *pipe_config)
10898 {
10899 struct drm_device *dev = crtc->base.dev;
10900 struct drm_i915_private *dev_priv = to_i915(dev);
10901 int pipe = pipe_config->cpu_transcoder;
10902 u32 dpll = pipe_config->dpll_hw_state.dpll;
10903 u32 fp;
10904 struct dpll clock;
10905 int port_clock;
10906 int refclk = i9xx_pll_refclk(dev, pipe_config);
10907
10908 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
10909 fp = pipe_config->dpll_hw_state.fp0;
10910 else
10911 fp = pipe_config->dpll_hw_state.fp1;
10912
10913 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
10914 if (IS_PINEVIEW(dev_priv)) {
10915 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
10916 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
10917 } else {
10918 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
10919 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
10920 }
10921
10922 if (!IS_GEN(dev_priv, 2)) {
10923 if (IS_PINEVIEW(dev_priv))
10924 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
10925 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
10926 else
10927 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
10928 DPLL_FPA01_P1_POST_DIV_SHIFT);
10929
10930 switch (dpll & DPLL_MODE_MASK) {
10931 case DPLLB_MODE_DAC_SERIAL:
10932 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
10933 5 : 10;
10934 break;
10935 case DPLLB_MODE_LVDS:
10936 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
10937 7 : 14;
10938 break;
10939 default:
10940 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
10941 "mode\n", (int)(dpll & DPLL_MODE_MASK));
10942 return;
10943 }
10944
10945 if (IS_PINEVIEW(dev_priv))
10946 port_clock = pnv_calc_dpll_params(refclk, &clock);
10947 else
10948 port_clock = i9xx_calc_dpll_params(refclk, &clock);
10949 } else {
10950 u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
10951 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
10952
10953 if (is_lvds) {
10954 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
10955 DPLL_FPA01_P1_POST_DIV_SHIFT);
10956
10957 if (lvds & LVDS_CLKB_POWER_UP)
10958 clock.p2 = 7;
10959 else
10960 clock.p2 = 14;
10961 } else {
10962 if (dpll & PLL_P1_DIVIDE_BY_TWO)
10963 clock.p1 = 2;
10964 else {
10965 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
10966 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
10967 }
10968 if (dpll & PLL_P2_DIVIDE_BY_4)
10969 clock.p2 = 4;
10970 else
10971 clock.p2 = 2;
10972 }
10973
10974 port_clock = i9xx_calc_dpll_params(refclk, &clock);
10975 }
10976
10977 /*
10978 * This value includes pixel_multiplier. We will use
10979 * port_clock to compute adjusted_mode.crtc_clock in the
10980 * encoder's get_config() function.
10981 */
10982 pipe_config->port_clock = port_clock;
10983 }
10984
10985 int intel_dotclock_calculate(int link_freq,
10986 const struct intel_link_m_n *m_n)
10987 {
10988 /*
10989 * The calculation for the data clock is:
10990 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
10991 * But we want to avoid losing precision if possible, so:
10992 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
10993 *
10994 * and the calculation from the link M/N values is simpler:
10995 * pixel_clock = (link_m * link_clock) / link_n
10996 */
10997
10998 if (!m_n->link_n)
10999 return 0;
11000
11001 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
11002 }
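/*
 * Worked example, with illustrative numbers: link_m = 107163,
 * link_n = 524288 (a common 0x80000 N value) and a 270000 kHz
 * link clock give 107163 * 270000 / 524288 ~= 55187 kHz, i.e. a
 * ~55.2 MHz dot clock recovered from the link M/N values.
 */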
11003
11004 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
11005 struct intel_crtc_state *pipe_config)
11006 {
11007 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11008
11009 /* read out port_clock from the DPLL */
11010 i9xx_crtc_clock_get(crtc, pipe_config);
11011
11012 /*
11013 * In case there is an active pipe without active ports,
11014 * we may need some idea for the dotclock anyway.
11015 * Calculate one based on the FDI configuration.
11016 */
11017 pipe_config->base.adjusted_mode.crtc_clock =
11018 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
11019 &pipe_config->fdi_m_n);
11020 }
11021
11022 /* Returns the currently programmed mode of the given encoder. */
11023 struct drm_display_mode *
11024 intel_encoder_current_mode(struct intel_encoder *encoder)
11025 {
11026 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
11027 struct intel_crtc_state *crtc_state;
11028 struct drm_display_mode *mode;
11029 struct intel_crtc *crtc;
11030 enum pipe pipe;
11031
11032 if (!encoder->get_hw_state(encoder, &pipe))
11033 return NULL;
11034
11035 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
11036
11037 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
11038 if (!mode)
11039 return NULL;
11040
11041 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
11042 if (!crtc_state) {
11043 kfree(mode);
11044 return NULL;
11045 }
11046
11047 crtc_state->base.crtc = &crtc->base;
11048
11049 if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
11050 kfree(crtc_state);
11051 kfree(mode);
11052 return NULL;
11053 }
11054
11055 encoder->get_config(encoder, crtc_state);
11056
11057 intel_mode_from_pipe_config(mode, crtc_state);
11058
11059 kfree(crtc_state);
11060
11061 return mode;
11062 }
11063
11064 static void intel_crtc_destroy(struct drm_crtc *crtc)
11065 {
11066 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11067
11068 drm_crtc_cleanup(crtc);
11069 kfree(intel_crtc);
11070 }
11071
11072 /**
11073 * intel_wm_need_update - Check whether watermarks need updating
11074 * @cur: current plane state
11075 * @new: new plane state
11076 *
11077 * Check current plane state versus the new one to determine whether
11078 * watermarks need to be recalculated.
11079 *
11080 * Returns true if the watermarks need recalculating, false otherwise.
11081 */
11082 static bool intel_wm_need_update(struct intel_plane_state *cur,
11083 struct intel_plane_state *new)
11084 {
11085 /* Update watermarks on tiling or size changes. */
11086 if (new->base.visible != cur->base.visible)
11087 return true;
11088
11089 if (!cur->base.fb || !new->base.fb)
11090 return false;
11091
11092 if (cur->base.fb->modifier != new->base.fb->modifier ||
11093 cur->base.rotation != new->base.rotation ||
11094 drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
11095 drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
11096 drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
11097 drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
11098 return true;
11099
11100 return false;
11101 }
11102
11103 static bool needs_scaling(const struct intel_plane_state *state)
11104 {
11105 int src_w = drm_rect_width(&state->base.src) >> 16;
11106 int src_h = drm_rect_height(&state->base.src) >> 16;
11107 int dst_w = drm_rect_width(&state->base.dst);
11108 int dst_h = drm_rect_height(&state->base.dst);
11109
11110 return (src_w != dst_w || src_h != dst_h);
11111 }
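/*
 * The src rectangle is in 16.16 fixed point, hence the >> 16
 * above: a 1920 pixel wide source is stored as 1920 << 16 =
 * 125829120, and after the shift it compares directly against
 * the integer dst width; any mismatch means scaling is needed.
 */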
11112
11113 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
11114 struct drm_crtc_state *crtc_state,
11115 const struct intel_plane_state *old_plane_state,
11116 struct drm_plane_state *plane_state)
11117 {
11118 struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
11119 struct drm_crtc *crtc = crtc_state->crtc;
11120 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11121 struct intel_plane *plane = to_intel_plane(plane_state->plane);
11122 struct drm_device *dev = crtc->dev;
11123 struct drm_i915_private *dev_priv = to_i915(dev);
11124 bool mode_changed = needs_modeset(crtc_state);
11125 bool was_crtc_enabled = old_crtc_state->base.active;
11126 bool is_crtc_enabled = crtc_state->active;
11127 bool turn_off, turn_on, visible, was_visible;
11128 struct drm_framebuffer *fb = plane_state->fb;
11129 int ret;
11130
11131 if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
11132 ret = skl_update_scaler_plane(
11133 to_intel_crtc_state(crtc_state),
11134 to_intel_plane_state(plane_state));
11135 if (ret)
11136 return ret;
11137 }
11138
11139 was_visible = old_plane_state->base.visible;
11140 visible = plane_state->visible;
11141
11142 if (!was_crtc_enabled && WARN_ON(was_visible))
11143 was_visible = false;
11144
11145 /*
11146 * Visibility is calculated as if the crtc was on, but
11147 * after scaler setup everything depends on it being off
11148 * when the crtc isn't active.
11149 *
11150 * FIXME this is wrong for watermarks. Watermarks should also
11151 * be computed as if the pipe would be active. Perhaps move
11152 * per-plane wm computation to the .check_plane() hook, and
11153 * only combine the results from all planes in the current place?
11154 */
11155 if (!is_crtc_enabled) {
11156 plane_state->visible = visible = false;
11157 to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
11158 }
11159
11160 if (!was_visible && !visible)
11161 return 0;
11162
11163 if (fb != old_plane_state->base.fb)
11164 pipe_config->fb_changed = true;
11165
11166 turn_off = was_visible && (!visible || mode_changed);
11167 turn_on = visible && (!was_visible || mode_changed);
11168
11169 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
11170 intel_crtc->base.base.id, intel_crtc->base.name,
11171 plane->base.base.id, plane->base.name,
11172 fb ? fb->base.id : -1);
11173
11174 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
11175 plane->base.base.id, plane->base.name,
11176 was_visible, visible,
11177 turn_off, turn_on, mode_changed);
11178
11179 if (turn_on) {
11180 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11181 pipe_config->update_wm_pre = true;
11182
11183 /* must disable cxsr around plane enable/disable */
11184 if (plane->id != PLANE_CURSOR)
11185 pipe_config->disable_cxsr = true;
11186 } else if (turn_off) {
11187 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11188 pipe_config->update_wm_post = true;
11189
11190 /* must disable cxsr around plane enable/disable */
11191 if (plane->id != PLANE_CURSOR)
11192 pipe_config->disable_cxsr = true;
11193 } else if (intel_wm_need_update(to_intel_plane_state(plane->base.state),
11194 to_intel_plane_state(plane_state))) {
11195 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
11196 /* FIXME bollocks */
11197 pipe_config->update_wm_pre = true;
11198 pipe_config->update_wm_post = true;
11199 }
11200 }
11201
11202 if (visible || was_visible)
11203 pipe_config->fb_bits |= plane->frontbuffer_bit;
11204
11205 /*
11206 * ILK/SNB DVSACNTR/Sprite Enable
11207 * IVB SPR_CTL/Sprite Enable
11208 * "When in Self Refresh Big FIFO mode, a write to enable the
11209 * plane will be internally buffered and delayed while Big FIFO
11210 * mode is exiting."
11211 *
11212 * Which means that enabling the sprite can take an extra frame
11213 * when we start in big FIFO mode (LP1+). Thus we need to drop
11214 * down to LP0 and wait for vblank in order to make sure the
11215 * sprite gets enabled on the next vblank after the register write.
11216 * Doing otherwise would risk enabling the sprite one frame after
11217 * we've already signalled flip completion. We can resume LP1+
11218 * once the sprite has been enabled.
11219 *
11220 *
11221 * WaCxSRDisabledForSpriteScaling:ivb
11222 * IVB SPR_SCALE/Scaling Enable
11223 * "Low Power watermarks must be disabled for at least one
11224 * frame before enabling sprite scaling, and kept disabled
11225 * until sprite scaling is disabled."
11226 *
11227 * ILK/SNB DVSASCALE/Scaling Enable
11228 * "When in Self Refresh Big FIFO mode, scaling enable will be
11229 * masked off while Big FIFO mode is exiting."
11230 *
11231 * Despite the w/a only being listed for IVB we assume that
11232 * the ILK/SNB note has similar ramifications, hence we apply
11233 * the w/a on all three platforms.
11234 *
11235 * Experimental results suggest this is needed also for the primary
11236 * plane, not only the sprite plane.
11237 */
11238 if (plane->id != PLANE_CURSOR &&
11239 (IS_GEN_RANGE(dev_priv, 5, 6) ||
11240 IS_IVYBRIDGE(dev_priv)) &&
11241 (turn_on || (!needs_scaling(old_plane_state) &&
11242 needs_scaling(to_intel_plane_state(plane_state)))))
11243 pipe_config->disable_lp_wm = true;
11244
11245 return 0;
11246 }
11247
11248 static bool encoders_cloneable(const struct intel_encoder *a,
11249 const struct intel_encoder *b)
11250 {
11251 /* masks could be asymmetric, so check both ways */
11252 return a == b || (a->cloneable & (1 << b->type) &&
11253 b->cloneable & (1 << a->type));
11254 }
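/*
 * E.g. if a->cloneable contains BIT(b->type) but b->cloneable
 * lacks BIT(a->type), the pair is rejected: cloning is allowed
 * only when each encoder's mask permits the other's type, or
 * when a and b are the same encoder.
 */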
11255
11256 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11257 struct intel_crtc *crtc,
11258 struct intel_encoder *encoder)
11259 {
11260 struct intel_encoder *source_encoder;
11261 struct drm_connector *connector;
11262 struct drm_connector_state *connector_state;
11263 int i;
11264
11265 for_each_new_connector_in_state(state, connector, connector_state, i) {
11266 if (connector_state->crtc != &crtc->base)
11267 continue;
11268
11269 source_encoder =
11270 to_intel_encoder(connector_state->best_encoder);
11271 if (!encoders_cloneable(encoder, source_encoder))
11272 return false;
11273 }
11274
11275 return true;
11276 }
11277
11278 static int icl_add_linked_planes(struct intel_atomic_state *state)
11279 {
11280 struct intel_plane *plane, *linked;
11281 struct intel_plane_state *plane_state, *linked_plane_state;
11282 int i;
11283
11284 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11285 linked = plane_state->linked_plane;
11286
11287 if (!linked)
11288 continue;
11289
11290 linked_plane_state = intel_atomic_get_plane_state(state, linked);
11291 if (IS_ERR(linked_plane_state))
11292 return PTR_ERR(linked_plane_state);
11293
11294 WARN_ON(linked_plane_state->linked_plane != plane);
11295 WARN_ON(linked_plane_state->slave == plane_state->slave);
11296 }
11297
11298 return 0;
11299 }
11300
11301 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
11302 {
11303 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
11304 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11305 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
11306 struct intel_plane *plane, *linked;
11307 struct intel_plane_state *plane_state;
11308 int i;
11309
11310 if (INTEL_GEN(dev_priv) < 11)
11311 return 0;
11312
11313 /*
11314 * Destroy all old plane links and make the slave plane invisible
11315 * in the crtc_state->active_planes mask.
11316 */
11317 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11318 if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
11319 continue;
11320
11321 plane_state->linked_plane = NULL;
11322 if (plane_state->slave && !plane_state->base.visible) {
11323 crtc_state->active_planes &= ~BIT(plane->id);
11324 crtc_state->update_planes |= BIT(plane->id);
11325 }
11326
11327 plane_state->slave = false;
11328 }
11329
11330 if (!crtc_state->nv12_planes)
11331 return 0;
11332
11333 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11334 struct intel_plane_state *linked_state = NULL;
11335
11336 if (plane->pipe != crtc->pipe ||
11337 !(crtc_state->nv12_planes & BIT(plane->id)))
11338 continue;
11339
11340 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
11341 if (!icl_is_nv12_y_plane(linked->id))
11342 continue;
11343
11344 if (crtc_state->active_planes & BIT(linked->id))
11345 continue;
11346
11347 linked_state = intel_atomic_get_plane_state(state, linked);
11348 if (IS_ERR(linked_state))
11349 return PTR_ERR(linked_state);
11350
11351 break;
11352 }
11353
11354 if (!linked_state) {
11355 DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
11356 hweight8(crtc_state->nv12_planes));
11357
11358 return -EINVAL;
11359 }
11360
11361 plane_state->linked_plane = linked;
11362
11363 linked_state->slave = true;
11364 linked_state->linked_plane = plane;
11365 crtc_state->active_planes |= BIT(linked->id);
11366 crtc_state->update_planes |= BIT(linked->id);
11367 DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
11368 }
11369
11370 return 0;
11371 }
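/*
 * In summary: on gen11+ each visible NV12 (planar YUV) plane
 * gets an otherwise unused Y plane on the same pipe as its
 * slave; the slave is forced active so it gets programmed, and
 * the check fails with -EINVAL once no free Y planes remain.
 */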
11372
11373 static int intel_crtc_atomic_check(struct drm_crtc *crtc,
11374 struct drm_crtc_state *crtc_state)
11375 {
11376 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
11377 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11378 struct intel_crtc_state *pipe_config =
11379 to_intel_crtc_state(crtc_state);
11380 int ret;
11381 bool mode_changed = needs_modeset(crtc_state);
11382
11383 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
11384 mode_changed && !crtc_state->active)
11385 pipe_config->update_wm_post = true;
11386
11387 if (mode_changed && crtc_state->enable &&
11388 dev_priv->display.crtc_compute_clock &&
11389 !WARN_ON(pipe_config->shared_dpll)) {
11390 ret = dev_priv->display.crtc_compute_clock(intel_crtc,
11391 pipe_config);
11392 if (ret)
11393 return ret;
11394 }
11395
11396 if (mode_changed || pipe_config->update_pipe ||
11397 crtc_state->color_mgmt_changed) {
11398 ret = intel_color_check(pipe_config);
11399 if (ret)
11400 return ret;
11401 }
11402
11403 ret = 0;
11404 if (dev_priv->display.compute_pipe_wm) {
11405 ret = dev_priv->display.compute_pipe_wm(pipe_config);
11406 if (ret) {
11407 DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
11408 return ret;
11409 }
11410 }
11411
11412 if (dev_priv->display.compute_intermediate_wm) {
11413 if (WARN_ON(!dev_priv->display.compute_pipe_wm))
11414 return 0;
11415
11416 /*
11417 * Calculate 'intermediate' watermarks that satisfy both the
11418 * old state and the new state. We can program these
11419 * immediately.
11420 */
11421 ret = dev_priv->display.compute_intermediate_wm(pipe_config);
11422 if (ret) {
11423 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
11424 return ret;
11425 }
11426 }
11427
11428 if (INTEL_GEN(dev_priv) >= 9) {
11429 if (mode_changed || pipe_config->update_pipe)
11430 ret = skl_update_scaler_crtc(pipe_config);
11431
11432 if (!ret)
11433 ret = icl_check_nv12_planes(pipe_config);
11434 if (!ret)
11435 ret = skl_check_pipe_max_pixel_rate(intel_crtc,
11436 pipe_config);
11437 if (!ret)
11438 ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
11439 pipe_config);
11440 }
11441
11442 if (HAS_IPS(dev_priv))
11443 pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);
11444
11445 return ret;
11446 }
11447
11448 static const struct drm_crtc_helper_funcs intel_helper_funcs = {
11449 .atomic_check = intel_crtc_atomic_check,
11450 };
11451
11452 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
11453 {
11454 struct intel_connector *connector;
11455 struct drm_connector_list_iter conn_iter;
11456
11457 drm_connector_list_iter_begin(dev, &conn_iter);
11458 for_each_intel_connector_iter(connector, &conn_iter) {
11459 if (connector->base.state->crtc)
11460 drm_connector_put(&connector->base);
11461
11462 if (connector->base.encoder) {
11463 connector->base.state->best_encoder =
11464 connector->base.encoder;
11465 connector->base.state->crtc =
11466 connector->base.encoder->crtc;
11467
11468 drm_connector_get(&connector->base);
11469 } else {
11470 connector->base.state->best_encoder = NULL;
11471 connector->base.state->crtc = NULL;
11472 }
11473 }
11474 drm_connector_list_iter_end(&conn_iter);
11475 }
11476
11477 static int
11478 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
11479 struct intel_crtc_state *pipe_config)
11480 {
11481 struct drm_connector *connector = conn_state->connector;
11482 const struct drm_display_info *info = &connector->display_info;
11483 int bpp;
11484
11485 switch (conn_state->max_bpc) {
11486 case 6 ... 7:
11487 bpp = 6 * 3;
11488 break;
11489 case 8 ... 9:
11490 bpp = 8 * 3;
11491 break;
11492 case 10 ... 11:
11493 bpp = 10 * 3;
11494 break;
11495 case 12:
11496 bpp = 12 * 3;
11497 break;
11498 default:
11499 return -EINVAL;
11500 }
11501
11502 if (bpp < pipe_config->pipe_bpp) {
11503 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
11504 "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
11505 connector->base.id, connector->name,
11506 bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
11507 pipe_config->pipe_bpp);
11508
11509 pipe_config->pipe_bpp = bpp;
11510 }
11511
11512 return 0;
11513 }
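/*
 * Note that pipe_bpp counts all three color channels, i.e.
 * 3 * bpc: a connector capped at max_bpc 10 yields 30 bpp here,
 * and the pipe value is only ever clamped downwards.
 */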
11514
11515 static int
11516 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
11517 struct intel_crtc_state *pipe_config)
11518 {
11519 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11520 struct drm_atomic_state *state = pipe_config->base.state;
11521 struct drm_connector *connector;
11522 struct drm_connector_state *connector_state;
11523 int bpp, i;
11524
11525 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
11526 IS_CHERRYVIEW(dev_priv)))
11527 bpp = 10*3;
11528 else if (INTEL_GEN(dev_priv) >= 5)
11529 bpp = 12*3;
11530 else
11531 bpp = 8*3;
11532
11533 pipe_config->pipe_bpp = bpp;
11534
11535 /* Clamp display bpp to connector max bpp */
11536 for_each_new_connector_in_state(state, connector, connector_state, i) {
11537 int ret;
11538
11539 if (connector_state->crtc != &crtc->base)
11540 continue;
11541
11542 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
11543 if (ret)
11544 return ret;
11545 }
11546
11547 return 0;
11548 }
11549
11550 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
11551 {
11552 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
11553 "type: 0x%x flags: 0x%x\n",
11554 mode->crtc_clock,
11555 mode->crtc_hdisplay, mode->crtc_hsync_start,
11556 mode->crtc_hsync_end, mode->crtc_htotal,
11557 mode->crtc_vdisplay, mode->crtc_vsync_start,
11558 mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
11559 }
11560
11561 static inline void
11562 intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
11563 unsigned int lane_count, struct intel_link_m_n *m_n)
11564 {
11565 DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
11566 id, lane_count,
11567 m_n->gmch_m, m_n->gmch_n,
11568 m_n->link_m, m_n->link_n, m_n->tu);
11569 }
11570
11571 static void
11572 intel_dump_infoframe(struct drm_i915_private *dev_priv,
11573 const union hdmi_infoframe *frame)
11574 {
11575 if ((drm_debug & DRM_UT_KMS) == 0)
11576 return;
11577
11578 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
11579 }
11580
11581 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
11582
11583 static const char * const output_type_str[] = {
11584 OUTPUT_TYPE(UNUSED),
11585 OUTPUT_TYPE(ANALOG),
11586 OUTPUT_TYPE(DVO),
11587 OUTPUT_TYPE(SDVO),
11588 OUTPUT_TYPE(LVDS),
11589 OUTPUT_TYPE(TVOUT),
11590 OUTPUT_TYPE(HDMI),
11591 OUTPUT_TYPE(DP),
11592 OUTPUT_TYPE(EDP),
11593 OUTPUT_TYPE(DSI),
11594 OUTPUT_TYPE(DDI),
11595 OUTPUT_TYPE(DP_MST),
11596 };
11597
11598 #undef OUTPUT_TYPE
11599
11600 static void snprintf_output_types(char *buf, size_t len,
11601 unsigned int output_types)
11602 {
11603 char *str = buf;
11604 int i;
11605
11606 str[0] = '\0';
11607
11608 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
11609 int r;
11610
11611 if ((output_types & BIT(i)) == 0)
11612 continue;
11613
11614 r = snprintf(str, len, "%s%s",
11615 str != buf ? "," : "", output_type_str[i]);
11616 if (r >= len)
11617 break;
11618 str += r;
11619 len -= r;
11620
11621 output_types &= ~BIT(i);
11622 }
11623
11624 WARN_ON_ONCE(output_types != 0);
11625 }
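/*
 * E.g. an output_types mask with the HDMI and DP bits set is
 * formatted as "HDMI,DP" (in output_type_str[] order); any bit
 * without a name survives the loop and trips the WARN_ON_ONCE.
 */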
11626
11627 static const char * const output_format_str[] = {
11628 [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
11629 [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
11630 [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
11631 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
11632 };
11633
11634 static const char *output_formats(enum intel_output_format format)
11635 {
11636 if (format >= ARRAY_SIZE(output_format_str))
11637 format = INTEL_OUTPUT_FORMAT_INVALID;
11638 return output_format_str[format];
11639 }
11640
11641 static void intel_dump_pipe_config(struct intel_crtc *crtc,
11642 struct intel_crtc_state *pipe_config,
11643 const char *context)
11644 {
11645 struct drm_device *dev = crtc->base.dev;
11646 struct drm_i915_private *dev_priv = to_i915(dev);
11647 struct drm_plane *plane;
11648 struct intel_plane *intel_plane;
11649 struct intel_plane_state *state;
11650 struct drm_framebuffer *fb;
11651 char buf[64];
11652
11653 DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
11654 crtc->base.base.id, crtc->base.name, context);
11655
11656 snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
11657 DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
11658 buf, pipe_config->output_types);
11659
11660 DRM_DEBUG_KMS("output format: %s\n",
11661 output_formats(pipe_config->output_format));
11662
11663 DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
11664 transcoder_name(pipe_config->cpu_transcoder),
11665 pipe_config->pipe_bpp, pipe_config->dither);
11666
11667 if (pipe_config->has_pch_encoder)
11668 intel_dump_m_n_config(pipe_config, "fdi",
11669 pipe_config->fdi_lanes,
11670 &pipe_config->fdi_m_n);
11671
11672 if (intel_crtc_has_dp_encoder(pipe_config)) {
11673 intel_dump_m_n_config(pipe_config, "dp m_n",
11674 pipe_config->lane_count, &pipe_config->dp_m_n);
11675 if (pipe_config->has_drrs)
11676 intel_dump_m_n_config(pipe_config, "dp m2_n2",
11677 pipe_config->lane_count,
11678 &pipe_config->dp_m2_n2);
11679 }
11680
11681 DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
11682 pipe_config->has_audio, pipe_config->has_infoframe);
11683
11684 DRM_DEBUG_KMS("infoframes enabled: 0x%x\n",
11685 pipe_config->infoframes.enable);
11686
11687 if (pipe_config->infoframes.enable &
11688 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
11689 DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
11690 if (pipe_config->infoframes.enable &
11691 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
11692 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
11693 if (pipe_config->infoframes.enable &
11694 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
11695 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
11696 if (pipe_config->infoframes.enable &
11697 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
11698 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
11699
11700 DRM_DEBUG_KMS("requested mode:\n");
11701 drm_mode_debug_printmodeline(&pipe_config->base.mode);
11702 DRM_DEBUG_KMS("adjusted mode:\n");
11703 drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
11704 intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
11705 DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
11706 pipe_config->port_clock,
11707 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
11708 pipe_config->pixel_rate);
11709
11710 if (INTEL_GEN(dev_priv) >= 9)
11711 DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
11712 crtc->num_scalers,
11713 pipe_config->scaler_state.scaler_users,
11714 pipe_config->scaler_state.scaler_id);
11715
11716 if (HAS_GMCH(dev_priv))
11717 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
11718 pipe_config->gmch_pfit.control,
11719 pipe_config->gmch_pfit.pgm_ratios,
11720 pipe_config->gmch_pfit.lvds_border_bits);
11721 else
11722 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
11723 pipe_config->pch_pfit.pos,
11724 pipe_config->pch_pfit.size,
11725 enableddisabled(pipe_config->pch_pfit.enabled));
11726
11727 DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
11728 pipe_config->ips_enabled, pipe_config->double_wide);
11729
11730 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
11731
11732 DRM_DEBUG_KMS("planes on this crtc\n");
11733 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
11734 struct drm_format_name_buf format_name;
11735 intel_plane = to_intel_plane(plane);
11736 if (intel_plane->pipe != crtc->pipe)
11737 continue;
11738
11739 state = to_intel_plane_state(plane->state);
11740 fb = state->base.fb;
11741 if (!fb) {
11742 DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
11743 plane->base.id, plane->name, state->scaler_id);
11744 continue;
11745 }
11746
11747 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
11748 plane->base.id, plane->name,
11749 fb->base.id, fb->width, fb->height,
11750 drm_get_format_name(fb->format->format, &format_name));
11751 if (INTEL_GEN(dev_priv) >= 9)
11752 DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
11753 state->scaler_id,
11754 state->base.src.x1 >> 16,
11755 state->base.src.y1 >> 16,
11756 drm_rect_width(&state->base.src) >> 16,
11757 drm_rect_height(&state->base.src) >> 16,
11758 state->base.dst.x1, state->base.dst.y1,
11759 drm_rect_width(&state->base.dst),
11760 drm_rect_height(&state->base.dst));
11761 }
11762 }
11763
11764 static bool check_digital_port_conflicts(struct drm_atomic_state *state)
11765 {
11766 struct drm_device *dev = state->dev;
11767 struct drm_connector *connector;
11768 struct drm_connector_list_iter conn_iter;
11769 unsigned int used_ports = 0;
11770 unsigned int used_mst_ports = 0;
11771 bool ret = true;
11772
11773 /*
11774 * Walk the connector list instead of the encoder
11775 * list to detect the problem on ddi platforms
11776 * where there's just one encoder per digital port.
11777 */
11778 drm_connector_list_iter_begin(dev, &conn_iter);
11779 drm_for_each_connector_iter(connector, &conn_iter) {
11780 struct drm_connector_state *connector_state;
11781 struct intel_encoder *encoder;
11782
11783 connector_state = drm_atomic_get_new_connector_state(state, connector);
11784 if (!connector_state)
11785 connector_state = connector->state;
11786
11787 if (!connector_state->best_encoder)
11788 continue;
11789
11790 encoder = to_intel_encoder(connector_state->best_encoder);
11791
11792 WARN_ON(!connector_state->crtc);
11793
11794 switch (encoder->type) {
11795 unsigned int port_mask;
11796 case INTEL_OUTPUT_DDI:
11797 if (WARN_ON(!HAS_DDI(to_i915(dev))))
11798 break;
11799 /* else: fall through */
11800 case INTEL_OUTPUT_DP:
11801 case INTEL_OUTPUT_HDMI:
11802 case INTEL_OUTPUT_EDP:
11803 port_mask = 1 << encoder->port;
11804
11805 /* the same port mustn't appear more than once */
11806 if (used_ports & port_mask)
11807 ret = false;
11808
11809 used_ports |= port_mask;
11810 break;
11811 case INTEL_OUTPUT_DP_MST:
11812 used_mst_ports |=
11813 1 << encoder->port;
11814 break;
11815 default:
11816 break;
11817 }
11818 }
11819 drm_connector_list_iter_end(&conn_iter);
11820
11821 /* can't mix MST and SST/HDMI on the same port */
11822 if (used_ports & used_mst_ports)
11823 return false;
11824
11825 return ret;
11826 }
11827
11828 static int
11829 clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
11830 {
11831 struct drm_i915_private *dev_priv =
11832 to_i915(crtc_state->base.crtc->dev);
11833 struct intel_crtc_state *saved_state;
11834
11835 saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
11836 if (!saved_state)
11837 return -ENOMEM;
11838
11839 /* FIXME: before the switch to atomic started, a new pipe_config was
11840 * kzalloc'd. Code that depends on any field being zero should be
11841 * fixed, so that the crtc_state can be safely duplicated. For now,
11842 * only fields that are known not to cause problems are preserved. */
11843
11844 saved_state->scaler_state = crtc_state->scaler_state;
11845 saved_state->shared_dpll = crtc_state->shared_dpll;
11846 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
11847 saved_state->pch_pfit.force_thru = crtc_state->pch_pfit.force_thru;
11848 saved_state->crc_enabled = crtc_state->crc_enabled;
11849 if (IS_G4X(dev_priv) ||
11850 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
11851 saved_state->wm = crtc_state->wm;
11852
11853 /* Keep base drm_crtc_state intact, only clear our extended struct */
11854 BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
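/*
 * The BUILD_BUG_ON guarantees base sits at offset 0, so
 * &crtc_state->base + 1 points just past the embedded base
 * struct: the memcpy below copies the (mostly zeroed) tail of
 * saved_state over every extended field while leaving the base
 * drm_crtc_state untouched.
 */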
11855 memcpy(&crtc_state->base + 1, &saved_state->base + 1,
11856 sizeof(*crtc_state) - sizeof(crtc_state->base));
11857
11858 kfree(saved_state);
11859 return 0;
11860 }
11861
11862 static int
11863 intel_modeset_pipe_config(struct drm_crtc *crtc,
11864 struct intel_crtc_state *pipe_config)
11865 {
11866 struct drm_atomic_state *state = pipe_config->base.state;
11867 struct intel_encoder *encoder;
11868 struct drm_connector *connector;
11869 struct drm_connector_state *connector_state;
11870 int base_bpp, ret;
11871 int i;
11872 bool retry = true;
11873
11874 ret = clear_intel_crtc_state(pipe_config);
11875 if (ret)
11876 return ret;
11877
11878 pipe_config->cpu_transcoder =
11879 (enum transcoder) to_intel_crtc(crtc)->pipe;
11880
11881 /*
11882 * Sanitize sync polarity flags based on requested ones. If neither
11883 * positive nor negative polarity is requested, treat this as meaning
11884 * negative polarity.
11885 */
11886 if (!(pipe_config->base.adjusted_mode.flags &
11887 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
11888 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
11889
11890 if (!(pipe_config->base.adjusted_mode.flags &
11891 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
11892 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
11893
11894 ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
11895 pipe_config);
11896 if (ret)
11897 return ret;
11898
11899 base_bpp = pipe_config->pipe_bpp;
11900
11901 /*
11902 * Determine the real pipe dimensions. Note that stereo modes can
11903 * increase the actual pipe size due to the frame doubling and
11904 * insertion of additional space for blanks between the frames. This
11905 * is stored in the crtc timings. We use the requested mode to do this
11906 * computation to clearly distinguish it from the adjusted mode, which
11907 * can be changed by the connectors in the below retry loop.
11908 */
11909 drm_mode_get_hv_timing(&pipe_config->base.mode,
11910 &pipe_config->pipe_src_w,
11911 &pipe_config->pipe_src_h);
11912
11913 for_each_new_connector_in_state(state, connector, connector_state, i) {
11914 if (connector_state->crtc != crtc)
11915 continue;
11916
11917 encoder = to_intel_encoder(connector_state->best_encoder);
11918
11919 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
11920 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
11921 return -EINVAL;
11922 }
11923
11924 /*
11925 * Determine output_types before calling the .compute_config()
11926 * hooks so that the hooks can use this information safely.
11927 */
11928 if (encoder->compute_output_type)
11929 pipe_config->output_types |=
11930 BIT(encoder->compute_output_type(encoder, pipe_config,
11931 connector_state));
11932 else
11933 pipe_config->output_types |= BIT(encoder->type);
11934 }
11935
11936 encoder_retry:
11937 /* Ensure the port clock defaults are reset when retrying. */
11938 pipe_config->port_clock = 0;
11939 pipe_config->pixel_multiplier = 1;
11940
11941 /* Fill in default crtc timings, allow encoders to overwrite them. */
11942 drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
11943 CRTC_STEREO_DOUBLE);
11944
11945 /* Pass our mode to the connectors and the CRTC to give them a chance to
11946 * adjust it according to limitations or connector properties, and also
11947 * a chance to reject the mode entirely.
11948 */
11949 for_each_new_connector_in_state(state, connector, connector_state, i) {
11950 if (connector_state->crtc != crtc)
11951 continue;
11952
11953 encoder = to_intel_encoder(connector_state->best_encoder);
11954 ret = encoder->compute_config(encoder, pipe_config,
11955 connector_state);
11956 if (ret < 0) {
11957 if (ret != -EDEADLK)
11958 DRM_DEBUG_KMS("Encoder config failure: %d\n",
11959 ret);
11960 return ret;
11961 }
11962 }
11963
11964 /* Set default port clock if not overwritten by the encoder. Needs to be
11965 * done afterwards in case the encoder adjusts the mode. */
11966 if (!pipe_config->port_clock)
11967 pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
11968 * pipe_config->pixel_multiplier;
11969
11970 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
11971 if (ret == -EDEADLK)
11972 return ret;
11973 if (ret < 0) {
11974 DRM_DEBUG_KMS("CRTC fixup failed\n");
11975 return ret;
11976 }
11977
11978 if (ret == RETRY) {
11979 if (WARN(!retry, "loop in pipe configuration computation\n"))
11980 return -EINVAL;
11981
11982 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
11983 retry = false;
11984 goto encoder_retry;
11985 }
11986
11987 /* Dithering seems to not pass through bits correctly when it should, so
11988 * only enable it on 6bpc panels and when it's not a compliance
11989 * test requesting a 6bpc video pattern.
11990 */
11991 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
11992 !pipe_config->dither_force_disable;
11993 DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
11994 base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
11995
11996 return 0;
11997 }
11998
11999 static bool intel_fuzzy_clock_check(int clock1, int clock2)
12000 {
12001 int diff;
12002
12003 if (clock1 == clock2)
12004 return true;
12005
12006 if (!clock1 || !clock2)
12007 return false;
12008
12009 diff = abs(clock1 - clock2);
12010
12011 if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105)
12012 return true;
12013
12014 return false;
12015 }
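/*
 * The check above is algebra for "diff is below roughly 5% of
 * clock1 + clock2", i.e. within ~10% of their mean. E.g. for
 * 100000 vs 105000: (5000 + 100000 + 105000) * 100 / 205000 =
 * 102, which is under 105, so the clocks are treated as equal.
 */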
12016
12017 static bool
12018 intel_compare_m_n(unsigned int m, unsigned int n,
12019 unsigned int m2, unsigned int n2,
12020 bool exact)
12021 {
12022 if (m == m2 && n == n2)
12023 return true;
12024
12025 if (exact || !m || !n || !m2 || !n2)
12026 return false;
12027
12028 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12029
12030 if (n > n2) {
12031 while (n > n2) {
12032 m2 <<= 1;
12033 n2 <<= 1;
12034 }
12035 } else if (n < n2) {
12036 while (n < n2) {
12037 m <<= 1;
12038 n <<= 1;
12039 }
12040 }
12041
12042 if (n != n2)
12043 return false;
12044
12045 return intel_fuzzy_clock_check(m, m2);
12046 }
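/*
 * E.g. comparing 10/20 against 5/10 with exact == false: m2/n2
 * is doubled to 10/20 so the n values meet, and the scaled m
 * values then go through the fuzzy clock check, so M/N pairs
 * that only differ by a power-of-two scaling still match.
 */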
12047
12048 static bool
12049 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12050 struct intel_link_m_n *m2_n2,
12051 bool adjust)
12052 {
12053 if (m_n->tu == m2_n2->tu &&
12054 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12055 m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
12056 intel_compare_m_n(m_n->link_m, m_n->link_n,
12057 m2_n2->link_m, m2_n2->link_n, !adjust)) {
12058 if (adjust)
12059 *m2_n2 = *m_n;
12060
12061 return true;
12062 }
12063
12064 return false;
12065 }
12066
12067 static bool
12068 intel_compare_infoframe(const union hdmi_infoframe *a,
12069 const union hdmi_infoframe *b)
12070 {
12071 return memcmp(a, b, sizeof(*a)) == 0;
12072 }
12073
12074 static void
12075 pipe_config_infoframe_err(struct drm_i915_private *dev_priv,
12076 bool adjust, const char *name,
12077 const union hdmi_infoframe *a,
12078 const union hdmi_infoframe *b)
12079 {
12080 if (adjust) {
12081 if ((drm_debug & DRM_UT_KMS) == 0)
12082 return;
12083
12084 drm_dbg(DRM_UT_KMS, "mismatch in %s infoframe\n", name);
12085 drm_dbg(DRM_UT_KMS, "expected:\n");
12086 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
12087 drm_dbg(DRM_UT_KMS, "found:\n");
12088 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
12089 } else {
12090 drm_err("mismatch in %s infoframe\n", name);
12091 drm_err("expected:\n");
12092 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
12093 drm_err("found:\n");
12094 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
12095 }
12096 }
12097
12098 static void __printf(3, 4)
12099 pipe_config_err(bool adjust, const char *name, const char *format, ...)
12100 {
12101 struct va_format vaf;
12102 va_list args;
12103
12104 va_start(args, format);
12105 vaf.fmt = format;
12106 vaf.va = &args;
12107
12108 if (adjust)
12109 drm_dbg(DRM_UT_KMS, "mismatch in %s %pV", name, &vaf);
12110 else
12111 drm_err("mismatch in %s %pV", name, &vaf);
12112
12113 va_end(args);
12114 }
12115
12116 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
12117 {
12118 if (i915_modparams.fastboot != -1)
12119 return i915_modparams.fastboot;
12120
12121 /* Enable fastboot by default on Skylake and newer */
12122 if (INTEL_GEN(dev_priv) >= 9)
12123 return true;
12124
12125 /* Enable fastboot by default on VLV and CHV */
12126 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12127 return true;
12128
12129 /* Disabled by default on all others */
12130 return false;
12131 }
12132
12133 static bool
12134 intel_pipe_config_compare(struct drm_i915_private *dev_priv,
12135 struct intel_crtc_state *current_config,
12136 struct intel_crtc_state *pipe_config,
12137 bool adjust)
12138 {
12139 bool ret = true;
12140 bool fixup_inherited = adjust &&
12141 (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
12142 !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
12143
12144 if (fixup_inherited && !fastboot_enabled(dev_priv)) {
12145 DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
12146 ret = false;
12147 }
12148
12149 #define PIPE_CONF_CHECK_X(name) do { \
12150 if (current_config->name != pipe_config->name) { \
12151 pipe_config_err(adjust, __stringify(name), \
12152 "(expected 0x%08x, found 0x%08x)\n", \
12153 current_config->name, \
12154 pipe_config->name); \
12155 ret = false; \
12156 } \
12157 } while (0)
12158
12159 #define PIPE_CONF_CHECK_I(name) do { \
12160 if (current_config->name != pipe_config->name) { \
12161 pipe_config_err(adjust, __stringify(name), \
12162 "(expected %i, found %i)\n", \
12163 current_config->name, \
12164 pipe_config->name); \
12165 ret = false; \
12166 } \
12167 } while (0)
12168
12169 #define PIPE_CONF_CHECK_BOOL(name) do { \
12170 if (current_config->name != pipe_config->name) { \
12171 pipe_config_err(adjust, __stringify(name), \
12172 "(expected %s, found %s)\n", \
12173 yesno(current_config->name), \
12174 yesno(pipe_config->name)); \
12175 ret = false; \
12176 } \
12177 } while (0)
12178
12179 /*
12180 * Checks state where we only read out the enabling, but not the entire
12181 * state itself (like full infoframes or ELD for audio). These states
12182 * require a full modeset on bootup to fix up.
12183 */
12184 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
12185 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
12186 PIPE_CONF_CHECK_BOOL(name); \
12187 } else { \
12188 pipe_config_err(adjust, __stringify(name), \
12189 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
12190 yesno(current_config->name), \
12191 yesno(pipe_config->name)); \
12192 ret = false; \
12193 } \
12194 } while (0)
12195
12196 #define PIPE_CONF_CHECK_P(name) do { \
12197 if (current_config->name != pipe_config->name) { \
12198 pipe_config_err(adjust, __stringify(name), \
12199 "(expected %p, found %p)\n", \
12200 current_config->name, \
12201 pipe_config->name); \
12202 ret = false; \
12203 } \
12204 } while (0)
12205
12206 #define PIPE_CONF_CHECK_M_N(name) do { \
12207 if (!intel_compare_link_m_n(&current_config->name, \
12208 &pipe_config->name,\
12209 adjust)) { \
12210 pipe_config_err(adjust, __stringify(name), \
12211 "(expected tu %i gmch %i/%i link %i/%i, " \
12212 "found tu %i, gmch %i/%i link %i/%i)\n", \
12213 current_config->name.tu, \
12214 current_config->name.gmch_m, \
12215 current_config->name.gmch_n, \
12216 current_config->name.link_m, \
12217 current_config->name.link_n, \
12218 pipe_config->name.tu, \
12219 pipe_config->name.gmch_m, \
12220 pipe_config->name.gmch_n, \
12221 pipe_config->name.link_m, \
12222 pipe_config->name.link_n); \
12223 ret = false; \
12224 } \
12225 } while (0)
12226
12227 /* This is required for BDW+ where there is only one set of registers for
12228 * switching between high and low RR.
12229 * This macro can be used whenever a comparison has to be made between one
12230 * hw state and multiple sw state variables.
12231 */
12232 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
12233 if (!intel_compare_link_m_n(&current_config->name, \
12234 &pipe_config->name, adjust) && \
12235 !intel_compare_link_m_n(&current_config->alt_name, \
12236 &pipe_config->name, adjust)) { \
12237 pipe_config_err(adjust, __stringify(name), \
12238 "(expected tu %i gmch %i/%i link %i/%i, " \
12239 "or tu %i gmch %i/%i link %i/%i, " \
12240 "found tu %i, gmch %i/%i link %i/%i)\n", \
12241 current_config->name.tu, \
12242 current_config->name.gmch_m, \
12243 current_config->name.gmch_n, \
12244 current_config->name.link_m, \
12245 current_config->name.link_n, \
12246 current_config->alt_name.tu, \
12247 current_config->alt_name.gmch_m, \
12248 current_config->alt_name.gmch_n, \
12249 current_config->alt_name.link_m, \
12250 current_config->alt_name.link_n, \
12251 pipe_config->name.tu, \
12252 pipe_config->name.gmch_m, \
12253 pipe_config->name.gmch_n, \
12254 pipe_config->name.link_m, \
12255 pipe_config->name.link_n); \
12256 ret = false; \
12257 } \
12258 } while (0)
12259
12260 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
12261 if ((current_config->name ^ pipe_config->name) & (mask)) { \
12262 pipe_config_err(adjust, __stringify(name), \
12263 "(%x) (expected %i, found %i)\n", \
12264 (mask), \
12265 current_config->name & (mask), \
12266 pipe_config->name & (mask)); \
12267 ret = false; \
12268 } \
12269 } while (0)
12270
12271 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
12272 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
12273 pipe_config_err(adjust, __stringify(name), \
12274 "(expected %i, found %i)\n", \
12275 current_config->name, \
12276 pipe_config->name); \
12277 ret = false; \
12278 } \
12279 } while (0)
12280
12281 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
12282 if (!intel_compare_infoframe(&current_config->infoframes.name, \
12283 &pipe_config->infoframes.name)) { \
12284 pipe_config_infoframe_err(dev_priv, adjust, __stringify(name), \
12285 &current_config->infoframes.name, \
12286 &pipe_config->infoframes.name); \
12287 ret = false; \
12288 } \
12289 } while (0)
12290
12291 #define PIPE_CONF_QUIRK(quirk) \
12292 ((current_config->quirks | pipe_config->quirks) & (quirk))
12293
12294 PIPE_CONF_CHECK_I(cpu_transcoder);
12295
12296 PIPE_CONF_CHECK_BOOL(has_pch_encoder);
12297 PIPE_CONF_CHECK_I(fdi_lanes);
12298 PIPE_CONF_CHECK_M_N(fdi_m_n);
12299
12300 PIPE_CONF_CHECK_I(lane_count);
12301 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
12302
12303 if (INTEL_GEN(dev_priv) < 8) {
12304 PIPE_CONF_CHECK_M_N(dp_m_n);
12305
12306 if (current_config->has_drrs)
12307 PIPE_CONF_CHECK_M_N(dp_m2_n2);
12308 } else
12309 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
12310
12311 PIPE_CONF_CHECK_X(output_types);
12312
12313 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12314 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12315 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12316 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12317 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12318 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
12319
12320 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12321 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12322 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12323 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12324 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12325 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
12326
12327 PIPE_CONF_CHECK_I(pixel_multiplier);
12328 PIPE_CONF_CHECK_I(output_format);
12329 PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
12330 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
12331 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12332 PIPE_CONF_CHECK_BOOL(limited_color_range);
12333
12334 PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
12335 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
12336 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
12337
12338 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
12339
12340 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12341 DRM_MODE_FLAG_INTERLACE);
12342
12343 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
12344 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12345 DRM_MODE_FLAG_PHSYNC);
12346 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12347 DRM_MODE_FLAG_NHSYNC);
12348 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12349 DRM_MODE_FLAG_PVSYNC);
12350 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12351 DRM_MODE_FLAG_NVSYNC);
12352 }
12353
12354 PIPE_CONF_CHECK_X(gmch_pfit.control);
12355 /* pfit ratios are autocomputed by the hw on gen4+ */
12356 if (INTEL_GEN(dev_priv) < 4)
12357 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
12358 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
12359
12360 if (!adjust) {
12361 PIPE_CONF_CHECK_I(pipe_src_w);
12362 PIPE_CONF_CHECK_I(pipe_src_h);
12363
12364 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
12365 if (current_config->pch_pfit.enabled) {
12366 PIPE_CONF_CHECK_X(pch_pfit.pos);
12367 PIPE_CONF_CHECK_X(pch_pfit.size);
12368 }
12369
12370 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
12371 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
12372
12373 PIPE_CONF_CHECK_X(gamma_mode);
12374 if (IS_CHERRYVIEW(dev_priv))
12375 PIPE_CONF_CHECK_X(cgm_mode);
12376 else
12377 PIPE_CONF_CHECK_X(csc_mode);
12378 PIPE_CONF_CHECK_BOOL(gamma_enable);
12379 PIPE_CONF_CHECK_BOOL(csc_enable);
12380 }
12381
12382 PIPE_CONF_CHECK_BOOL(double_wide);
12383
12384 PIPE_CONF_CHECK_P(shared_dpll);
12385 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
12386 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
12387 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12388 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
12389 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
12390 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
12391 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12392 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12393 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
12394 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
12395 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
12396 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
12397 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
12398 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
12399 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
12400 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
12401 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
12402 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
12403 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
12404 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
12405 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
12406 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
12407 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
12408 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
12409 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
12410 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
12411 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
12412 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
12413 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
12414 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
12415 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
12416
12417 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
12418 PIPE_CONF_CHECK_X(dsi_pll.div);
12419
12420 if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
12421 PIPE_CONF_CHECK_I(pipe_bpp);
12422
12423 PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
12424 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
12425
12426 PIPE_CONF_CHECK_I(min_voltage_level);
12427
12428 PIPE_CONF_CHECK_X(infoframes.enable);
12429 PIPE_CONF_CHECK_X(infoframes.gcp);
12430 PIPE_CONF_CHECK_INFOFRAME(avi);
12431 PIPE_CONF_CHECK_INFOFRAME(spd);
12432 PIPE_CONF_CHECK_INFOFRAME(hdmi);
12433
12434 #undef PIPE_CONF_CHECK_X
12435 #undef PIPE_CONF_CHECK_I
12436 #undef PIPE_CONF_CHECK_BOOL
12437 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
12438 #undef PIPE_CONF_CHECK_P
12439 #undef PIPE_CONF_CHECK_FLAGS
12440 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
12441 #undef PIPE_CONF_QUIRK
12442
12443 return ret;
12444 }
12445
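/*
 * Illustrative note, not driver code: each PIPE_CONF_CHECK_*() used
 * above is a local macro (defined and #undef'd around this function)
 * that compares one field of current_config against pipe_config and
 * reports any mismatch, roughly along the lines of:
 *
 *	if (current_config->name != pipe_config->name) {
 *		pipe_config_err(adjust, __stringify(name), ...);
 *		ret = false;
 *	}
 *
 * (helper name per the definitions earlier in this file), which keeps
 * the long field-by-field comparison table compact.
 */
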
12446 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
12447 const struct intel_crtc_state *pipe_config)
12448 {
12449 if (pipe_config->has_pch_encoder) {
12450 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
12451 &pipe_config->fdi_m_n);
12452 int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
12453
12454 /*
12455 * FDI already provided one idea for the dotclock.
12456 * Yell if the encoder disagrees.
12457 */
12458 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
12459 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
12460 fdi_dotclock, dotclock);
12461 }
12462 }
12463
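/*
 * Illustrative sketch, not used by the driver: a fuzzy clock
 * comparison of the kind intel_pipe_config_sanity_check() relies on
 * above.  The tolerance below is an assumption for the example only;
 * the real intel_fuzzy_clock_check() defined earlier in this file is
 * authoritative.
 */
static inline bool __maybe_unused
example_fuzzy_clock_check(int clock1, int clock2)
{
	/* tolerate small rounding differences between the two paths */
	return abs(clock1 - clock2) * 20 < clock1 + clock2;
}
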
12464 static void verify_wm_state(struct drm_crtc *crtc,
12465 struct drm_crtc_state *new_state)
12466 {
12467 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
12468 struct skl_hw_state {
12469 struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
12470 struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
12471 struct skl_ddb_allocation ddb;
12472 struct skl_pipe_wm wm;
12473 } *hw;
12474 struct skl_ddb_allocation *sw_ddb;
12475 struct skl_pipe_wm *sw_wm;
12476 struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
12477 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12478 const enum pipe pipe = intel_crtc->pipe;
12479 int plane, level, max_level = ilk_wm_max_level(dev_priv);
12480
12481 if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
12482 return;
12483
12484 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
12485 if (!hw)
12486 return;
12487
12488 skl_pipe_wm_get_hw_state(intel_crtc, &hw->wm);
12489 sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
12490
12491 skl_pipe_ddb_get_hw_state(intel_crtc, hw->ddb_y, hw->ddb_uv);
12492
12493 skl_ddb_get_hw_state(dev_priv, &hw->ddb);
12494 sw_ddb = &dev_priv->wm.skl_hw.ddb;
12495
12496 if (INTEL_GEN(dev_priv) >= 11 &&
12497 hw->ddb.enabled_slices != sw_ddb->enabled_slices)
12498 DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
12499 sw_ddb->enabled_slices,
12500 hw->ddb.enabled_slices);
12501
12502 /* planes */
12503 for_each_universal_plane(dev_priv, pipe, plane) {
12504 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
12505
12506 hw_plane_wm = &hw->wm.planes[plane];
12507 sw_plane_wm = &sw_wm->planes[plane];
12508
12509 /* Watermarks */
12510 for (level = 0; level <= max_level; level++) {
12511 if (skl_wm_level_equals(&hw_plane_wm->wm[level],
12512 &sw_plane_wm->wm[level]))
12513 continue;
12514
12515 DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12516 pipe_name(pipe), plane + 1, level,
12517 sw_plane_wm->wm[level].plane_en,
12518 sw_plane_wm->wm[level].plane_res_b,
12519 sw_plane_wm->wm[level].plane_res_l,
12520 hw_plane_wm->wm[level].plane_en,
12521 hw_plane_wm->wm[level].plane_res_b,
12522 hw_plane_wm->wm[level].plane_res_l);
12523 }
12524
12525 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
12526 &sw_plane_wm->trans_wm)) {
12527 DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12528 pipe_name(pipe), plane + 1,
12529 sw_plane_wm->trans_wm.plane_en,
12530 sw_plane_wm->trans_wm.plane_res_b,
12531 sw_plane_wm->trans_wm.plane_res_l,
12532 hw_plane_wm->trans_wm.plane_en,
12533 hw_plane_wm->trans_wm.plane_res_b,
12534 hw_plane_wm->trans_wm.plane_res_l);
12535 }
12536
12537 /* DDB */
12538 hw_ddb_entry = &hw->ddb_y[plane];
12539 sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
12540
12541 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
12542 DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
12543 pipe_name(pipe), plane + 1,
12544 sw_ddb_entry->start, sw_ddb_entry->end,
12545 hw_ddb_entry->start, hw_ddb_entry->end);
12546 }
12547 }
12548
12549 /*
12550 * cursor
12551	 * If the cursor plane isn't active, we may not have updated its ddb
12552	 * allocation. In that case, since the ddb allocation will be updated
12553	 * once the plane becomes visible, we can skip this check.
12554 */
12555 if (1) {
12556 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
12557
12558 hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
12559 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
12560
12561 /* Watermarks */
12562 for (level = 0; level <= max_level; level++) {
12563 if (skl_wm_level_equals(&hw_plane_wm->wm[level],
12564 &sw_plane_wm->wm[level]))
12565 continue;
12566
12567 DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12568 pipe_name(pipe), level,
12569 sw_plane_wm->wm[level].plane_en,
12570 sw_plane_wm->wm[level].plane_res_b,
12571 sw_plane_wm->wm[level].plane_res_l,
12572 hw_plane_wm->wm[level].plane_en,
12573 hw_plane_wm->wm[level].plane_res_b,
12574 hw_plane_wm->wm[level].plane_res_l);
12575 }
12576
12577 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
12578 &sw_plane_wm->trans_wm)) {
12579 DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12580 pipe_name(pipe),
12581 sw_plane_wm->trans_wm.plane_en,
12582 sw_plane_wm->trans_wm.plane_res_b,
12583 sw_plane_wm->trans_wm.plane_res_l,
12584 hw_plane_wm->trans_wm.plane_en,
12585 hw_plane_wm->trans_wm.plane_res_b,
12586 hw_plane_wm->trans_wm.plane_res_l);
12587 }
12588
12589 /* DDB */
12590 hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
12591 sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
12592
12593 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
12594 DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
12595 pipe_name(pipe),
12596 sw_ddb_entry->start, sw_ddb_entry->end,
12597 hw_ddb_entry->start, hw_ddb_entry->end);
12598 }
12599 }
12600
12601 kfree(hw);
12602 }
12603
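/*
 * Illustrative sketch, not used by the driver: a DDB entry is a
 * simple [start, end) block range, so the equality/overlap checks in
 * verify_wm_state() above boil down to comparisons like this one
 * (the real helpers are skl_ddb_entry_equal() and friends).
 */
static inline bool __maybe_unused
example_ddb_entries_overlap(const struct skl_ddb_entry *a,
			    const struct skl_ddb_entry *b)
{
	/* two half-open ranges overlap iff each starts before the
	 * other one ends */
	return a->start < b->end && b->start < a->end;
}
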
12604 static void
12605 verify_connector_state(struct drm_device *dev,
12606 struct drm_atomic_state *state,
12607 struct drm_crtc *crtc)
12608 {
12609 struct drm_connector *connector;
12610 struct drm_connector_state *new_conn_state;
12611 int i;
12612
12613 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
12614 struct drm_encoder *encoder = connector->encoder;
12615 struct drm_crtc_state *crtc_state = NULL;
12616
12617 if (new_conn_state->crtc != crtc)
12618 continue;
12619
12620 if (crtc)
12621 crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
12622
12623 intel_connector_verify_state(crtc_state, new_conn_state);
12624
12625 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
12626 "connector's atomic encoder doesn't match legacy encoder\n");
12627 }
12628 }
12629
12630 static void
12631 verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
12632 {
12633 struct intel_encoder *encoder;
12634 struct drm_connector *connector;
12635 struct drm_connector_state *old_conn_state, *new_conn_state;
12636 int i;
12637
12638 for_each_intel_encoder(dev, encoder) {
12639 bool enabled = false, found = false;
12640 enum pipe pipe;
12641
12642 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
12643 encoder->base.base.id,
12644 encoder->base.name);
12645
12646 for_each_oldnew_connector_in_state(state, connector, old_conn_state,
12647 new_conn_state, i) {
12648 if (old_conn_state->best_encoder == &encoder->base)
12649 found = true;
12650
12651 if (new_conn_state->best_encoder != &encoder->base)
12652 continue;
12653 found = enabled = true;
12654
12655 I915_STATE_WARN(new_conn_state->crtc !=
12656 encoder->base.crtc,
12657 "connector's crtc doesn't match encoder crtc\n");
12658 }
12659
12660 if (!found)
12661 continue;
12662
12663 I915_STATE_WARN(!!encoder->base.crtc != enabled,
12664 "encoder's enabled state mismatch "
12665 "(expected %i, found %i)\n",
12666 !!encoder->base.crtc, enabled);
12667
12668 if (!encoder->base.crtc) {
12669 bool active;
12670
12671 active = encoder->get_hw_state(encoder, &pipe);
12672 I915_STATE_WARN(active,
12673 "encoder detached but still enabled on pipe %c.\n",
12674 pipe_name(pipe));
12675 }
12676 }
12677 }
12678
12679 static void
12680 verify_crtc_state(struct drm_crtc *crtc,
12681 struct drm_crtc_state *old_crtc_state,
12682 struct drm_crtc_state *new_crtc_state)
12683 {
12684 struct drm_device *dev = crtc->dev;
12685 struct drm_i915_private *dev_priv = to_i915(dev);
12686 struct intel_encoder *encoder;
12687 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12688 struct intel_crtc_state *pipe_config, *sw_config;
12689 struct drm_atomic_state *old_state;
12690 bool active;
12691
12692 old_state = old_crtc_state->state;
12693 __drm_atomic_helper_crtc_destroy_state(old_crtc_state);
12694 pipe_config = to_intel_crtc_state(old_crtc_state);
12695 memset(pipe_config, 0, sizeof(*pipe_config));
12696 pipe_config->base.crtc = crtc;
12697 pipe_config->base.state = old_state;
12698
12699 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
12700
12701 active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
12702
12703 /* we keep both pipes enabled on 830 */
12704 if (IS_I830(dev_priv))
12705 active = new_crtc_state->active;
12706
12707 I915_STATE_WARN(new_crtc_state->active != active,
12708 "crtc active state doesn't match with hw state "
12709 "(expected %i, found %i)\n", new_crtc_state->active, active);
12710
12711 I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
12712 "transitional active state does not match atomic hw state "
12713 "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);
12714
12715 for_each_encoder_on_crtc(dev, crtc, encoder) {
12716 enum pipe pipe;
12717
12718 active = encoder->get_hw_state(encoder, &pipe);
12719 I915_STATE_WARN(active != new_crtc_state->active,
12720 "[ENCODER:%i] active %i with crtc active %i\n",
12721 encoder->base.base.id, active, new_crtc_state->active);
12722
12723 I915_STATE_WARN(active && intel_crtc->pipe != pipe,
12724 "Encoder connected to wrong pipe %c\n",
12725 pipe_name(pipe));
12726
12727 if (active)
12728 encoder->get_config(encoder, pipe_config);
12729 }
12730
12731 intel_crtc_compute_pixel_rate(pipe_config);
12732
12733 if (!new_crtc_state->active)
12734 return;
12735
12736 intel_pipe_config_sanity_check(dev_priv, pipe_config);
12737
12738 sw_config = to_intel_crtc_state(new_crtc_state);
12739 if (!intel_pipe_config_compare(dev_priv, sw_config,
12740 pipe_config, false)) {
12741 I915_STATE_WARN(1, "pipe state doesn't match!\n");
12742 intel_dump_pipe_config(intel_crtc, pipe_config,
12743 "[hw state]");
12744 intel_dump_pipe_config(intel_crtc, sw_config,
12745 "[sw state]");
12746 }
12747 }
12748
12749 static void
12750 intel_verify_planes(struct intel_atomic_state *state)
12751 {
12752 struct intel_plane *plane;
12753 const struct intel_plane_state *plane_state;
12754 int i;
12755
12756 for_each_new_intel_plane_in_state(state, plane,
12757 plane_state, i)
12758 assert_plane(plane, plane_state->slave ||
12759 plane_state->base.visible);
12760 }
12761
12762 static void
12763 verify_single_dpll_state(struct drm_i915_private *dev_priv,
12764 struct intel_shared_dpll *pll,
12765 struct drm_crtc *crtc,
12766 struct drm_crtc_state *new_state)
12767 {
12768 struct intel_dpll_hw_state dpll_hw_state;
12769 unsigned int crtc_mask;
12770 bool active;
12771
12772 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
12773
12774 DRM_DEBUG_KMS("%s\n", pll->info->name);
12775
12776 active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);
12777
12778 if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
12779 I915_STATE_WARN(!pll->on && pll->active_mask,
12780 "pll in active use but not on in sw tracking\n");
12781 I915_STATE_WARN(pll->on && !pll->active_mask,
12782 "pll is on but not used by any active crtc\n");
12783 I915_STATE_WARN(pll->on != active,
12784 "pll on state mismatch (expected %i, found %i)\n",
12785 pll->on, active);
12786 }
12787
12788 if (!crtc) {
12789 I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
12790 "more active pll users than references: %x vs %x\n",
12791 pll->active_mask, pll->state.crtc_mask);
12792
12793 return;
12794 }
12795
12796 crtc_mask = drm_crtc_mask(crtc);
12797
12798 if (new_state->active)
12799 I915_STATE_WARN(!(pll->active_mask & crtc_mask),
12800 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
12801 pipe_name(drm_crtc_index(crtc)), pll->active_mask);
12802 else
12803 I915_STATE_WARN(pll->active_mask & crtc_mask,
12804 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
12805 pipe_name(drm_crtc_index(crtc)), pll->active_mask);
12806
12807 I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
12808 "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
12809 crtc_mask, pll->state.crtc_mask);
12810
12811 I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
12812 &dpll_hw_state,
12813 sizeof(dpll_hw_state)),
12814 "pll hw state mismatch\n");
12815 }
12816
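/*
 * Illustrative note, not driver code: pll->active_mask and
 * pll->state.crtc_mask are per-crtc bitmasks, so for a crtc with
 * index 1, drm_crtc_mask() yields BIT(1) == 0x2 and the checks in
 * verify_single_dpll_state() above reduce to plain bit tests, e.g.
 * (pll->active_mask & 0x2) answering "is crtc 1 currently driven by
 * this PLL?".
 */
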
12817 static void
12818 verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
12819 struct drm_crtc_state *old_crtc_state,
12820 struct drm_crtc_state *new_crtc_state)
12821 {
12822 struct drm_i915_private *dev_priv = to_i915(dev);
12823 struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
12824 struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
12825
12826 if (new_state->shared_dpll)
12827 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
12828
12829 if (old_state->shared_dpll &&
12830 old_state->shared_dpll != new_state->shared_dpll) {
12831 unsigned int crtc_mask = drm_crtc_mask(crtc);
12832 struct intel_shared_dpll *pll = old_state->shared_dpll;
12833
12834 I915_STATE_WARN(pll->active_mask & crtc_mask,
12835 "pll active mismatch (didn't expect pipe %c in active mask)\n",
12836 pipe_name(drm_crtc_index(crtc)));
12837 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
12838 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
12839 pipe_name(drm_crtc_index(crtc)));
12840 }
12841 }
12842
12843 static void
12844 intel_modeset_verify_crtc(struct drm_crtc *crtc,
12845 struct drm_atomic_state *state,
12846 struct drm_crtc_state *old_state,
12847 struct drm_crtc_state *new_state)
12848 {
12849 if (!needs_modeset(new_state) &&
12850 !to_intel_crtc_state(new_state)->update_pipe)
12851 return;
12852
12853 verify_wm_state(crtc, new_state);
12854 verify_connector_state(crtc->dev, state, crtc);
12855 verify_crtc_state(crtc, old_state, new_state);
12856 verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
12857 }
12858
12859 static void
12860 verify_disabled_dpll_state(struct drm_device *dev)
12861 {
12862 struct drm_i915_private *dev_priv = to_i915(dev);
12863 int i;
12864
12865 for (i = 0; i < dev_priv->num_shared_dpll; i++)
12866 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
12867 }
12868
12869 static void
12870 intel_modeset_verify_disabled(struct drm_device *dev,
12871 struct drm_atomic_state *state)
12872 {
12873 verify_encoder_state(dev, state);
12874 verify_connector_state(dev, state, NULL);
12875 verify_disabled_dpll_state(dev);
12876 }
12877
12878 static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
12879 {
12880 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
12881 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12882
12883 /*
12884 * The scanline counter increments at the leading edge of hsync.
12885 *
12886 * On most platforms it starts counting from vtotal-1 on the
12887 * first active line. That means the scanline counter value is
12888	 * always one less than what we would expect. I.e. just after
12889 * start of vblank, which also occurs at start of hsync (on the
12890 * last active line), the scanline counter will read vblank_start-1.
12891 *
12892 * On gen2 the scanline counter starts counting from 1 instead
12893 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
12894 * to keep the value positive), instead of adding one.
12895 *
12896 * On HSW+ the behaviour of the scanline counter depends on the output
12897 * type. For DP ports it behaves like most other platforms, but on HDMI
12898 * there's an extra 1 line difference. So we need to add two instead of
12899 * one to the value.
12900 *
12901 * On VLV/CHV DSI the scanline counter would appear to increment
12902 * approx. 1/3 of a scanline before start of vblank. Unfortunately
12903 * that means we can't tell whether we're in vblank or not while
12904 * we're on that particular line. We must still set scanline_offset
12905 * to 1 so that the vblank timestamps come out correct when we query
12906 * the scanline counter from within the vblank interrupt handler.
12907 * However if queried just before the start of vblank we'll get an
12908 * answer that's slightly in the future.
12909 */
12910 if (IS_GEN(dev_priv, 2)) {
12911 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
12912 int vtotal;
12913
12914 vtotal = adjusted_mode->crtc_vtotal;
12915 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
12916 vtotal /= 2;
12917
12918 crtc->scanline_offset = vtotal - 1;
12919 } else if (HAS_DDI(dev_priv) &&
12920 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
12921 crtc->scanline_offset = 2;
12922 } else
12923 crtc->scanline_offset = 1;
12924 }
12925
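/*
 * Worked example for the rules above (numbers are illustrative only):
 * with crtc_vtotal == 806, a gen2 pipe gets scanline_offset == 805,
 * an HSW+ HDMI pipe gets 2, and everything else gets 1.  The offset
 * is later applied when converting a raw counter read back into a
 * 0..vtotal-1 scanline, roughly as sketched below (the real consumer
 * lives in the vblank/scanline code in i915_irq.c).
 */
static inline int __maybe_unused
example_fixup_scanline(int hw_scanline, int scanline_offset, int vtotal)
{
	/* wrap the offset counter value back into 0..vtotal-1 */
	return (hw_scanline + scanline_offset) % vtotal;
}
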
12926 static void intel_modeset_clear_plls(struct drm_atomic_state *state)
12927 {
12928 struct drm_device *dev = state->dev;
12929 struct drm_i915_private *dev_priv = to_i915(dev);
12930 struct drm_crtc *crtc;
12931 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
12932 int i;
12933
12934 if (!dev_priv->display.crtc_compute_clock)
12935 return;
12936
12937 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12938 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12939 struct intel_shared_dpll *old_dpll =
12940 to_intel_crtc_state(old_crtc_state)->shared_dpll;
12941
12942 if (!needs_modeset(new_crtc_state))
12943 continue;
12944
12945 to_intel_crtc_state(new_crtc_state)->shared_dpll = NULL;
12946
12947 if (!old_dpll)
12948 continue;
12949
12950 intel_release_shared_dpll(old_dpll, intel_crtc, state);
12951 }
12952 }
12953
12954 /*
12955 * This implements the workaround described in the "notes" section of the mode
12956 * set sequence documentation. When going from no pipes or single pipe to
12957 * multiple pipes, and planes are enabled after the pipe, we need to wait at
12958 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
12959 */
12960 static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
12961 {
12962 struct drm_crtc_state *crtc_state;
12963 struct intel_crtc *intel_crtc;
12964 struct drm_crtc *crtc;
12965 struct intel_crtc_state *first_crtc_state = NULL;
12966 struct intel_crtc_state *other_crtc_state = NULL;
12967 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
12968 int i;
12969
12970	/* look at all crtcs that are going to be enabled during the modeset */
12971 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
12972 intel_crtc = to_intel_crtc(crtc);
12973
12974 if (!crtc_state->active || !needs_modeset(crtc_state))
12975 continue;
12976
12977 if (first_crtc_state) {
12978 other_crtc_state = to_intel_crtc_state(crtc_state);
12979 break;
12980 } else {
12981 first_crtc_state = to_intel_crtc_state(crtc_state);
12982 first_pipe = intel_crtc->pipe;
12983 }
12984 }
12985
12986 /* No workaround needed? */
12987 if (!first_crtc_state)
12988 return 0;
12989
12990	/* w/a possibly needed, check how many crtcs are already enabled. */
12991 for_each_intel_crtc(state->dev, intel_crtc) {
12992 struct intel_crtc_state *pipe_config;
12993
12994 pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
12995 if (IS_ERR(pipe_config))
12996 return PTR_ERR(pipe_config);
12997
12998 pipe_config->hsw_workaround_pipe = INVALID_PIPE;
12999
13000 if (!pipe_config->base.active ||
13001 needs_modeset(&pipe_config->base))
13002 continue;
13003
13004 /* 2 or more enabled crtcs means no need for w/a */
13005 if (enabled_pipe != INVALID_PIPE)
13006 return 0;
13007
13008 enabled_pipe = intel_crtc->pipe;
13009 }
13010
13011 if (enabled_pipe != INVALID_PIPE)
13012 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
13013 else if (other_crtc_state)
13014 other_crtc_state->hsw_workaround_pipe = first_pipe;
13015
13016 return 0;
13017 }
13018
13019 static int intel_lock_all_pipes(struct drm_atomic_state *state)
13020 {
13021 struct drm_crtc *crtc;
13022
13023 /* Add all pipes to the state */
13024 for_each_crtc(state->dev, crtc) {
13025 struct drm_crtc_state *crtc_state;
13026
13027 crtc_state = drm_atomic_get_crtc_state(state, crtc);
13028 if (IS_ERR(crtc_state))
13029 return PTR_ERR(crtc_state);
13030 }
13031
13032 return 0;
13033 }
13034
13035 static int intel_modeset_all_pipes(struct drm_atomic_state *state)
13036 {
13037 struct drm_crtc *crtc;
13038
13039 /*
13040 * Add all pipes to the state, and force
13041 * a modeset on all the active ones.
13042 */
13043 for_each_crtc(state->dev, crtc) {
13044 struct drm_crtc_state *crtc_state;
13045 int ret;
13046
13047 crtc_state = drm_atomic_get_crtc_state(state, crtc);
13048 if (IS_ERR(crtc_state))
13049 return PTR_ERR(crtc_state);
13050
13051 if (!crtc_state->active || needs_modeset(crtc_state))
13052 continue;
13053
13054 crtc_state->mode_changed = true;
13055
13056 ret = drm_atomic_add_affected_connectors(state, crtc);
13057 if (ret)
13058 return ret;
13059
13060 ret = drm_atomic_add_affected_planes(state, crtc);
13061 if (ret)
13062 return ret;
13063 }
13064
13065 return 0;
13066 }
13067
13068 static int intel_modeset_checks(struct drm_atomic_state *state)
13069 {
13070 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13071 struct drm_i915_private *dev_priv = to_i915(state->dev);
13072 struct drm_crtc *crtc;
13073 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13074 int ret = 0, i;
13075
13076 if (!check_digital_port_conflicts(state)) {
13077 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
13078 return -EINVAL;
13079 }
13080
13081 /* keep the current setting */
13082 if (!intel_state->cdclk.force_min_cdclk_changed)
13083 intel_state->cdclk.force_min_cdclk =
13084 dev_priv->cdclk.force_min_cdclk;
13085
13086 intel_state->modeset = true;
13087 intel_state->active_crtcs = dev_priv->active_crtcs;
13088 intel_state->cdclk.logical = dev_priv->cdclk.logical;
13089 intel_state->cdclk.actual = dev_priv->cdclk.actual;
13090 intel_state->cdclk.pipe = INVALID_PIPE;
13091
13092 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13093 if (new_crtc_state->active)
13094 intel_state->active_crtcs |= 1 << i;
13095 else
13096 intel_state->active_crtcs &= ~(1 << i);
13097
13098 if (old_crtc_state->active != new_crtc_state->active)
13099 intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
13100 }
13101
13102 /*
13103 * See if the config requires any additional preparation, e.g.
13104 * to adjust global state with pipes off. We need to do this
13105	 * here so we can get the updated pipe config for the new
13106 * mode set on this crtc. For other crtcs we need to use the
13107 * adjusted_mode bits in the crtc directly.
13108 */
13109 if (dev_priv->display.modeset_calc_cdclk) {
13110 enum pipe pipe;
13111
13112 ret = dev_priv->display.modeset_calc_cdclk(state);
13113 if (ret < 0)
13114 return ret;
13115
13116 /*
13117		 * Writes to dev_priv->cdclk.logical must be protected by
13118		 * holding all the crtc locks, even if we don't end up
13119		 * touching the hardware.
13120 */
13121 if (intel_cdclk_changed(&dev_priv->cdclk.logical,
13122 &intel_state->cdclk.logical)) {
13123 ret = intel_lock_all_pipes(state);
13124 if (ret < 0)
13125 return ret;
13126 }
13127
13128 if (is_power_of_2(intel_state->active_crtcs)) {
13129 struct drm_crtc *crtc;
13130 struct drm_crtc_state *crtc_state;
13131
13132 pipe = ilog2(intel_state->active_crtcs);
13133 crtc = &intel_get_crtc_for_pipe(dev_priv, pipe)->base;
13134 crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
13135 if (crtc_state && needs_modeset(crtc_state))
13136 pipe = INVALID_PIPE;
13137 } else {
13138 pipe = INVALID_PIPE;
13139 }
13140
13141 /* All pipes must be switched off while we change the cdclk. */
13142 if (pipe != INVALID_PIPE &&
13143 intel_cdclk_needs_cd2x_update(dev_priv,
13144 &dev_priv->cdclk.actual,
13145 &intel_state->cdclk.actual)) {
13146 ret = intel_lock_all_pipes(state);
13147 if (ret < 0)
13148 return ret;
13149
13150 intel_state->cdclk.pipe = pipe;
13151 } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
13152 &intel_state->cdclk.actual)) {
13153 ret = intel_modeset_all_pipes(state);
13154 if (ret < 0)
13155 return ret;
13156
13157 intel_state->cdclk.pipe = INVALID_PIPE;
13158 }
13159
13160 DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
13161 intel_state->cdclk.logical.cdclk,
13162 intel_state->cdclk.actual.cdclk);
13163 DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
13164 intel_state->cdclk.logical.voltage_level,
13165 intel_state->cdclk.actual.voltage_level);
13166 }
13167
13168 intel_modeset_clear_plls(state);
13169
13170 if (IS_HASWELL(dev_priv))
13171 return haswell_mode_set_planes_workaround(state);
13172
13173 return 0;
13174 }
13175
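/*
 * Illustrative note, not driver code: active_crtcs is a bitmask of
 * enabled pipes, so exactly one active pipe means the mask is a power
 * of two and ilog2() recovers the pipe, e.g.:
 *
 *	active_crtcs == 0x4  ->  is_power_of_2() is true,
 *	ilog2(0x4) == 2 == PIPE_C
 *
 * which is how the cd2x path in intel_modeset_checks() above singles
 * out the one pipe that has to be quiesced for the cdclk change.
 */
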
13176 /*
13177 * Handle calculation of various watermark data at the end of the atomic check
13178 * phase. The code here should be run after the per-crtc and per-plane 'check'
13179 * handlers to ensure that all derived state has been updated.
13180 */
13181 static int calc_watermark_data(struct intel_atomic_state *state)
13182 {
13183 struct drm_device *dev = state->base.dev;
13184 struct drm_i915_private *dev_priv = to_i915(dev);
13185
13186 /* Is there platform-specific watermark information to calculate? */
13187 if (dev_priv->display.compute_global_watermarks)
13188 return dev_priv->display.compute_global_watermarks(state);
13189
13190 return 0;
13191 }
13192
13193 /**
13194 * intel_atomic_check - validate state object
13195 * @dev: drm device
13196 * @state: state to validate
13197 */
13198 static int intel_atomic_check(struct drm_device *dev,
13199 struct drm_atomic_state *state)
13200 {
13201 struct drm_i915_private *dev_priv = to_i915(dev);
13202 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13203 struct drm_crtc *crtc;
13204 struct drm_crtc_state *old_crtc_state, *crtc_state;
13205 int ret, i;
13206 bool any_ms = intel_state->cdclk.force_min_cdclk_changed;
13207
13208 /* Catch I915_MODE_FLAG_INHERITED */
13209 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
13210 crtc_state, i) {
13211 if (crtc_state->mode.private_flags !=
13212 old_crtc_state->mode.private_flags)
13213 crtc_state->mode_changed = true;
13214 }
13215
13216 ret = drm_atomic_helper_check_modeset(dev, state);
13217 if (ret)
13218 return ret;
13219
13220 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) {
13221 struct intel_crtc_state *pipe_config =
13222 to_intel_crtc_state(crtc_state);
13223
13224 if (!needs_modeset(crtc_state))
13225 continue;
13226
13227 if (!crtc_state->enable) {
13228 any_ms = true;
13229 continue;
13230 }
13231
13232 ret = intel_modeset_pipe_config(crtc, pipe_config);
13233 if (ret == -EDEADLK)
13234 return ret;
13235 if (ret) {
13236 intel_dump_pipe_config(to_intel_crtc(crtc),
13237 pipe_config, "[failed]");
13238 return ret;
13239 }
13240
13241 if (intel_pipe_config_compare(dev_priv,
13242 to_intel_crtc_state(old_crtc_state),
13243 pipe_config, true)) {
13244 crtc_state->mode_changed = false;
13245 pipe_config->update_pipe = true;
13246 }
13247
13248 if (needs_modeset(crtc_state))
13249 any_ms = true;
13250
13251 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
13252 needs_modeset(crtc_state) ?
13253 "[modeset]" : "[fastset]");
13254 }
13255
13256 ret = drm_dp_mst_atomic_check(state);
13257 if (ret)
13258 return ret;
13259
13260 if (any_ms) {
13261 ret = intel_modeset_checks(state);
13262
13263 if (ret)
13264 return ret;
13265 } else {
13266 intel_state->cdclk.logical = dev_priv->cdclk.logical;
13267 }
13268
13269 ret = icl_add_linked_planes(intel_state);
13270 if (ret)
13271 return ret;
13272
13273 ret = drm_atomic_helper_check_planes(dev, state);
13274 if (ret)
13275 return ret;
13276
13277 intel_fbc_choose_crtc(dev_priv, intel_state);
13278 return calc_watermark_data(intel_state);
13279 }
13280
13281 static int intel_atomic_prepare_commit(struct drm_device *dev,
13282 struct drm_atomic_state *state)
13283 {
13284 return drm_atomic_helper_prepare_planes(dev, state);
13285 }
13286
13287 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
13288 {
13289 struct drm_device *dev = crtc->base.dev;
13290 struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
13291
13292 if (!vblank->max_vblank_count)
13293 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
13294
13295 return dev->driver->get_vblank_counter(dev, crtc->pipe);
13296 }
13297
13298 static void intel_update_crtc(struct drm_crtc *crtc,
13299 struct drm_atomic_state *state,
13300 struct drm_crtc_state *old_crtc_state,
13301 struct drm_crtc_state *new_crtc_state)
13302 {
13303 struct drm_device *dev = crtc->dev;
13304 struct drm_i915_private *dev_priv = to_i915(dev);
13305 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13306 struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
13307 bool modeset = needs_modeset(new_crtc_state);
13308 struct intel_plane_state *new_plane_state =
13309 intel_atomic_get_new_plane_state(to_intel_atomic_state(state),
13310 to_intel_plane(crtc->primary));
13311
13312 if (modeset) {
13313 update_scanline_offset(pipe_config);
13314 dev_priv->display.crtc_enable(pipe_config, state);
13315
13316 /* vblanks work again, re-enable pipe CRC. */
13317 intel_crtc_enable_pipe_crc(intel_crtc);
13318 } else {
13319 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
13320 pipe_config);
13321
13322 if (pipe_config->update_pipe)
13323 intel_encoders_update_pipe(crtc, pipe_config, state);
13324 }
13325
13326 if (pipe_config->update_pipe && !pipe_config->enable_fbc)
13327 intel_fbc_disable(intel_crtc);
13328 else if (new_plane_state)
13329 intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
13330
13331 intel_begin_crtc_commit(to_intel_atomic_state(state), intel_crtc);
13332
13333 if (INTEL_GEN(dev_priv) >= 9)
13334 skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
13335 else
13336 i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
13337
13338 intel_finish_crtc_commit(to_intel_atomic_state(state), intel_crtc);
13339 }
13340
13341 static void intel_update_crtcs(struct drm_atomic_state *state)
13342 {
13343 struct drm_crtc *crtc;
13344 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13345 int i;
13346
13347 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13348 if (!new_crtc_state->active)
13349 continue;
13350
13351 intel_update_crtc(crtc, state, old_crtc_state,
13352 new_crtc_state);
13353 }
13354 }
13355
13356 static void skl_update_crtcs(struct drm_atomic_state *state)
13357 {
13358 struct drm_i915_private *dev_priv = to_i915(state->dev);
13359 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13360 struct drm_crtc *crtc;
13361 struct intel_crtc *intel_crtc;
13362 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13363 struct intel_crtc_state *cstate;
13364 unsigned int updated = 0;
13365 bool progress;
13366 enum pipe pipe;
13367 int i;
13368 u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
13369 u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
13370 struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
13371
13372 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
13373		/* ignore allocations for crtcs that have been turned off. */
13374 if (new_crtc_state->active)
13375 entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
13376
13377	/* If a 2nd DBuf slice is required, enable it here */
13378 if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
13379 icl_dbuf_slices_update(dev_priv, required_slices);
13380
13381 /*
13382 * Whenever the number of active pipes changes, we need to make sure we
13383 * update the pipes in the right order so that their ddb allocations
13384	 * never overlap with each other in between CRTC updates. Otherwise we'll
13385 * cause pipe underruns and other bad stuff.
13386 */
13387 do {
13388 progress = false;
13389
13390 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13391 bool vbl_wait = false;
13392 unsigned int cmask = drm_crtc_mask(crtc);
13393
13394 intel_crtc = to_intel_crtc(crtc);
13395 cstate = to_intel_crtc_state(new_crtc_state);
13396 pipe = intel_crtc->pipe;
13397
13398 if (updated & cmask || !cstate->base.active)
13399 continue;
13400
13401 if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb,
13402 entries,
13403 INTEL_INFO(dev_priv)->num_pipes, i))
13404 continue;
13405
13406 updated |= cmask;
13407 entries[i] = cstate->wm.skl.ddb;
13408
13409 /*
13410			 * If this is an already active pipe, its DDB changed,
13411			 * and this isn't the last pipe that needs updating,
13412			 * then we need to wait for a vblank to pass for the
13413 * new ddb allocation to take effect.
13414 */
13415 if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
13416 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
13417 !new_crtc_state->active_changed &&
13418 intel_state->wm_results.dirty_pipes != updated)
13419 vbl_wait = true;
13420
13421 intel_update_crtc(crtc, state, old_crtc_state,
13422 new_crtc_state);
13423
13424 if (vbl_wait)
13425 intel_wait_for_vblank(dev_priv, pipe);
13426
13427 progress = true;
13428 }
13429 } while (progress);
13430
13431	/* If the 2nd DBuf slice is no longer required, disable it */
13432 if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
13433 icl_dbuf_slices_update(dev_priv, required_slices);
13434 }
13435
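/*
 * Worked example for the update ordering in skl_update_crtcs() above
 * (numbers are illustrative only): suppose pipe A shrinks from DDB
 * blocks [0, 512) to [0, 256) while pipe B grows from [512, 1024) to
 * [256, 1024).  Updating B first would overlap A's still-live
 * [0, 512) allocation, so the loop updates A first, waits a vblank
 * for A's smaller allocation to take effect, and only then lets B
 * expand into [256, 512).
 */
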
13436 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
13437 {
13438 struct intel_atomic_state *state, *next;
13439 struct llist_node *freed;
13440
13441 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
13442 llist_for_each_entry_safe(state, next, freed, freed)
13443 drm_atomic_state_put(&state->base);
13444 }
13445
13446 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
13447 {
13448 struct drm_i915_private *dev_priv =
13449 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
13450
13451 intel_atomic_helper_free_state(dev_priv);
13452 }
13453
13454 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
13455 {
13456 struct wait_queue_entry wait_fence, wait_reset;
13457 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
13458
13459 init_wait_entry(&wait_fence, 0);
13460 init_wait_entry(&wait_reset, 0);
13461 for (;;) {
13462 prepare_to_wait(&intel_state->commit_ready.wait,
13463 &wait_fence, TASK_UNINTERRUPTIBLE);
13464 prepare_to_wait(&dev_priv->gpu_error.wait_queue,
13465 &wait_reset, TASK_UNINTERRUPTIBLE);
13466
13468 if (i915_sw_fence_done(&intel_state->commit_ready)
13469 || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
13470 break;
13471
13472 schedule();
13473 }
13474 finish_wait(&intel_state->commit_ready.wait, &wait_fence);
13475 finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
13476 }
13477
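/*
 * Illustrative note, not driver code: the loop above is the classic
 * open-coded wait on two wake-up sources at once.  The task enqueues
 * itself on both waitqueues, checks both conditions, and schedule()s
 * until either the commit fence signals or a GPU reset is flagged;
 * finish_wait() then dequeues it from both.
 */
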
13478 static void intel_atomic_cleanup_work(struct work_struct *work)
13479 {
13480 struct drm_atomic_state *state =
13481 container_of(work, struct drm_atomic_state, commit_work);
13482 struct drm_i915_private *i915 = to_i915(state->dev);
13483
13484 drm_atomic_helper_cleanup_planes(&i915->drm, state);
13485 drm_atomic_helper_commit_cleanup_done(state);
13486 drm_atomic_state_put(state);
13487
13488 intel_atomic_helper_free_state(i915);
13489 }
13490
13491 static void intel_atomic_commit_tail(struct drm_atomic_state *state)
13492 {
13493 struct drm_device *dev = state->dev;
13494 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13495 struct drm_i915_private *dev_priv = to_i915(dev);
13496 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13497 struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
13498 struct drm_crtc *crtc;
13499 struct intel_crtc *intel_crtc;
13500 u64 put_domains[I915_MAX_PIPES] = {};
13501 intel_wakeref_t wakeref = 0;
13502 int i;
13503
13504 intel_atomic_commit_fence_wait(intel_state);
13505
13506 drm_atomic_helper_wait_for_dependencies(state);
13507
13508 if (intel_state->modeset)
13509 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
13510
13511 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13512 old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
13513 new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
13514 intel_crtc = to_intel_crtc(crtc);
13515
13516 if (needs_modeset(new_crtc_state) ||
13517 to_intel_crtc_state(new_crtc_state)->update_pipe) {
13518
13519 put_domains[intel_crtc->pipe] =
13520 modeset_get_crtc_power_domains(crtc,
13521 new_intel_crtc_state);
13522 }
13523
13524 if (!needs_modeset(new_crtc_state))
13525 continue;
13526
13527 intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);
13528
13529 if (old_crtc_state->active) {
13530 intel_crtc_disable_planes(intel_state, intel_crtc);
13531
13532 /*
13533 * We need to disable pipe CRC before disabling the pipe,
13534 * or we race against vblank off.
13535 */
13536 intel_crtc_disable_pipe_crc(intel_crtc);
13537
13538 dev_priv->display.crtc_disable(old_intel_crtc_state, state);
13539 intel_crtc->active = false;
13540 intel_fbc_disable(intel_crtc);
13541 intel_disable_shared_dpll(old_intel_crtc_state);
13542
13543 /*
13544 * Underruns don't always raise
13545 * interrupts, so check manually.
13546 */
13547 intel_check_cpu_fifo_underruns(dev_priv);
13548 intel_check_pch_fifo_underruns(dev_priv);
13549
13550 /* FIXME unify this for all platforms */
13551 if (!new_crtc_state->active &&
13552 !HAS_GMCH(dev_priv) &&
13553 dev_priv->display.initial_watermarks)
13554 dev_priv->display.initial_watermarks(intel_state,
13555 new_intel_crtc_state);
13556 }
13557 }
13558
13559 /* FIXME: Eventually get rid of our intel_crtc->config pointer */
13560 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
13561 to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
13562
13563 if (intel_state->modeset) {
13564 drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
13565
13566 intel_set_cdclk_pre_plane_update(dev_priv,
13567 &intel_state->cdclk.actual,
13568 &dev_priv->cdclk.actual,
13569 intel_state->cdclk.pipe);
13570
13571 /*
13572 * SKL workaround: bspec recommends we disable the SAGV when we
13573		 * have more than one pipe enabled
13574 */
13575 if (!intel_can_enable_sagv(state))
13576 intel_disable_sagv(dev_priv);
13577
13578 intel_modeset_verify_disabled(dev, state);
13579 }
13580
13581 /* Complete the events for pipes that have now been disabled */
13582 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
13583 bool modeset = needs_modeset(new_crtc_state);
13584
13585		/* Complete events for now-disabled pipes here. */
13586 if (modeset && !new_crtc_state->active && new_crtc_state->event) {
13587 spin_lock_irq(&dev->event_lock);
13588 drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
13589 spin_unlock_irq(&dev->event_lock);
13590
13591 new_crtc_state->event = NULL;
13592 }
13593 }
13594
13595 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
13596 dev_priv->display.update_crtcs(state);
13597
13598 if (intel_state->modeset)
13599 intel_set_cdclk_post_plane_update(dev_priv,
13600 &intel_state->cdclk.actual,
13601 &dev_priv->cdclk.actual,
13602 intel_state->cdclk.pipe);
13603
13604 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
13605 * already, but still need the state for the delayed optimization. To
13606 * fix this:
13607 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
13608 * - schedule that vblank worker _before_ calling hw_done
13609	 * - at the start of commit_tail, cancel it _synchronously_
13610	 * - switch over to the vblank wait helper in the core after that since
13611	 *   we don't need our special handling any more.
13612 */
13613 drm_atomic_helper_wait_for_flip_done(dev, state);
13614
13615 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
13616 new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
13617
13618 if (new_crtc_state->active &&
13619 !needs_modeset(new_crtc_state) &&
13620 (new_intel_crtc_state->base.color_mgmt_changed ||
13621 new_intel_crtc_state->update_pipe))
13622 intel_color_load_luts(new_intel_crtc_state);
13623 }
13624
13625 /*
13626 * Now that the vblank has passed, we can go ahead and program the
13627 * optimal watermarks on platforms that need two-step watermark
13628 * programming.
13629 *
13630 * TODO: Move this (and other cleanup) to an async worker eventually.
13631 */
13632 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
13633 new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
13634
13635 if (dev_priv->display.optimize_watermarks)
13636 dev_priv->display.optimize_watermarks(intel_state,
13637 new_intel_crtc_state);
13638 }
13639
13640 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13641 intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
13642
13643 if (put_domains[i])
13644 modeset_put_power_domains(dev_priv, put_domains[i]);
13645
13646 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
13647 }
13648
13649 if (intel_state->modeset)
13650 intel_verify_planes(intel_state);
13651
13652 if (intel_state->modeset && intel_can_enable_sagv(state))
13653 intel_enable_sagv(dev_priv);
13654
13655 drm_atomic_helper_commit_hw_done(state);
13656
13657 if (intel_state->modeset) {
13658 /* As one of the primary mmio accessors, KMS has a high
13659 * likelihood of triggering bugs in unclaimed access. After we
13660 * finish modesetting, see if an error has been flagged, and if
13661 * so enable debugging for the next modeset - and hope we catch
13662 * the culprit.
13663 */
13664 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
13665 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
13666 }
13667
13668 /*
13669 * Defer the cleanup of the old state to a separate worker to not
13670 * impede the current task (userspace for blocking modesets) that
13671	 * is executed inline. For out-of-line asynchronous modesets/flips,
13672 * deferring to a new worker seems overkill, but we would place a
13673 * schedule point (cond_resched()) here anyway to keep latencies
13674 * down.
13675 */
13676 INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
13677 queue_work(system_highpri_wq, &state->commit_work);
13678 }
13679
13680 static void intel_atomic_commit_work(struct work_struct *work)
13681 {
13682 struct drm_atomic_state *state =
13683 container_of(work, struct drm_atomic_state, commit_work);
13684
13685 intel_atomic_commit_tail(state);
13686 }
13687
13688 static int __i915_sw_fence_call
13689 intel_atomic_commit_ready(struct i915_sw_fence *fence,
13690 enum i915_sw_fence_notify notify)
13691 {
13692 struct intel_atomic_state *state =
13693 container_of(fence, struct intel_atomic_state, commit_ready);
13694
13695 switch (notify) {
13696 case FENCE_COMPLETE:
13697 /* we do blocking waits in the worker, nothing to do here */
13698 break;
13699 case FENCE_FREE:
13700 {
13701 struct intel_atomic_helper *helper =
13702 &to_i915(state->base.dev)->atomic_helper;
13703
13704 if (llist_add(&state->freed, &helper->free_list))
13705 schedule_work(&helper->free_work);
13706 break;
13707 }
13708 }
13709
13710 return NOTIFY_DONE;
13711 }
13712
13713 static void intel_atomic_track_fbs(struct drm_atomic_state *state)
13714 {
13715 struct drm_plane_state *old_plane_state, *new_plane_state;
13716 struct drm_plane *plane;
13717 int i;
13718
13719 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
13720 i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
13721 intel_fb_obj(new_plane_state->fb),
13722 to_intel_plane(plane)->frontbuffer_bit);
13723 }
13724
13725 /**
13726 * intel_atomic_commit - commit validated state object
13727 * @dev: DRM device
13728 * @state: the top-level driver state object
13729 * @nonblock: nonblocking commit
13730 *
13731 * This function commits a top-level state object that has been validated
13732 * with drm_atomic_helper_check().
13733 *
13734 * RETURNS
13735 * Zero for success or -errno.
13736 */
13737 static int intel_atomic_commit(struct drm_device *dev,
13738 struct drm_atomic_state *state,
13739 bool nonblock)
13740 {
13741 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13742 struct drm_i915_private *dev_priv = to_i915(dev);
13743 int ret = 0;
13744
13745 drm_atomic_state_get(state);
13746 i915_sw_fence_init(&intel_state->commit_ready,
13747 intel_atomic_commit_ready);
13748
13749 /*
13750 * The intel_legacy_cursor_update() fast path takes care
13751 * of avoiding the vblank waits for simple cursor
13752 * movement and flips. For cursor on/off and size changes,
13753 * we want to perform the vblank waits so that watermark
13754 * updates happen during the correct frames. Gen9+ have
13755 * double buffered watermarks and so shouldn't need this.
13756 *
13757 * Unset state->legacy_cursor_update before the call to
13758 * drm_atomic_helper_setup_commit() because otherwise
13759 * drm_atomic_helper_wait_for_flip_done() is a noop and
13760 * we get FIFO underruns because we didn't wait
13761 * for vblank.
13762 *
13763 * FIXME doing watermarks and fb cleanup from a vblank worker
13764 * (assuming we had any) would solve these problems.
13765 */
13766 if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
13767 struct intel_crtc_state *new_crtc_state;
13768 struct intel_crtc *crtc;
13769 int i;
13770
13771 for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
13772 if (new_crtc_state->wm.need_postvbl_update ||
13773 new_crtc_state->update_wm_post)
13774 state->legacy_cursor_update = false;
13775 }
13776
13777 ret = intel_atomic_prepare_commit(dev, state);
13778 if (ret) {
13779 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
13780 i915_sw_fence_commit(&intel_state->commit_ready);
13781 return ret;
13782 }
13783
13784 ret = drm_atomic_helper_setup_commit(state, nonblock);
13785 if (!ret)
13786 ret = drm_atomic_helper_swap_state(state, true);
13787
13788 if (ret) {
13789 i915_sw_fence_commit(&intel_state->commit_ready);
13790
13791 drm_atomic_helper_cleanup_planes(dev, state);
13792 return ret;
13793 }
13794 dev_priv->wm.distrust_bios_wm = false;
13795 intel_shared_dpll_swap_state(state);
13796 intel_atomic_track_fbs(state);
13797
13798 if (intel_state->modeset) {
13799 memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
13800 sizeof(intel_state->min_cdclk));
13801 memcpy(dev_priv->min_voltage_level,
13802 intel_state->min_voltage_level,
13803 sizeof(intel_state->min_voltage_level));
13804 dev_priv->active_crtcs = intel_state->active_crtcs;
13805 dev_priv->cdclk.force_min_cdclk =
13806 intel_state->cdclk.force_min_cdclk;
13807
13808 intel_cdclk_swap_state(intel_state);
13809 }
13810
13811 drm_atomic_state_get(state);
13812 INIT_WORK(&state->commit_work, intel_atomic_commit_work);
13813
13814 i915_sw_fence_commit(&intel_state->commit_ready);
13815 if (nonblock && intel_state->modeset) {
13816 queue_work(dev_priv->modeset_wq, &state->commit_work);
13817 } else if (nonblock) {
13818 queue_work(system_unbound_wq, &state->commit_work);
13819 } else {
13820 if (intel_state->modeset)
13821 flush_workqueue(dev_priv->modeset_wq);
13822 intel_atomic_commit_tail(state);
13823 }
13824
13825 return 0;
13826 }
13827
13828 static const struct drm_crtc_funcs intel_crtc_funcs = {
13829 .gamma_set = drm_atomic_helper_legacy_gamma_set,
13830 .set_config = drm_atomic_helper_set_config,
13831 .destroy = intel_crtc_destroy,
13832 .page_flip = drm_atomic_helper_page_flip,
13833 .atomic_duplicate_state = intel_crtc_duplicate_state,
13834 .atomic_destroy_state = intel_crtc_destroy_state,
13835 .set_crc_source = intel_crtc_set_crc_source,
13836 .verify_crc_source = intel_crtc_verify_crc_source,
13837 .get_crc_sources = intel_crtc_get_crc_sources,
13838 };
13839
13840 struct wait_rps_boost {
13841 struct wait_queue_entry wait;
13842
13843 struct drm_crtc *crtc;
13844 struct i915_request *request;
13845 };
13846
13847 static int do_rps_boost(struct wait_queue_entry *_wait,
13848 unsigned mode, int sync, void *key)
13849 {
13850 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
13851 struct i915_request *rq = wait->request;
13852
13853 /*
13854	 * If we missed the vblank, but the request is already running, it
13855 * is reasonable to assume that it will complete before the next
13856 * vblank without our intervention, so leave RPS alone.
13857 */
13858 if (!i915_request_started(rq))
13859 gen6_rps_boost(rq);
13860 i915_request_put(rq);
13861
13862 drm_crtc_vblank_put(wait->crtc);
13863
13864 list_del(&wait->wait.entry);
13865 kfree(wait);
13866 return 1;
13867 }
13868
13869 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
13870 struct dma_fence *fence)
13871 {
13872 struct wait_rps_boost *wait;
13873
13874 if (!dma_fence_is_i915(fence))
13875 return;
13876
13877 if (INTEL_GEN(to_i915(crtc->dev)) < 6)
13878 return;
13879
13880 if (drm_crtc_vblank_get(crtc))
13881 return;
13882
13883 wait = kmalloc(sizeof(*wait), GFP_KERNEL);
13884 if (!wait) {
13885 drm_crtc_vblank_put(crtc);
13886 return;
13887 }
13888
13889 wait->request = to_request(dma_fence_get(fence));
13890 wait->crtc = crtc;
13891
13892 wait->wait.func = do_rps_boost;
13893 wait->wait.flags = 0;
13894
13895 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
13896 }
13897
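/*
 * Illustrative note, not driver code: the wait entry armed above is
 * woken from the crtc's vblank waitqueue, so do_rps_boost() runs at
 * the flip's target vblank.  If the flip's render request has not
 * even started by then, the frame was likely missed, and boosting
 * the GPU clocks gives the next frame a better chance.
 */
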
13898 static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
13899 {
13900 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
13901 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
13902 struct drm_framebuffer *fb = plane_state->base.fb;
13903 struct i915_vma *vma;
13904
13905 if (plane->id == PLANE_CURSOR &&
13906 INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
13907 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13908 const int align = intel_cursor_alignment(dev_priv);
13909 int err;
13910
13911 err = i915_gem_object_attach_phys(obj, align);
13912 if (err)
13913 return err;
13914 }
13915
13916 vma = intel_pin_and_fence_fb_obj(fb,
13917 &plane_state->view,
13918 intel_plane_uses_fence(plane_state),
13919 &plane_state->flags);
13920 if (IS_ERR(vma))
13921 return PTR_ERR(vma);
13922
13923 plane_state->vma = vma;
13924
13925 return 0;
13926 }
13927
13928 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
13929 {
13930 struct i915_vma *vma;
13931
13932 vma = fetch_and_zero(&old_plane_state->vma);
13933 if (vma)
13934 intel_unpin_fb_vma(vma, old_plane_state->flags);
13935 }
13936
13937 static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
13938 {
13939 struct i915_sched_attr attr = {
13940 .priority = I915_PRIORITY_DISPLAY,
13941 };
13942
13943 i915_gem_object_wait_priority(obj, 0, &attr);
13944 }
13945
13946 /**
13947 * intel_prepare_plane_fb - Prepare fb for usage on plane
13948 * @plane: drm plane to prepare for
13949 * @new_state: the plane state being prepared
13950 *
13951 * Prepares a framebuffer for usage on a display plane. Generally this
13952 * involves pinning the underlying object and updating the frontbuffer tracking
13953 * bits. Some older platforms need special physical address handling for
13954 * cursor planes.
13955 *
13956 * Must be called with struct_mutex held.
13957 *
13958 * Returns 0 on success, negative error code on failure.
13959 */
13960 int
13961 intel_prepare_plane_fb(struct drm_plane *plane,
13962 struct drm_plane_state *new_state)
13963 {
13964 struct intel_atomic_state *intel_state =
13965 to_intel_atomic_state(new_state->state);
13966 struct drm_i915_private *dev_priv = to_i915(plane->dev);
13967 struct drm_framebuffer *fb = new_state->fb;
13968 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13969 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
13970 int ret;
13971
13972 if (old_obj) {
13973 struct drm_crtc_state *crtc_state =
13974 drm_atomic_get_new_crtc_state(new_state->state,
13975 plane->state->crtc);
13976
13977 /* Big Hammer, we also need to ensure that any pending
13978 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
13979 * current scanout is retired before unpinning the old
13980 * framebuffer. Note that we rely on userspace rendering
13981 * into the buffer attached to the pipe they are waiting
13982 * on. If not, userspace generates a GPU hang with IPEHR
13983		 * pointing to the MI_WAIT_FOR_EVENT.
13984 *
13985 * This should only fail upon a hung GPU, in which case we
13986 * can safely continue.
13987 */
13988 if (needs_modeset(crtc_state)) {
13989 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
13990 old_obj->resv, NULL,
13991 false, 0,
13992 GFP_KERNEL);
13993 if (ret < 0)
13994 return ret;
13995 }
13996 }
13997
13998 if (new_state->fence) { /* explicit fencing */
13999 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
14000 new_state->fence,
14001 I915_FENCE_TIMEOUT,
14002 GFP_KERNEL);
14003 if (ret < 0)
14004 return ret;
14005 }
14006
14007 if (!obj)
14008 return 0;
14009
14010 ret = i915_gem_object_pin_pages(obj);
14011 if (ret)
14012 return ret;
14013
14014 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
14015 if (ret) {
14016 i915_gem_object_unpin_pages(obj);
14017 return ret;
14018 }
14019
14020 ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
14021
14022 mutex_unlock(&dev_priv->drm.struct_mutex);
14023 i915_gem_object_unpin_pages(obj);
14024 if (ret)
14025 return ret;
14026
14027 fb_obj_bump_render_priority(obj);
14028 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
14029
14030 if (!new_state->fence) { /* implicit fencing */
14031 struct dma_fence *fence;
14032
14033 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
14034 obj->resv, NULL,
14035 false, I915_FENCE_TIMEOUT,
14036 GFP_KERNEL);
14037 if (ret < 0)
14038 return ret;
14039
14040 fence = reservation_object_get_excl_rcu(obj->resv);
14041 if (fence) {
14042 add_rps_boost_after_vblank(new_state->crtc, fence);
14043 dma_fence_put(fence);
14044 }
14045 } else {
14046 add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
14047 }
14048
14049 /*
14050 * We declare pageflips to be interactive and so merit a small bias
14051 * towards upclocking to deliver the frame on time. By only changing
14052 * the RPS thresholds to sample more regularly and aim for higher
14053	 * clocks, we can hopefully deliver low power workloads (like kodi)
14054 * that are not quite steady state without resorting to forcing
14055 * maximum clocks following a vblank miss (see do_rps_boost()).
14056 */
14057 if (!intel_state->rps_interactive) {
14058 intel_rps_mark_interactive(dev_priv, true);
14059 intel_state->rps_interactive = true;
14060 }
14061
14062 return 0;
14063 }
14064
14065 /**
14066 * intel_cleanup_plane_fb - Cleans up an fb after plane use
14067 * @plane: drm plane to clean up for
14068 * @old_state: the state from the previous modeset
14069 *
14070 * Cleans up a framebuffer that has just been removed from a plane.
14071 *
14072 * Must not be called with struct_mutex held; it is taken internally.
14073 */
14074 void
14075 intel_cleanup_plane_fb(struct drm_plane *plane,
14076 struct drm_plane_state *old_state)
14077 {
14078 struct intel_atomic_state *intel_state =
14079 to_intel_atomic_state(old_state->state);
14080 struct drm_i915_private *dev_priv = to_i915(plane->dev);
14081
14082 if (intel_state->rps_interactive) {
14083 intel_rps_mark_interactive(dev_priv, false);
14084 intel_state->rps_interactive = false;
14085 }
14086
14087 /* Should only be called after a successful intel_prepare_plane_fb()! */
14088 mutex_lock(&dev_priv->drm.struct_mutex);
14089 intel_plane_unpin_fb(to_intel_plane_state(old_state));
14090 mutex_unlock(&dev_priv->drm.struct_mutex);
14091 }
14092
14093 int
14094 skl_max_scale(const struct intel_crtc_state *crtc_state,
14095 u32 pixel_format)
14096 {
14097 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
14098 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14099 int max_scale, mult;
14100 int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
14101
14102 if (!crtc_state->base.enable)
14103 return DRM_PLANE_HELPER_NO_SCALING;
14104
14105 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
14106 max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
14107
14108 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
14109 max_dotclk *= 2;
14110
14111 if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
14112 return DRM_PLANE_HELPER_NO_SCALING;
14113
14114 /*
14115 * The skl max scale is the lower of:
14116 * just under 3.0 (in 16.16 fixed point; the -1 keeps the
14117 * value strictly below 3), or
14118 * cdclk / crtc_clock
14119 */
14120 mult = is_planar_yuv_format(pixel_format) ? 2 : 3;
14121 tmpclk1 = (1 << 16) * mult - 1;
14122 tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
14123 max_scale = min(tmpclk1, tmpclk2);
14124
14125 return max_scale;
14126 }
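
/*
 * Worked example (illustrative numbers, not from the original
 * comments): for an RGB format mult = 3, so tmpclk1 =
 * (1 << 16) * 3 - 1 = 196607, i.e. just under 3.0 in 16.16 fixed
 * point. With crtc_clock = 148500 and max_dotclk = 337500,
 * tmpclk2 = 256 * ((337500 << 8) / 148500) = 148736 (~2.27), so
 * max_scale = 148736 and the cdclk ratio, not the 3x hardware
 * limit, is the binding constraint.
 */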
14127
14128 static void intel_begin_crtc_commit(struct intel_atomic_state *state,
14129 struct intel_crtc *crtc)
14130 {
14131 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14132 struct intel_crtc_state *old_crtc_state =
14133 intel_atomic_get_old_crtc_state(state, crtc);
14134 struct intel_crtc_state *new_crtc_state =
14135 intel_atomic_get_new_crtc_state(state, crtc);
14136 bool modeset = needs_modeset(&new_crtc_state->base);
14137
14138 /* Perform vblank evasion around commit operation */
14139 intel_pipe_update_start(new_crtc_state);
14140
14141 if (modeset)
14142 goto out;
14143
14144 if (new_crtc_state->base.color_mgmt_changed ||
14145 new_crtc_state->update_pipe)
14146 intel_color_commit(new_crtc_state);
14147
14148 if (new_crtc_state->update_pipe)
14149 intel_update_pipe_config(old_crtc_state, new_crtc_state);
14150 else if (INTEL_GEN(dev_priv) >= 9)
14151 skl_detach_scalers(new_crtc_state);
14152
14153 out:
14154 if (dev_priv->display.atomic_update_watermarks)
14155 dev_priv->display.atomic_update_watermarks(state,
14156 new_crtc_state);
14157 }
14158
14159 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
14160 struct intel_crtc_state *crtc_state)
14161 {
14162 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14163
14164 if (!IS_GEN(dev_priv, 2))
14165 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
14166
14167 if (crtc_state->has_pch_encoder) {
14168 enum pipe pch_transcoder =
14169 intel_crtc_pch_transcoder(crtc);
14170
14171 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
14172 }
14173 }
14174
14175 static void intel_finish_crtc_commit(struct intel_atomic_state *state,
14176 struct intel_crtc *crtc)
14177 {
14178 struct intel_crtc_state *old_crtc_state =
14179 intel_atomic_get_old_crtc_state(state, crtc);
14180 struct intel_crtc_state *new_crtc_state =
14181 intel_atomic_get_new_crtc_state(state, crtc);
14182
14183 intel_pipe_update_end(new_crtc_state);
14184
14185 if (new_crtc_state->update_pipe &&
14186 !needs_modeset(&new_crtc_state->base) &&
14187 old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
14188 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
14189 }
14190
14191 /**
14192 * intel_plane_destroy - destroy a plane
14193 * @plane: plane to destroy
14194 *
14195 * Common destruction function for all types of planes (primary, cursor,
14196 * sprite).
14197 */
14198 void intel_plane_destroy(struct drm_plane *plane)
14199 {
14200 drm_plane_cleanup(plane);
14201 kfree(to_intel_plane(plane));
14202 }
14203
14204 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
14205 u32 format, u64 modifier)
14206 {
14207 switch (modifier) {
14208 case DRM_FORMAT_MOD_LINEAR:
14209 case I915_FORMAT_MOD_X_TILED:
14210 break;
14211 default:
14212 return false;
14213 }
14214
14215 switch (format) {
14216 case DRM_FORMAT_C8:
14217 case DRM_FORMAT_RGB565:
14218 case DRM_FORMAT_XRGB1555:
14219 case DRM_FORMAT_XRGB8888:
14220 return modifier == DRM_FORMAT_MOD_LINEAR ||
14221 modifier == I915_FORMAT_MOD_X_TILED;
14222 default:
14223 return false;
14224 }
14225 }
14226
14227 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
14228 u32 format, u64 modifier)
14229 {
14230 switch (modifier) {
14231 case DRM_FORMAT_MOD_LINEAR:
14232 case I915_FORMAT_MOD_X_TILED:
14233 break;
14234 default:
14235 return false;
14236 }
14237
14238 switch (format) {
14239 case DRM_FORMAT_C8:
14240 case DRM_FORMAT_RGB565:
14241 case DRM_FORMAT_XRGB8888:
14242 case DRM_FORMAT_XBGR8888:
14243 case DRM_FORMAT_XRGB2101010:
14244 case DRM_FORMAT_XBGR2101010:
14245 return modifier == DRM_FORMAT_MOD_LINEAR ||
14246 modifier == I915_FORMAT_MOD_X_TILED;
14247 default:
14248 return false;
14249 }
14250 }
14251
14252 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
14253 u32 format, u64 modifier)
14254 {
14255 return modifier == DRM_FORMAT_MOD_LINEAR &&
14256 format == DRM_FORMAT_ARGB8888;
14257 }
14258
14259 static const struct drm_plane_funcs i965_plane_funcs = {
14260 .update_plane = drm_atomic_helper_update_plane,
14261 .disable_plane = drm_atomic_helper_disable_plane,
14262 .destroy = intel_plane_destroy,
14263 .atomic_get_property = intel_plane_atomic_get_property,
14264 .atomic_set_property = intel_plane_atomic_set_property,
14265 .atomic_duplicate_state = intel_plane_duplicate_state,
14266 .atomic_destroy_state = intel_plane_destroy_state,
14267 .format_mod_supported = i965_plane_format_mod_supported,
14268 };
14269
14270 static const struct drm_plane_funcs i8xx_plane_funcs = {
14271 .update_plane = drm_atomic_helper_update_plane,
14272 .disable_plane = drm_atomic_helper_disable_plane,
14273 .destroy = intel_plane_destroy,
14274 .atomic_get_property = intel_plane_atomic_get_property,
14275 .atomic_set_property = intel_plane_atomic_set_property,
14276 .atomic_duplicate_state = intel_plane_duplicate_state,
14277 .atomic_destroy_state = intel_plane_destroy_state,
14278 .format_mod_supported = i8xx_plane_format_mod_supported,
14279 };
14280
14281 static int
14282 intel_legacy_cursor_update(struct drm_plane *plane,
14283 struct drm_crtc *crtc,
14284 struct drm_framebuffer *fb,
14285 int crtc_x, int crtc_y,
14286 unsigned int crtc_w, unsigned int crtc_h,
14287 u32 src_x, u32 src_y,
14288 u32 src_w, u32 src_h,
14289 struct drm_modeset_acquire_ctx *ctx)
14290 {
14291 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
14292 int ret;
14293 struct drm_plane_state *old_plane_state, *new_plane_state;
14294 struct intel_plane *intel_plane = to_intel_plane(plane);
14295 struct drm_framebuffer *old_fb;
14296 struct intel_crtc_state *crtc_state =
14297 to_intel_crtc_state(crtc->state);
14298 struct intel_crtc_state *new_crtc_state;
14299
14300 /*
14301 * When crtc is inactive or there is a modeset pending,
14302 * wait for it to complete in the slowpath
14303 */
14304 if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
14305 crtc_state->update_pipe)
14306 goto slow;
14307
14308 old_plane_state = plane->state;
14309 /*
14310 * Don't do an async update if there is an outstanding commit modifying
14311 * the plane. This prevents our async update's changes from getting
14312 * overridden by a previous synchronous update's state.
14313 */
14314 if (old_plane_state->commit &&
14315 !try_wait_for_completion(&old_plane_state->commit->hw_done))
14316 goto slow;
14317
14318 /*
14319 * If any parameters change that may affect watermarks,
14320 * take the slowpath. Only changing fb or position should be
14321 * in the fastpath.
14322 */
14323 if (old_plane_state->crtc != crtc ||
14324 old_plane_state->src_w != src_w ||
14325 old_plane_state->src_h != src_h ||
14326 old_plane_state->crtc_w != crtc_w ||
14327 old_plane_state->crtc_h != crtc_h ||
14328 !old_plane_state->fb != !fb)
14329 goto slow;
14330
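/*
 * Illustrative example: moving a 64x64 ARGB cursor to a new position
 * with an unchanged fb reaches this fastpath, whereas resizing it
 * (crtc_w/crtc_h) would have changed the watermarks and taken the
 * slowpath above.
 */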
14331 new_plane_state = intel_plane_duplicate_state(plane);
14332 if (!new_plane_state)
14333 return -ENOMEM;
14334
14335 new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
14336 if (!new_crtc_state) {
14337 ret = -ENOMEM;
14338 goto out_free;
14339 }
14340
14341 drm_atomic_set_fb_for_plane(new_plane_state, fb);
14342
14343 new_plane_state->src_x = src_x;
14344 new_plane_state->src_y = src_y;
14345 new_plane_state->src_w = src_w;
14346 new_plane_state->src_h = src_h;
14347 new_plane_state->crtc_x = crtc_x;
14348 new_plane_state->crtc_y = crtc_y;
14349 new_plane_state->crtc_w = crtc_w;
14350 new_plane_state->crtc_h = crtc_h;
14351
14352 ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
14353 to_intel_plane_state(old_plane_state),
14354 to_intel_plane_state(new_plane_state));
14355 if (ret)
14356 goto out_free;
14357
14358 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
14359 if (ret)
14360 goto out_free;
14361
14362 ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
14363 if (ret)
14364 goto out_unlock;
14365
14366 intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);
14367
14368 old_fb = old_plane_state->fb;
14369 i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
14370 intel_plane->frontbuffer_bit);
14371
14372 /* Swap plane state */
14373 plane->state = new_plane_state;
14374
14375 /*
14376 * We cannot swap crtc_state as it may be in use by an atomic commit or
14377 * page flip that's running simultaneously. If we swap crtc_state and
14378 * destroy the old state, we will cause a use-after-free there.
14379 *
14380 * Only update active_planes, which is needed for our internal
14381 * bookkeeping. Either value will do the right thing when updating
14382 * planes atomically. If the cursor was part of the atomic update then
14383 * we would have taken the slowpath.
14384 */
14385 crtc_state->active_planes = new_crtc_state->active_planes;
14386
14387 if (plane->state->visible)
14388 intel_update_plane(intel_plane, crtc_state,
14389 to_intel_plane_state(plane->state));
14390 else
14391 intel_disable_plane(intel_plane, crtc_state);
14392
14393 intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
14394
14395 out_unlock:
14396 mutex_unlock(&dev_priv->drm.struct_mutex);
14397 out_free:
14398 if (new_crtc_state)
14399 intel_crtc_destroy_state(crtc, &new_crtc_state->base);
14400 if (ret)
14401 intel_plane_destroy_state(plane, new_plane_state);
14402 else
14403 intel_plane_destroy_state(plane, old_plane_state);
14404 return ret;
14405
14406 slow:
14407 return drm_atomic_helper_update_plane(plane, crtc, fb,
14408 crtc_x, crtc_y, crtc_w, crtc_h,
14409 src_x, src_y, src_w, src_h, ctx);
14410 }
14411
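/*
 * The cursor plane deliberately overrides .update_plane with the
 * legacy fastpath above: the old cursor ioctls are routed through
 * drm_mode_cursor_universal(), which ends up calling ->update_plane
 * directly, so this is where the fastpath gets wired in.
 */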
14412 static const struct drm_plane_funcs intel_cursor_plane_funcs = {
14413 .update_plane = intel_legacy_cursor_update,
14414 .disable_plane = drm_atomic_helper_disable_plane,
14415 .destroy = intel_plane_destroy,
14416 .atomic_get_property = intel_plane_atomic_get_property,
14417 .atomic_set_property = intel_plane_atomic_set_property,
14418 .atomic_duplicate_state = intel_plane_duplicate_state,
14419 .atomic_destroy_state = intel_plane_destroy_state,
14420 .format_mod_supported = intel_cursor_format_mod_supported,
14421 };
14422
14423 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
14424 enum i9xx_plane_id i9xx_plane)
14425 {
14426 if (!HAS_FBC(dev_priv))
14427 return false;
14428
14429 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
14430 return i9xx_plane == PLANE_A; /* tied to pipe A */
14431 else if (IS_IVYBRIDGE(dev_priv))
14432 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
14433 i9xx_plane == PLANE_C;
14434 else if (INTEL_GEN(dev_priv) >= 4)
14435 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
14436 else
14437 return i9xx_plane == PLANE_A;
14438 }
14439
14440 static struct intel_plane *
14441 intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
14442 {
14443 struct intel_plane *plane;
14444 const struct drm_plane_funcs *plane_funcs;
14445 unsigned int supported_rotations;
14446 unsigned int possible_crtcs;
14447 const u64 *modifiers;
14448 const u32 *formats;
14449 int num_formats;
14450 int ret;
14451
14452 if (INTEL_GEN(dev_priv) >= 9)
14453 return skl_universal_plane_create(dev_priv, pipe,
14454 PLANE_PRIMARY);
14455
14456 plane = intel_plane_alloc();
14457 if (IS_ERR(plane))
14458 return plane;
14459
14460 plane->pipe = pipe;
14461 /*
14462 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
14463 * port are hooked to pipe B. Hence we want plane A feeding pipe B.
14464 */
14465 if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
14466 plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
14467 else
14468 plane->i9xx_plane = (enum i9xx_plane_id) pipe;
14469 plane->id = PLANE_PRIMARY;
14470 plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
14471
14472 plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
14473 if (plane->has_fbc) {
14474 struct intel_fbc *fbc = &dev_priv->fbc;
14475
14476 fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
14477 }
14478
14479 if (INTEL_GEN(dev_priv) >= 4) {
14480 formats = i965_primary_formats;
14481 num_formats = ARRAY_SIZE(i965_primary_formats);
14482 modifiers = i9xx_format_modifiers;
14483
14484 plane->max_stride = i9xx_plane_max_stride;
14485 plane->update_plane = i9xx_update_plane;
14486 plane->disable_plane = i9xx_disable_plane;
14487 plane->get_hw_state = i9xx_plane_get_hw_state;
14488 plane->check_plane = i9xx_plane_check;
14489
14490 plane_funcs = &i965_plane_funcs;
14491 } else {
14492 formats = i8xx_primary_formats;
14493 num_formats = ARRAY_SIZE(i8xx_primary_formats);
14494 modifiers = i9xx_format_modifiers;
14495
14496 plane->max_stride = i9xx_plane_max_stride;
14497 plane->update_plane = i9xx_update_plane;
14498 plane->disable_plane = i9xx_disable_plane;
14499 plane->get_hw_state = i9xx_plane_get_hw_state;
14500 plane->check_plane = i9xx_plane_check;
14501
14502 plane_funcs = &i8xx_plane_funcs;
14503 }
14504
14505 possible_crtcs = BIT(pipe);
14506
14507 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
14508 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
14509 possible_crtcs, plane_funcs,
14510 formats, num_formats, modifiers,
14511 DRM_PLANE_TYPE_PRIMARY,
14512 "primary %c", pipe_name(pipe));
14513 else
14514 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
14515 possible_crtcs, plane_funcs,
14516 formats, num_formats, modifiers,
14517 DRM_PLANE_TYPE_PRIMARY,
14518 "plane %c",
14519 plane_name(plane->i9xx_plane));
14520 if (ret)
14521 goto fail;
14522
14523 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
14524 supported_rotations =
14525 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
14526 DRM_MODE_REFLECT_X;
14527 } else if (INTEL_GEN(dev_priv) >= 4) {
14528 supported_rotations =
14529 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
14530 } else {
14531 supported_rotations = DRM_MODE_ROTATE_0;
14532 }
14533
14534 if (INTEL_GEN(dev_priv) >= 4)
14535 drm_plane_create_rotation_property(&plane->base,
14536 DRM_MODE_ROTATE_0,
14537 supported_rotations);
14538
14539 drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
14540
14541 return plane;
14542
14543 fail:
14544 intel_plane_free(plane);
14545
14546 return ERR_PTR(ret);
14547 }
14548
14549 static struct intel_plane *
14550 intel_cursor_plane_create(struct drm_i915_private *dev_priv,
14551 enum pipe pipe)
14552 {
14553 unsigned int possible_crtcs;
14554 struct intel_plane *cursor;
14555 int ret;
14556
14557 cursor = intel_plane_alloc();
14558 if (IS_ERR(cursor))
14559 return cursor;
14560
14561 cursor->pipe = pipe;
14562 cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
14563 cursor->id = PLANE_CURSOR;
14564 cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
14565
14566 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
14567 cursor->max_stride = i845_cursor_max_stride;
14568 cursor->update_plane = i845_update_cursor;
14569 cursor->disable_plane = i845_disable_cursor;
14570 cursor->get_hw_state = i845_cursor_get_hw_state;
14571 cursor->check_plane = i845_check_cursor;
14572 } else {
14573 cursor->max_stride = i9xx_cursor_max_stride;
14574 cursor->update_plane = i9xx_update_cursor;
14575 cursor->disable_plane = i9xx_disable_cursor;
14576 cursor->get_hw_state = i9xx_cursor_get_hw_state;
14577 cursor->check_plane = i9xx_check_cursor;
14578 }
14579
14580 cursor->cursor.base = ~0;
14581 cursor->cursor.cntl = ~0;
14582
14583 if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
14584 cursor->cursor.size = ~0;
14585
14586 possible_crtcs = BIT(pipe);
14587
14588 ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
14589 possible_crtcs, &intel_cursor_plane_funcs,
14590 intel_cursor_formats,
14591 ARRAY_SIZE(intel_cursor_formats),
14592 cursor_format_modifiers,
14593 DRM_PLANE_TYPE_CURSOR,
14594 "cursor %c", pipe_name(pipe));
14595 if (ret)
14596 goto fail;
14597
14598 if (INTEL_GEN(dev_priv) >= 4)
14599 drm_plane_create_rotation_property(&cursor->base,
14600 DRM_MODE_ROTATE_0,
14601 DRM_MODE_ROTATE_0 |
14602 DRM_MODE_ROTATE_180);
14603
14604 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
14605
14606 return cursor;
14607
14608 fail:
14609 intel_plane_free(cursor);
14610
14611 return ERR_PTR(ret);
14612 }
14613
14614 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
14615 struct intel_crtc_state *crtc_state)
14616 {
14617 struct intel_crtc_scaler_state *scaler_state =
14618 &crtc_state->scaler_state;
14619 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14620 int i;
14621
14622 crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
14623 if (!crtc->num_scalers)
14624 return;
14625
14626 for (i = 0; i < crtc->num_scalers; i++) {
14627 struct intel_scaler *scaler = &scaler_state->scalers[i];
14628
14629 scaler->in_use = 0;
14630 scaler->mode = 0;
14631 }
14632
14633 scaler_state->scaler_id = -1;
14634 }
14635
14636 static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
14637 {
14638 struct intel_crtc *intel_crtc;
14639 struct intel_crtc_state *crtc_state = NULL;
14640 struct intel_plane *primary = NULL;
14641 struct intel_plane *cursor = NULL;
14642 int sprite, ret;
14643
14644 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
14645 if (!intel_crtc)
14646 return -ENOMEM;
14647
14648 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
14649 if (!crtc_state) {
14650 ret = -ENOMEM;
14651 goto fail;
14652 }
14653 intel_crtc->config = crtc_state;
14654 intel_crtc->base.state = &crtc_state->base;
14655 crtc_state->base.crtc = &intel_crtc->base;
14656
14657 primary = intel_primary_plane_create(dev_priv, pipe);
14658 if (IS_ERR(primary)) {
14659 ret = PTR_ERR(primary);
14660 goto fail;
14661 }
14662 intel_crtc->plane_ids_mask |= BIT(primary->id);
14663
14664 for_each_sprite(dev_priv, pipe, sprite) {
14665 struct intel_plane *plane;
14666
14667 plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
14668 if (IS_ERR(plane)) {
14669 ret = PTR_ERR(plane);
14670 goto fail;
14671 }
14672 intel_crtc->plane_ids_mask |= BIT(plane->id);
14673 }
14674
14675 cursor = intel_cursor_plane_create(dev_priv, pipe);
14676 if (IS_ERR(cursor)) {
14677 ret = PTR_ERR(cursor);
14678 goto fail;
14679 }
14680 intel_crtc->plane_ids_mask |= BIT(cursor->id);
14681
14682 ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
14683 &primary->base, &cursor->base,
14684 &intel_crtc_funcs,
14685 "pipe %c", pipe_name(pipe));
14686 if (ret)
14687 goto fail;
14688
14689 intel_crtc->pipe = pipe;
14690
14691 /* initialize shared scalers */
14692 intel_crtc_init_scalers(intel_crtc, crtc_state);
14693
14694 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
14695 dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
14696 dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;
14697
14698 if (INTEL_GEN(dev_priv) < 9) {
14699 enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
14700
14701 BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
14702 dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
14703 dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
14704 }
14705
14706 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
14707
14708 intel_color_init(intel_crtc);
14709
14710 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
14711
14712 return 0;
14713
14714 fail:
14715 /*
14716 * drm_mode_config_cleanup() will free up any
14717 * crtcs/planes already initialized.
14718 */
14719 kfree(crtc_state);
14720 kfree(intel_crtc);
14721
14722 return ret;
14723 }
14724
14725 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
14726 struct drm_file *file)
14727 {
14728 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
14729 struct drm_crtc *drmmode_crtc;
14730 struct intel_crtc *crtc;
14731
14732 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
14733 if (!drmmode_crtc)
14734 return -ENOENT;
14735
14736 crtc = to_intel_crtc(drmmode_crtc);
14737 pipe_from_crtc_id->pipe = crtc->pipe;
14738
14739 return 0;
14740 }
14741
14742 static int intel_encoder_clones(struct intel_encoder *encoder)
14743 {
14744 struct drm_device *dev = encoder->base.dev;
14745 struct intel_encoder *source_encoder;
14746 int index_mask = 0;
14747 int entry = 0;
14748
14749 for_each_intel_encoder(dev, source_encoder) {
14750 if (encoders_cloneable(encoder, source_encoder))
14751 index_mask |= (1 << entry);
14752
14753 entry++;
14754 }
14755
14756 return index_mask;
14757 }
14758
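/*
 * Illustrative example: if the encoders at indices 0 and 2 of the
 * encoder list are cloneable with the one passed in, index_mask is
 * 0x5; intel_setup_outputs() stores the result in
 * encoder->base.possible_clones.
 */
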
14759 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
14760 {
14761 if (!IS_MOBILE(dev_priv))
14762 return false;
14763
14764 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14765 return false;
14766
14767 if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
14768 return false;
14769
14770 return true;
14771 }
14772
14773 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
14774 {
14775 if (INTEL_GEN(dev_priv) >= 9)
14776 return false;
14777
14778 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
14779 return false;
14780
14781 if (HAS_PCH_LPT_H(dev_priv) &&
14782 I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
14783 return false;
14784
14785 /* DDI E can't be used if DDI A requires 4 lanes */
14786 if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
14787 return false;
14788
14789 if (!dev_priv->vbt.int_crt_support)
14790 return false;
14791
14792 return true;
14793 }
14794
14795 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
14796 {
14797 int pps_num;
14798 int pps_idx;
14799
14800 if (HAS_DDI(dev_priv))
14801 return;
14802 /*
14803 * This w/a is needed at least on CPT/PPT, but to be sure apply it
14804 * everywhere PPS registers can be write protected.
14805 */
14806 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
14807 pps_num = 2;
14808 else
14809 pps_num = 1;
14810
14811 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
14812 u32 val = I915_READ(PP_CONTROL(pps_idx));
14813
14814 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
14815 I915_WRITE(PP_CONTROL(pps_idx), val);
14816 }
14817 }
14818
14819 static void intel_pps_init(struct drm_i915_private *dev_priv)
14820 {
14821 if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
14822 dev_priv->pps_mmio_base = PCH_PPS_BASE;
14823 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
14824 dev_priv->pps_mmio_base = VLV_PPS_BASE;
14825 else
14826 dev_priv->pps_mmio_base = PPS_BASE;
14827
14828 intel_pps_unlock_regs_wa(dev_priv);
14829 }
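
/*
 * Note: the PPS register block sits at a platform dependent mmio
 * offset; the pps_mmio_base chosen above is the base that the
 * PP_CONTROL()/PP_STATUS() register macros build on.
 */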
14830
14831 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
14832 {
14833 struct intel_encoder *encoder;
14834 bool dpd_is_edp = false;
14835
14836 intel_pps_init(dev_priv);
14837
14838 if (!HAS_DISPLAY(dev_priv))
14839 return;
14840
14841 if (IS_ELKHARTLAKE(dev_priv)) {
14842 intel_ddi_init(dev_priv, PORT_A);
14843 intel_ddi_init(dev_priv, PORT_B);
14844 intel_ddi_init(dev_priv, PORT_C);
14845 icl_dsi_init(dev_priv);
14846 } else if (INTEL_GEN(dev_priv) >= 11) {
14847 intel_ddi_init(dev_priv, PORT_A);
14848 intel_ddi_init(dev_priv, PORT_B);
14849 intel_ddi_init(dev_priv, PORT_C);
14850 intel_ddi_init(dev_priv, PORT_D);
14851 intel_ddi_init(dev_priv, PORT_E);
14852 /*
14853 * On some ICL SKUs port F is not present and there are no strap
14854 * bits for it, so rely on the VBT; the SKU check below works
14855 * around broken VBTs on SKUs known to have no port F.
14856 */
14857 if (IS_ICL_WITH_PORT_F(dev_priv) &&
14858 intel_bios_is_port_present(dev_priv, PORT_F))
14859 intel_ddi_init(dev_priv, PORT_F);
14860
14861 icl_dsi_init(dev_priv);
14862 } else if (IS_GEN9_LP(dev_priv)) {
14863 /*
14864 * FIXME: Broxton doesn't support port detection via the
14865 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
14866 * detect the ports.
14867 */
14868 intel_ddi_init(dev_priv, PORT_A);
14869 intel_ddi_init(dev_priv, PORT_B);
14870 intel_ddi_init(dev_priv, PORT_C);
14871
14872 vlv_dsi_init(dev_priv);
14873 } else if (HAS_DDI(dev_priv)) {
14874 int found;
14875
14876 if (intel_ddi_crt_present(dev_priv))
14877 intel_crt_init(dev_priv);
14878
14879 /*
14880 * Haswell uses DDI functions to detect digital outputs.
14881 * On SKL pre-D0 the strap isn't connected, so we assume
14882 * it's there.
14883 */
14884 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
14885 /* WaIgnoreDDIAStrap: skl */
14886 if (found || IS_GEN9_BC(dev_priv))
14887 intel_ddi_init(dev_priv, PORT_A);
14888
14889 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
14890 * register */
14891 found = I915_READ(SFUSE_STRAP);
14892
14893 if (found & SFUSE_STRAP_DDIB_DETECTED)
14894 intel_ddi_init(dev_priv, PORT_B);
14895 if (found & SFUSE_STRAP_DDIC_DETECTED)
14896 intel_ddi_init(dev_priv, PORT_C);
14897 if (found & SFUSE_STRAP_DDID_DETECTED)
14898 intel_ddi_init(dev_priv, PORT_D);
14899 if (found & SFUSE_STRAP_DDIF_DETECTED)
14900 intel_ddi_init(dev_priv, PORT_F);
14901 /*
14902 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
14903 */
14904 if (IS_GEN9_BC(dev_priv) &&
14905 intel_bios_is_port_present(dev_priv, PORT_E))
14906 intel_ddi_init(dev_priv, PORT_E);
14907
14908 } else if (HAS_PCH_SPLIT(dev_priv)) {
14909 int found;
14910
14911 /*
14912 * intel_edp_init_connector() depends on this completing first,
14913 * to prevent the registration of both eDP and LVDS and the
14914 * incorrect sharing of the PPS.
14915 */
14916 intel_lvds_init(dev_priv);
14917 intel_crt_init(dev_priv);
14918
14919 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
14920
14921 if (ilk_has_edp_a(dev_priv))
14922 intel_dp_init(dev_priv, DP_A, PORT_A);
14923
14924 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
14925 /* PCH SDVOB multiplex with HDMIB */
14926 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
14927 if (!found)
14928 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
14929 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
14930 intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
14931 }
14932
14933 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
14934 intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
14935
14936 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
14937 intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
14938
14939 if (I915_READ(PCH_DP_C) & DP_DETECTED)
14940 intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
14941
14942 if (I915_READ(PCH_DP_D) & DP_DETECTED)
14943 intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
14944 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
14945 bool has_edp, has_port;
14946
14947 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
14948 intel_crt_init(dev_priv);
14949
14950 /*
14951 * The DP_DETECTED bit is the latched state of the DDC
14952 * SDA pin at boot. However since eDP doesn't require DDC
14953 * (no way to plug in a DP->HDMI dongle) the DDC pins for
14954 * eDP ports may have been muxed to an alternate function.
14955 * Thus we can't rely on the DP_DETECTED bit alone to detect
14956 * eDP ports. Consult the VBT as well as DP_DETECTED to
14957 * detect eDP ports.
14958 *
14959 * Sadly the straps seem to be missing sometimes even for HDMI
14960 * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both strap
14961 * and VBT for the presence of the port. Additionally we can't
14962 * trust the port type the VBT declares as we've seen at least
14963 * HDMI ports that the VBT claims are DP or eDP.
14964 */
14965 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
14966 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
14967 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
14968 has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
14969 if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
14970 intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
14971
14972 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
14973 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
14974 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
14975 has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
14976 if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
14977 intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
14978
14979 if (IS_CHERRYVIEW(dev_priv)) {
14980 /*
14981 * eDP not supported on port D,
14982 * so no need to worry about it
14983 */
14984 has_port = intel_bios_is_port_present(dev_priv, PORT_D);
14985 if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
14986 intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
14987 if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
14988 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
14989 }
14990
14991 vlv_dsi_init(dev_priv);
14992 } else if (IS_PINEVIEW(dev_priv)) {
14993 intel_lvds_init(dev_priv);
14994 intel_crt_init(dev_priv);
14995 } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
14996 bool found = false;
14997
14998 if (IS_MOBILE(dev_priv))
14999 intel_lvds_init(dev_priv);
15000
15001 intel_crt_init(dev_priv);
15002
15003 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
15004 DRM_DEBUG_KMS("probing SDVOB\n");
15005 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
15006 if (!found && IS_G4X(dev_priv)) {
15007 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
15008 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
15009 }
15010
15011 if (!found && IS_G4X(dev_priv))
15012 intel_dp_init(dev_priv, DP_B, PORT_B);
15013 }
15014
15015 /* Before G4X, SDVOC doesn't have its own detect register */
15016
15017 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
15018 DRM_DEBUG_KMS("probing SDVOC\n");
15019 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
15020 }
15021
15022 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
15023
15024 if (IS_G4X(dev_priv)) {
15025 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
15026 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
15027 }
15028 if (IS_G4X(dev_priv))
15029 intel_dp_init(dev_priv, DP_C, PORT_C);
15030 }
15031
15032 if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
15033 intel_dp_init(dev_priv, DP_D, PORT_D);
15034
15035 if (SUPPORTS_TV(dev_priv))
15036 intel_tv_init(dev_priv);
15037 } else if (IS_GEN(dev_priv, 2)) {
15038 if (IS_I85X(dev_priv))
15039 intel_lvds_init(dev_priv);
15040
15041 intel_crt_init(dev_priv);
15042 intel_dvo_init(dev_priv);
15043 }
15044
15045 intel_psr_init(dev_priv);
15046
15047 for_each_intel_encoder(&dev_priv->drm, encoder) {
15048 encoder->base.possible_crtcs = encoder->crtc_mask;
15049 encoder->base.possible_clones =
15050 intel_encoder_clones(encoder);
15051 }
15052
15053 intel_init_pch_refclk(dev_priv);
15054
15055 drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
15056 }
15057
15058 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
15059 {
15060 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
15061 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15062
15063 drm_framebuffer_cleanup(fb);
15064
15065 i915_gem_object_lock(obj);
15066 WARN_ON(!obj->framebuffer_references--);
15067 i915_gem_object_unlock(obj);
15068
15069 i915_gem_object_put(obj);
15070
15071 kfree(intel_fb);
15072 }
15073
15074 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
15075 struct drm_file *file,
15076 unsigned int *handle)
15077 {
15078 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15079
15080 if (obj->userptr.mm) {
15081 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
15082 return -EINVAL;
15083 }
15084
15085 return drm_gem_handle_create(file, &obj->base, handle);
15086 }
15087
15088 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
15089 struct drm_file *file,
15090 unsigned flags, unsigned color,
15091 struct drm_clip_rect *clips,
15092 unsigned num_clips)
15093 {
15094 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15095
15096 i915_gem_object_flush_if_display(obj);
15097 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
15098
15099 return 0;
15100 }
15101
15102 static const struct drm_framebuffer_funcs intel_fb_funcs = {
15103 .destroy = intel_user_framebuffer_destroy,
15104 .create_handle = intel_user_framebuffer_create_handle,
15105 .dirty = intel_user_framebuffer_dirty,
15106 };
15107
15108 static
15109 u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
15110 u32 pixel_format, u64 fb_modifier)
15111 {
15112 struct intel_crtc *crtc;
15113 struct intel_plane *plane;
15114
15115 /*
15116 * We assume the primary plane for pipe A has
15117 * the highest stride limits of them all.
15118 */
15119 crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
15120 plane = to_intel_plane(crtc->base.primary);
15121
15122 return plane->max_stride(plane, pixel_format, fb_modifier,
15123 DRM_MODE_ROTATE_0);
15124 }
15125
15126 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
15127 struct drm_i915_gem_object *obj,
15128 struct drm_mode_fb_cmd2 *mode_cmd)
15129 {
15130 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
15131 struct drm_framebuffer *fb = &intel_fb->base;
15132 u32 pitch_limit;
15133 unsigned int tiling, stride;
15134 int ret = -EINVAL;
15135 int i;
15136
15137 i915_gem_object_lock(obj);
15138 obj->framebuffer_references++;
15139 tiling = i915_gem_object_get_tiling(obj);
15140 stride = i915_gem_object_get_stride(obj);
15141 i915_gem_object_unlock(obj);
15142
15143 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
15144 /*
15145 * If there's a fence, enforce that
15146 * the fb modifier and tiling mode match.
15147 */
15148 if (tiling != I915_TILING_NONE &&
15149 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
15150 DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
15151 goto err;
15152 }
15153 } else {
15154 if (tiling == I915_TILING_X) {
15155 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
15156 } else if (tiling == I915_TILING_Y) {
15157 DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
15158 goto err;
15159 }
15160 }
15161
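/*
 * In short: with DRM_MODE_FB_MODIFIERS userspace must pass a modifier
 * consistent with the object's fence tiling, while legacy addfb has
 * the X-tiling modifier inferred from the object and Y-tiled objects
 * rejected outright.
 */
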
15162 if (!drm_any_plane_has_format(&dev_priv->drm,
15163 mode_cmd->pixel_format,
15164 mode_cmd->modifier[0])) {
15165 struct drm_format_name_buf format_name;
15166
15167 DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
15168 drm_get_format_name(mode_cmd->pixel_format,
15169 &format_name),
15170 mode_cmd->modifier[0]);
15171 goto err;
15172 }
15173
15174 /*
15175 * gen2/3 display engine uses the fence if present,
15176 * so the tiling mode must match the fb modifier exactly.
15177 */
15178 if (INTEL_GEN(dev_priv) < 4 &&
15179 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
15180 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
15181 goto err;
15182 }
15183
15184 pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->pixel_format,
15185 mode_cmd->modifier[0]);
15186 if (mode_cmd->pitches[0] > pitch_limit) {
15187 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
15188 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
15189 "tiled" : "linear",
15190 mode_cmd->pitches[0], pitch_limit);
15191 goto err;
15192 }
15193
15194 /*
15195 * If there's a fence, enforce that
15196 * the fb pitch and fence stride match.
15197 */
15198 if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
15199 DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
15200 mode_cmd->pitches[0], stride);
15201 goto err;
15202 }
15203
15204 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
15205 if (mode_cmd->offsets[0] != 0)
15206 goto err;
15207
15208 drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
15209
15210 for (i = 0; i < fb->format->num_planes; i++) {
15211 u32 stride_alignment;
15212
15213 if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
15214 DRM_DEBUG_KMS("bad plane %d handle\n", i);
15215 goto err;
15216 }
15217
15218 stride_alignment = intel_fb_stride_alignment(fb, i);
15219
15220 /*
15221 * Display WA #0531: skl,bxt,kbl,glk
15222 *
15223 * Render decompression and plane width > 3840
15224 * combined with horizontal panning requires the
15225 * plane stride to be a multiple of 4. We'll just
15226 * require the entire fb to accommodate that to avoid
15227 * potential runtime errors at plane configuration time.
15228 */
15229 if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
15230 is_ccs_modifier(fb->modifier))
15231 stride_alignment *= 4;
15232
15233 if (fb->pitches[i] & (stride_alignment - 1)) {
15234 DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
15235 i, fb->pitches[i], stride_alignment);
15236 goto err;
15237 }
15238
15239 fb->obj[i] = &obj->base;
15240 }
15241
15242 ret = intel_fill_fb_info(dev_priv, fb);
15243 if (ret)
15244 goto err;
15245
15246 ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
15247 if (ret) {
15248 DRM_ERROR("framebuffer init failed %d\n", ret);
15249 goto err;
15250 }
15251
15252 return 0;
15253
15254 err:
15255 i915_gem_object_lock(obj);
15256 obj->framebuffer_references--;
15257 i915_gem_object_unlock(obj);
15258 return ret;
15259 }
15260
15261 static struct drm_framebuffer *
15262 intel_user_framebuffer_create(struct drm_device *dev,
15263 struct drm_file *filp,
15264 const struct drm_mode_fb_cmd2 *user_mode_cmd)
15265 {
15266 struct drm_framebuffer *fb;
15267 struct drm_i915_gem_object *obj;
15268 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
15269
15270 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
15271 if (!obj)
15272 return ERR_PTR(-ENOENT);
15273
15274 fb = intel_framebuffer_create(obj, &mode_cmd);
15275 if (IS_ERR(fb))
15276 i915_gem_object_put(obj);
15277
15278 return fb;
15279 }
15280
15281 static void intel_atomic_state_free(struct drm_atomic_state *state)
15282 {
15283 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
15284
15285 drm_atomic_state_default_release(state);
15286
15287 i915_sw_fence_fini(&intel_state->commit_ready);
15288
15289 kfree(state);
15290 }
15291
15292 static enum drm_mode_status
15293 intel_mode_valid(struct drm_device *dev,
15294 const struct drm_display_mode *mode)
15295 {
15296 struct drm_i915_private *dev_priv = to_i915(dev);
15297 int hdisplay_max, htotal_max;
15298 int vdisplay_max, vtotal_max;
15299
15300 /*
15301 * Can't reject DBLSCAN here because Xorg ddxen can add piles
15302 * of DBLSCAN modes to the output's mode list when they detect
15303 * the scaling mode property on the connector. And they don't
15304 * ask the kernel to validate those modes in any way until
15305 * modeset time at which point the client gets a protocol error.
15306 * So in order to not upset those clients we silently ignore the
15307 * DBLSCAN flag on such connectors. For other connectors we will
15308 * reject modes with the DBLSCAN flag in encoder->compute_config().
15309 * And we always reject DBLSCAN modes in connector->mode_valid()
15310 * as we never want such modes on the connector's mode list.
15311 */
15312
15313 if (mode->vscan > 1)
15314 return MODE_NO_VSCAN;
15315
15316 if (mode->flags & DRM_MODE_FLAG_HSKEW)
15317 return MODE_H_ILLEGAL;
15318
15319 if (mode->flags & (DRM_MODE_FLAG_CSYNC |
15320 DRM_MODE_FLAG_NCSYNC |
15321 DRM_MODE_FLAG_PCSYNC))
15322 return MODE_HSYNC;
15323
15324 if (mode->flags & (DRM_MODE_FLAG_BCAST |
15325 DRM_MODE_FLAG_PIXMUX |
15326 DRM_MODE_FLAG_CLKDIV2))
15327 return MODE_BAD;
15328
15329 if (INTEL_GEN(dev_priv) >= 9 ||
15330 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
15331 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
15332 vdisplay_max = 4096;
15333 htotal_max = 8192;
15334 vtotal_max = 8192;
15335 } else if (INTEL_GEN(dev_priv) >= 3) {
15336 hdisplay_max = 4096;
15337 vdisplay_max = 4096;
15338 htotal_max = 8192;
15339 vtotal_max = 8192;
15340 } else {
15341 hdisplay_max = 2048;
15342 vdisplay_max = 2048;
15343 htotal_max = 4096;
15344 vtotal_max = 4096;
15345 }
15346
15347 if (mode->hdisplay > hdisplay_max ||
15348 mode->hsync_start > htotal_max ||
15349 mode->hsync_end > htotal_max ||
15350 mode->htotal > htotal_max)
15351 return MODE_H_ILLEGAL;
15352
15353 if (mode->vdisplay > vdisplay_max ||
15354 mode->vsync_start > vtotal_max ||
15355 mode->vsync_end > vtotal_max ||
15356 mode->vtotal > vtotal_max)
15357 return MODE_V_ILLEGAL;
15358
15359 return MODE_OK;
15360 }
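
/*
 * Example (illustrative): a 4096x2160 mode fits within these limits
 * on gen9+ (hdisplay_max = 8192) but is rejected with MODE_H_ILLEGAL
 * on gen2, where hdisplay_max is only 2048.
 */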
15361
15362 static const struct drm_mode_config_funcs intel_mode_funcs = {
15363 .fb_create = intel_user_framebuffer_create,
15364 .get_format_info = intel_get_format_info,
15365 .output_poll_changed = intel_fbdev_output_poll_changed,
15366 .mode_valid = intel_mode_valid,
15367 .atomic_check = intel_atomic_check,
15368 .atomic_commit = intel_atomic_commit,
15369 .atomic_state_alloc = intel_atomic_state_alloc,
15370 .atomic_state_clear = intel_atomic_state_clear,
15371 .atomic_state_free = intel_atomic_state_free,
15372 };
15373
15374 /**
15375 * intel_init_display_hooks - initialize the display modesetting hooks
15376 * @dev_priv: device private
15377 */
15378 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
15379 {
15380 intel_init_cdclk_hooks(dev_priv);
15381
15382 if (INTEL_GEN(dev_priv) >= 9) {
15383 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
15384 dev_priv->display.get_initial_plane_config =
15385 skylake_get_initial_plane_config;
15386 dev_priv->display.crtc_compute_clock =
15387 haswell_crtc_compute_clock;
15388 dev_priv->display.crtc_enable = haswell_crtc_enable;
15389 dev_priv->display.crtc_disable = haswell_crtc_disable;
15390 } else if (HAS_DDI(dev_priv)) {
15391 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
15392 dev_priv->display.get_initial_plane_config =
15393 i9xx_get_initial_plane_config;
15394 dev_priv->display.crtc_compute_clock =
15395 haswell_crtc_compute_clock;
15396 dev_priv->display.crtc_enable = haswell_crtc_enable;
15397 dev_priv->display.crtc_disable = haswell_crtc_disable;
15398 } else if (HAS_PCH_SPLIT(dev_priv)) {
15399 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
15400 dev_priv->display.get_initial_plane_config =
15401 i9xx_get_initial_plane_config;
15402 dev_priv->display.crtc_compute_clock =
15403 ironlake_crtc_compute_clock;
15404 dev_priv->display.crtc_enable = ironlake_crtc_enable;
15405 dev_priv->display.crtc_disable = ironlake_crtc_disable;
15406 } else if (IS_CHERRYVIEW(dev_priv)) {
15407 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15408 dev_priv->display.get_initial_plane_config =
15409 i9xx_get_initial_plane_config;
15410 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
15411 dev_priv->display.crtc_enable = valleyview_crtc_enable;
15412 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15413 } else if (IS_VALLEYVIEW(dev_priv)) {
15414 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15415 dev_priv->display.get_initial_plane_config =
15416 i9xx_get_initial_plane_config;
15417 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
15418 dev_priv->display.crtc_enable = valleyview_crtc_enable;
15419 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15420 } else if (IS_G4X(dev_priv)) {
15421 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15422 dev_priv->display.get_initial_plane_config =
15423 i9xx_get_initial_plane_config;
15424 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
15425 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15426 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15427 } else if (IS_PINEVIEW(dev_priv)) {
15428 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15429 dev_priv->display.get_initial_plane_config =
15430 i9xx_get_initial_plane_config;
15431 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
15432 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15433 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15434 } else if (!IS_GEN(dev_priv, 2)) {
15435 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15436 dev_priv->display.get_initial_plane_config =
15437 i9xx_get_initial_plane_config;
15438 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
15439 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15440 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15441 } else {
15442 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15443 dev_priv->display.get_initial_plane_config =
15444 i9xx_get_initial_plane_config;
15445 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
15446 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15447 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15448 }
15449
15450 if (IS_GEN(dev_priv, 5)) {
15451 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
15452 } else if (IS_GEN(dev_priv, 6)) {
15453 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
15454 } else if (IS_IVYBRIDGE(dev_priv)) {
15455 /* FIXME: detect B0+ stepping and use auto training */
15456 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
15457 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
15458 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
15459 }
15460
15461 if (INTEL_GEN(dev_priv) >= 9)
15462 dev_priv->display.update_crtcs = skl_update_crtcs;
15463 else
15464 dev_priv->display.update_crtcs = intel_update_crtcs;
15465 }
15466
15467 /* Disable the VGA plane that we never use */
15468 static void i915_disable_vga(struct drm_i915_private *dev_priv)
15469 {
15470 struct pci_dev *pdev = dev_priv->drm.pdev;
15471 u8 sr1;
15472 i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
15473
15474 /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
15475 vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
15476 outb(SR01, VGA_SR_INDEX);
15477 sr1 = inb(VGA_SR_DATA);
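/* setting bit 5 of SR01 (clocking mode) turns the screen off */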
15478 outb(sr1 | 1<<5, VGA_SR_DATA);
15479 vga_put(pdev, VGA_RSRC_LEGACY_IO);
15480 udelay(300);
15481
15482 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
15483 POSTING_READ(vga_reg);
15484 }
15485
15486 void intel_modeset_init_hw(struct drm_device *dev)
15487 {
15488 struct drm_i915_private *dev_priv = to_i915(dev);
15489
15490 intel_update_cdclk(dev_priv);
15491 intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
15492 dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
15493 }
15494
15495 /*
15496 * Calculate what we think the watermarks should be for the state we've read
15497 * out of the hardware and then immediately program those watermarks so that
15498 * we ensure the hardware settings match our internal state.
15499 *
15500 * We can calculate what we think WMs should be by creating a duplicate of the
15501 * current state (which was constructed during hardware readout) and running it
15502 * through the atomic check code to calculate new watermark values in the
15503 * state object.
15504 */
15505 static void sanitize_watermarks(struct drm_device *dev)
15506 {
15507 struct drm_i915_private *dev_priv = to_i915(dev);
15508 struct drm_atomic_state *state;
15509 struct intel_atomic_state *intel_state;
15510 struct drm_crtc *crtc;
15511 struct drm_crtc_state *cstate;
15512 struct drm_modeset_acquire_ctx ctx;
15513 int ret;
15514 int i;
15515
15516 /* Only supported on platforms that use atomic watermark design */
15517 if (!dev_priv->display.optimize_watermarks)
15518 return;
15519
15520 /*
15521 * We need to hold connection_mutex before calling duplicate_state so
15522 * that the connector loop is protected.
15523 */
15524 drm_modeset_acquire_init(&ctx, 0);
15525 retry:
15526 ret = drm_modeset_lock_all_ctx(dev, &ctx);
15527 if (ret == -EDEADLK) {
15528 drm_modeset_backoff(&ctx);
15529 goto retry;
15530 } else if (WARN_ON(ret)) {
15531 goto fail;
15532 }
15533
15534 state = drm_atomic_helper_duplicate_state(dev, &ctx);
15535 if (WARN_ON(IS_ERR(state)))
15536 goto fail;
15537
15538 intel_state = to_intel_atomic_state(state);
15539
15540 /*
15541 * Hardware readout is the only time we don't want to calculate
15542 * intermediate watermarks (since we don't trust the current
15543 * watermarks).
15544 */
15545 if (!HAS_GMCH(dev_priv))
15546 intel_state->skip_intermediate_wm = true;
15547
15548 ret = intel_atomic_check(dev, state);
15549 if (ret) {
15550 /*
15551 * If we fail here, it means that the hardware appears to be
15552 * programmed in a way that shouldn't be possible, given our
15553 * understanding of watermark requirements. This might mean a
15554 * mistake in the hardware readout code or a mistake in the
15555 * watermark calculations for a given platform. Raise a WARN
15556 * so that this is noticeable.
15557 *
15558 * If this actually happens, we'll have to just leave the
15559 * BIOS-programmed watermarks untouched and hope for the best.
15560 */
15561 WARN(true, "Could not determine valid watermarks for inherited state\n");
15562 goto put_state;
15563 }
15564
15565 /* Write calculated watermark values back */
15566 for_each_new_crtc_in_state(state, crtc, cstate, i) {
15567 struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
15568
15569 cs->wm.need_postvbl_update = true;
15570 dev_priv->display.optimize_watermarks(intel_state, cs);
15571
15572 to_intel_crtc_state(crtc->state)->wm = cs->wm;
15573 }
15574
15575 put_state:
15576 drm_atomic_state_put(state);
15577 fail:
15578 drm_modeset_drop_locks(&ctx);
15579 drm_modeset_acquire_fini(&ctx);
15580 }
15581
15582 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
15583 {
15584 if (IS_GEN(dev_priv, 5)) {
15585 u32 fdi_pll_clk =
15586 I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
15587
15588 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
15589 } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
15590 dev_priv->fdi_pll_freq = 270000;
15591 } else {
15592 return;
15593 }
15594
15595 DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
15596 }
15597
15598 static int intel_initial_commit(struct drm_device *dev)
15599 {
15600 struct drm_atomic_state *state = NULL;
15601 struct drm_modeset_acquire_ctx ctx;
15602 struct drm_crtc *crtc;
15603 struct drm_crtc_state *crtc_state;
15604 int ret = 0;
15605
15606 state = drm_atomic_state_alloc(dev);
15607 if (!state)
15608 return -ENOMEM;
15609
15610 drm_modeset_acquire_init(&ctx, 0);
15611
15612 retry:
15613 state->acquire_ctx = &ctx;
15614
15615 drm_for_each_crtc(crtc, dev) {
15616 crtc_state = drm_atomic_get_crtc_state(state, crtc);
15617 if (IS_ERR(crtc_state)) {
15618 ret = PTR_ERR(crtc_state);
15619 goto out;
15620 }
15621
15622 if (crtc_state->active) {
15623 ret = drm_atomic_add_affected_planes(state, crtc);
15624 if (ret)
15625 goto out;
15626
15627 /*
15628 * FIXME hack to force a LUT update to avoid the
15629 * plane update forcing the pipe gamma on without
15630 * having a proper LUT loaded. Remove once we
15631 * have readout for pipe gamma enable.
15632 */
15633 crtc_state->color_mgmt_changed = true;
15634 }
15635 }
15636
15637 ret = drm_atomic_commit(state);
15638
15639 out:
15640 if (ret == -EDEADLK) {
15641 drm_atomic_state_clear(state);
15642 drm_modeset_backoff(&ctx);
15643 goto retry;
15644 }
15645
15646 drm_atomic_state_put(state);
15647
15648 drm_modeset_drop_locks(&ctx);
15649 drm_modeset_acquire_fini(&ctx);
15650
15651 return ret;
15652 }
15653
15654 int intel_modeset_init(struct drm_device *dev)
15655 {
15656 struct drm_i915_private *dev_priv = to_i915(dev);
15657 struct i915_ggtt *ggtt = &dev_priv->ggtt;
15658 enum pipe pipe;
15659 struct intel_crtc *crtc;
15660 int ret;
15661
15662 dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
15663
15664 drm_mode_config_init(dev);
15665
15666 dev->mode_config.min_width = 0;
15667 dev->mode_config.min_height = 0;
15668
15669 dev->mode_config.preferred_depth = 24;
15670 dev->mode_config.prefer_shadow = 1;
15671
15672 dev->mode_config.allow_fb_modifiers = true;
15673
15674 dev->mode_config.funcs = &intel_mode_funcs;
15675
15676 init_llist_head(&dev_priv->atomic_helper.free_list);
15677 INIT_WORK(&dev_priv->atomic_helper.free_work,
15678 intel_atomic_helper_free_state_worker);
15679
15680 intel_init_quirks(dev_priv);
15681
15682 intel_fbc_init(dev_priv);
15683
15684 intel_init_pm(dev_priv);
15685
15686 /*
15687 * There may be no VBT; if the BIOS enabled SSC, we can
15688 * just keep using it to avoid unnecessary flicker. If the
15689 * BIOS isn't using it, don't assume it will work even if the VBT
15690 * indicates as much.
15691 */
15692 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
15693 bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
15694 DREF_SSC1_ENABLE);
15695
15696 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
15697 DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
15698 bios_lvds_use_ssc ? "en" : "dis",
15699 dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
15700 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
15701 }
15702 }
15703
15704 /* maximum framebuffer dimensions */
15705 if (IS_GEN(dev_priv, 2)) {
15706 dev->mode_config.max_width = 2048;
15707 dev->mode_config.max_height = 2048;
15708 } else if (IS_GEN(dev_priv, 3)) {
15709 dev->mode_config.max_width = 4096;
15710 dev->mode_config.max_height = 4096;
15711 } else {
15712 dev->mode_config.max_width = 8192;
15713 dev->mode_config.max_height = 8192;
15714 }
15715
15716 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
15717 dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
15718 dev->mode_config.cursor_height = 1023;
15719 } else if (IS_GEN(dev_priv, 2)) {
15720 dev->mode_config.cursor_width = 64;
15721 dev->mode_config.cursor_height = 64;
15722 } else {
15723 dev->mode_config.cursor_width = 256;
15724 dev->mode_config.cursor_height = 256;
15725 }
15726
15727 dev->mode_config.fb_base = ggtt->gmadr.start;
15728
15729 DRM_DEBUG_KMS("%d display pipe%s available.\n",
15730 INTEL_INFO(dev_priv)->num_pipes,
15731 INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
15732
15733 for_each_pipe(dev_priv, pipe) {
15734 ret = intel_crtc_init(dev_priv, pipe);
15735 if (ret) {
15736 drm_mode_config_cleanup(dev);
15737 return ret;
15738 }
15739 }
15740
15741 intel_shared_dpll_init(dev);
15742 intel_update_fdi_pll_freq(dev_priv);
15743
15744 intel_update_czclk(dev_priv);
15745 intel_modeset_init_hw(dev);
15746
15747 intel_hdcp_component_init(dev_priv);
15748
15749 if (dev_priv->max_cdclk_freq == 0)
15750 intel_update_max_cdclk(dev_priv);
15751
15752 /* Just disable it once at startup */
15753 i915_disable_vga(dev_priv);
15754 intel_setup_outputs(dev_priv);
15755
15756 drm_modeset_lock_all(dev);
15757 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
15758 drm_modeset_unlock_all(dev);
15759
15760 for_each_intel_crtc(dev, crtc) {
15761 struct intel_initial_plane_config plane_config = {};
15762
15763 if (!crtc->active)
15764 continue;
15765
15766 /*
15767 * Note that reserving the BIOS fb up front prevents us
15768 * from stuffing other stolen allocations like the ring
15769 * on top. This prevents some ugliness at boot time, and
15770 * can even allow for smooth boot transitions if the BIOS
15771 * fb is large enough for the active pipe configuration.
15772 */
15773 dev_priv->display.get_initial_plane_config(crtc,
15774 &plane_config);
15775
15776 /*
15777 * If the fb is shared between multiple heads, we'll
15778 * just get the first one.
15779 */
15780 intel_find_initial_plane_obj(crtc, &plane_config);
15781 }
15782
15783 /*
15784 * Make sure hardware watermarks really match the state we read out.
15785 * Note that we need to do this after reconstructing the BIOS fb's
15786 * since the watermark calculation done here will use pstate->fb.
15787 */
15788 if (!HAS_GMCH(dev_priv))
15789 sanitize_watermarks(dev);
15790
15791 /*
15792 * Force all active planes to recompute their states. So that on
15793 * mode_setcrtc after probe, all the intel_plane_state variables
15794 * are already calculated and there is no assert_plane warnings
15795 * during bootup.
15796 */
15797 ret = intel_initial_commit(dev);
15798 if (ret)
15799 DRM_DEBUG_KMS("Initial commit in probe failed.\n");
15800
15801 return 0;
15802 }
15803
15804 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
15805 {
15806 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
15807 /* 640x480@60Hz, ~25175 kHz */
15808 struct dpll clock = {
15809 .m1 = 18,
15810 .m2 = 7,
15811 .p1 = 13,
15812 .p2 = 4,
15813 .n = 2,
15814 };
15815 u32 dpll, fp;
15816 int i;
15817
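/*
 * Sanity check on the divider maths. A sketch, assuming the i9xx DPLL
 * rules m = 5 * (m1 + 2) + (m2 + 2), vco = refclk * m / (n + 2) and
 * dot = vco / (p1 * p2): 48000 * 109 / 4 = 1308000 kHz VCO, and
 * 1308000 / 52 ~= 25154 kHz dot clock, just shy of the nominal
 * 25175 kHz pixel clock for 640x480@60Hz.
 */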
15818 WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);
15819
15820 DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
15821 pipe_name(pipe), clock.vco, clock.dot);
15822
15823 fp = i9xx_dpll_compute_fp(&clock);
15824 dpll = DPLL_DVO_2X_MODE |
15825 DPLL_VGA_MODE_DIS |
15826 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
15827 PLL_P2_DIVIDE_BY_4 |
15828 PLL_REF_INPUT_DREFCLK |
15829 DPLL_VCO_ENABLE;
15830
15831 I915_WRITE(FP0(pipe), fp);
15832 I915_WRITE(FP1(pipe), fp);
15833
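/*
 * Standard VGA 640x480@60 timings: 800 total horizontal pixels with
 * hsync from 656 to 752, and 525 total lines with vsync from 490 to
 * 492. The hardware expects all values minus one.
 */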
15834 I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
15835 I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
15836 I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
15837 I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
15838 I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
15839 I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
15840 I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
15841
15842 /*
15843 * Apparently we need to have VGA mode enabled prior to changing
15844 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
15845 * dividers, even though the register value does change.
15846 */
15847 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
15848 I915_WRITE(DPLL(pipe), dpll);
15849
15850 /* Wait for the clocks to stabilize. */
15851 POSTING_READ(DPLL(pipe));
15852 udelay(150);
15853
15854 /* The pixel multiplier can only be updated once the
15855 * DPLL is enabled and the clocks are stable.
15856 *
15857 * So write it again.
15858 */
15859 I915_WRITE(DPLL(pipe), dpll);
15860
15861 /* We do this three times for luck */
15862 for (i = 0; i < 3 ; i++) {
15863 I915_WRITE(DPLL(pipe), dpll);
15864 POSTING_READ(DPLL(pipe));
15865 udelay(150); /* wait for warmup */
15866 }
15867
15868 I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
15869 POSTING_READ(PIPECONF(pipe));
15870
15871 intel_wait_for_pipe_scanline_moving(crtc);
15872 }
15873
15874 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
15875 {
15876 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
15877
15878 DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
15879 pipe_name(pipe));
15880
15881 WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
15882 WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
15883 WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
15884 WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
15885 WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);
15886
15887 I915_WRITE(PIPECONF(pipe), 0);
15888 POSTING_READ(PIPECONF(pipe));
15889
15890 intel_wait_for_pipe_scanline_stopped(crtc);
15891
15892 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
15893 POSTING_READ(DPLL(pipe));
15894 }
15895
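/*
 * On gen2/3 the primary planes can be assigned to either pipe, and the
 * BIOS may have left a plane attached to the wrong one. Detach any such
 * plane so that it no longer scans out for a pipe we don't control.
 */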
15896 static void
15897 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
15898 {
15899 struct intel_crtc *crtc;
15900
15901 if (INTEL_GEN(dev_priv) >= 4)
15902 return;
15903
15904 for_each_intel_crtc(&dev_priv->drm, crtc) {
15905 struct intel_plane *plane =
15906 to_intel_plane(crtc->base.primary);
15907 struct intel_crtc *plane_crtc;
15908 enum pipe pipe;
15909
15910 if (!plane->get_hw_state(plane, &pipe))
15911 continue;
15912
15913 if (pipe == crtc->pipe)
15914 continue;
15915
15916 DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
15917 plane->base.base.id, plane->base.name);
15918
15919 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
15920 intel_plane_disable_noatomic(plane_crtc, plane);
15921 }
15922 }
15923
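/* Returns true if at least one encoder is currently attached to the crtc. */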
15924 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15925 {
15926 struct drm_device *dev = crtc->base.dev;
15927 struct intel_encoder *encoder;
15928
15929 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15930 return true;
15931
15932 return false;
15933 }
15934
15935 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
15936 {
15937 struct drm_device *dev = encoder->base.dev;
15938 struct intel_connector *connector;
15939
15940 for_each_connector_on_encoder(dev, &encoder->base, connector)
15941 return connector;
15942
15943 return NULL;
15944 }
15945
15946 static bool has_pch_transcoder(struct drm_i915_private *dev_priv,
15947 enum pipe pch_transcoder)
15948 {
15949 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
15950 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
15951 }
15952
15953 static void intel_sanitize_crtc(struct intel_crtc *crtc,
15954 struct drm_modeset_acquire_ctx *ctx)
15955 {
15956 struct drm_device *dev = crtc->base.dev;
15957 struct drm_i915_private *dev_priv = to_i915(dev);
15958 struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
15959 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
15960
15961 /* Clear any frame start delays the BIOS may have left set for debugging */
15962 if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
15963 i915_reg_t reg = PIPECONF(cpu_transcoder);
15964
15965 I915_WRITE(reg,
15966 I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
15967 }
15968
15969 if (crtc_state->base.active) {
15970 struct intel_plane *plane;
15971
15972 /* Disable everything but the primary plane */
15973 for_each_intel_plane_on_crtc(dev, crtc, plane) {
15974 const struct intel_plane_state *plane_state =
15975 to_intel_plane_state(plane->base.state);
15976
15977 if (plane_state->base.visible &&
15978 plane->base.type != DRM_PLANE_TYPE_PRIMARY)
15979 intel_plane_disable_noatomic(crtc, plane);
15980 }
15981
15982 /*
15983 * Disable any background color set by the BIOS, but enable the
15984 * gamma and CSC to match how we program our planes.
15985 */
15986 if (INTEL_GEN(dev_priv) >= 9)
15987 I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
15988 SKL_BOTTOM_COLOR_GAMMA_ENABLE |
15989 SKL_BOTTOM_COLOR_CSC_ENABLE);
15990 }
15991
15992 /* Adjust the state of the output pipe according to whether we
15993 * have active connectors/encoders. */
15994 if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
15995 intel_crtc_disable_noatomic(&crtc->base, ctx);
15996
15997 if (crtc_state->base.active || HAS_GMCH(dev_priv)) {
15998 /*
15999 * We start out with underrun reporting disabled to avoid races.
16000 * For correct bookkeeping mark this on active crtcs.
16001 *
16002 * Also on gmch platforms we don't have any hardware bits to
16003 * disable the underrun reporting. Which means we need to start
16004 * out with underrun reporting disabled also on inactive pipes,
16005 * since otherwise we'll complain about the garbage we read when
16006 * e.g. coming up after runtime pm.
16007 *
16008 * No protection against concurrent access is required - at
16009 * worst a fifo underrun happens which also sets this to false.
16010 */
16011 crtc->cpu_fifo_underrun_disabled = true;
16012 /*
16013 * We track the PCH transcoder underrun reporting state
16014 * within the crtc. With crtc for pipe A housing the underrun
16015 * reporting state for PCH transcoder A, crtc for pipe B housing
16016 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
16017 * and marking underrun reporting as disabled for the non-existing
16018 * PCH transcoders B and C would prevent enabling the south
16019 * error interrupt (see cpt_can_enable_serr_int()).
16020 */
16021 if (has_pch_transcoder(dev_priv, crtc->pipe))
16022 crtc->pch_fifo_underrun_disabled = true;
16023 }
16024 }
16025
16026 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
16027 {
16028 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
16029
16030 /*
16031 * Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
16032 * the hardware when a high res display is plugged in. DPLL P
16033 * divider is zero, and the pipe timings are bonkers. We'll
16034 * try to disable everything in that case.
16035 *
16036 * FIXME would be nice to be able to sanitize this state
16037 * without several WARNs, but for now let's take the easy
16038 * road.
16039 */
16040 return IS_GEN(dev_priv, 6) &&
16041 crtc_state->base.active &&
16042 crtc_state->shared_dpll &&
16043 crtc_state->port_clock == 0;
16044 }
16045
16046 static void intel_sanitize_encoder(struct intel_encoder *encoder)
16047 {
16048 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
16049 struct intel_connector *connector;
16050 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
16051 struct intel_crtc_state *crtc_state = crtc ?
16052 to_intel_crtc_state(crtc->base.state) : NULL;
16053
16054 /* We need to check both for a crtc link (meaning that the
16055 * encoder is active and trying to read from a pipe) and the
16056 * pipe itself being active. */
16057 bool has_active_crtc = crtc_state &&
16058 crtc_state->base.active;
16059
16060 if (crtc_state && has_bogus_dpll_config(crtc_state)) {
16061 DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
16062 pipe_name(crtc->pipe));
16063 has_active_crtc = false;
16064 }
16065
16066 connector = intel_encoder_find_connector(encoder);
16067 if (connector && !has_active_crtc) {
16068 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
16069 encoder->base.base.id,
16070 encoder->base.name);
16071
16072 /* Connector is active, but has no active pipe. This is
16073 * fallout from our resume register restoring. Disable
16074 * the encoder manually again. */
16075 if (crtc_state) {
16076 struct drm_encoder *best_encoder;
16077
16078 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
16079 encoder->base.base.id,
16080 encoder->base.name);
16081
16082 /* avoid oopsing in case the hooks consult best_encoder */
16083 best_encoder = connector->base.state->best_encoder;
16084 connector->base.state->best_encoder = &encoder->base;
16085
16086 if (encoder->disable)
16087 encoder->disable(encoder, crtc_state,
16088 connector->base.state);
16089 if (encoder->post_disable)
16090 encoder->post_disable(encoder, crtc_state,
16091 connector->base.state);
16092
16093 connector->base.state->best_encoder = best_encoder;
16094 }
16095 encoder->base.crtc = NULL;
16096
16097 /* Inconsistent output/port/pipe state happens presumably due to
16098 * a bug in one of the get_hw_state functions. Or someplace else
16099 * in our code, like the register restore mess on resume. Clamp
16100 * things to off as a safer default. */
16101
16102 connector->base.dpms = DRM_MODE_DPMS_OFF;
16103 connector->base.encoder = NULL;
16104 }
16105
16106 /* notify opregion of the sanitized encoder state */
16107 intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
16108
16109 if (INTEL_GEN(dev_priv) >= 11)
16110 icl_sanitize_encoder_pll_mapping(encoder);
16111 }
16112
16113 void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
16114 {
16115 i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
16116
16117 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
16118 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
16119 i915_disable_vga(dev_priv);
16120 }
16121 }
16122
16123 void i915_redisable_vga(struct drm_i915_private *dev_priv)
16124 {
16125 intel_wakeref_t wakeref;
16126
16127 /*
16128 * This function can be called either from intel_modeset_setup_hw_state or
16129 * at a very early point in our resume sequence, where the power well
16130 * structures are not yet restored. Since this function is at a very
16131 * paranoid "someone might have enabled VGA while we were not looking"
16132 * level, just check if the power well is enabled instead of trying to
16133 * follow the "don't touch the power well if we don't need it" policy
16134 * the rest of the driver uses.
16135 */
16136 wakeref = intel_display_power_get_if_enabled(dev_priv,
16137 POWER_DOMAIN_VGA);
16138 if (!wakeref)
16139 return;
16140
16141 i915_redisable_vga_power_on(dev_priv);
16142
16143 intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
16144 }
16145
16146 /* FIXME read out full plane state for all planes */
16147 static void readout_plane_state(struct drm_i915_private *dev_priv)
16148 {
16149 struct intel_plane *plane;
16150 struct intel_crtc *crtc;
16151
16152 for_each_intel_plane(&dev_priv->drm, plane) {
16153 struct intel_plane_state *plane_state =
16154 to_intel_plane_state(plane->base.state);
16155 struct intel_crtc_state *crtc_state;
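/*
 * get_hw_state() may not write the pipe back (e.g. when the
 * plane's power well is off), so default to PIPE_A to keep the
 * crtc lookup below valid.
 */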
16156 enum pipe pipe = PIPE_A;
16157 bool visible;
16158
16159 visible = plane->get_hw_state(plane, &pipe);
16160
16161 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16162 crtc_state = to_intel_crtc_state(crtc->base.state);
16163
16164 intel_set_plane_visible(crtc_state, plane_state, visible);
16165
16166 DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
16167 plane->base.base.id, plane->base.name,
16168 enableddisabled(visible), pipe_name(pipe));
16169 }
16170
16171 for_each_intel_crtc(&dev_priv->drm, crtc) {
16172 struct intel_crtc_state *crtc_state =
16173 to_intel_crtc_state(crtc->base.state);
16174
16175 fixup_active_planes(crtc_state);
16176 }
16177 }
16178
16179 static void intel_modeset_readout_hw_state(struct drm_device *dev)
16180 {
16181 struct drm_i915_private *dev_priv = to_i915(dev);
16182 enum pipe pipe;
16183 struct intel_crtc *crtc;
16184 struct intel_encoder *encoder;
16185 struct intel_connector *connector;
16186 struct drm_connector_list_iter conn_iter;
16187 int i;
16188
16189 dev_priv->active_crtcs = 0;
16190
16191 for_each_intel_crtc(dev, crtc) {
16192 struct intel_crtc_state *crtc_state =
16193 to_intel_crtc_state(crtc->base.state);
16194
16195 __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
16196 memset(crtc_state, 0, sizeof(*crtc_state));
16197 crtc_state->base.crtc = &crtc->base;
16198
16199 crtc_state->base.active = crtc_state->base.enable =
16200 dev_priv->display.get_pipe_config(crtc, crtc_state);
16201
16202 crtc->base.enabled = crtc_state->base.enable;
16203 crtc->active = crtc_state->base.active;
16204
16205 if (crtc_state->base.active)
16206 dev_priv->active_crtcs |= 1 << crtc->pipe;
16207
16208 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
16209 crtc->base.base.id, crtc->base.name,
16210 enableddisabled(crtc_state->base.active));
16211 }
16212
16213 readout_plane_state(dev_priv);
16214
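/*
 * Read out the shared DPLL state and reconstruct each PLL's crtc_mask
 * from the crtcs whose state references that PLL.
 */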
16215 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
16216 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
16217
16218 pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
16219 &pll->state.hw_state);
16220 pll->state.crtc_mask = 0;
16221 for_each_intel_crtc(dev, crtc) {
16222 struct intel_crtc_state *crtc_state =
16223 to_intel_crtc_state(crtc->base.state);
16224
16225 if (crtc_state->base.active &&
16226 crtc_state->shared_dpll == pll)
16227 pll->state.crtc_mask |= 1 << crtc->pipe;
16228 }
16229 pll->active_mask = pll->state.crtc_mask;
16230
16231 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
16232 pll->info->name, pll->state.crtc_mask, pll->on);
16233 }
16234
16235 for_each_intel_encoder(dev, encoder) {
16236 pipe = 0;
16237
16238 if (encoder->get_hw_state(encoder, &pipe)) {
16239 struct intel_crtc_state *crtc_state;
16240
16241 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16242 crtc_state = to_intel_crtc_state(crtc->base.state);
16243
16244 encoder->base.crtc = &crtc->base;
16245 encoder->get_config(encoder, crtc_state);
16246 } else {
16247 encoder->base.crtc = NULL;
16248 }
16249
16250 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
16251 encoder->base.base.id, encoder->base.name,
16252 enableddisabled(encoder->base.crtc),
16253 pipe_name(pipe));
16254 }
16255
16256 drm_connector_list_iter_begin(dev, &conn_iter);
16257 for_each_intel_connector_iter(connector, &conn_iter) {
16258 if (connector->get_hw_state(connector)) {
16259 connector->base.dpms = DRM_MODE_DPMS_ON;
16260
16261 encoder = connector->encoder;
16262 connector->base.encoder = &encoder->base;
16263
16264 if (encoder->base.crtc &&
16265 encoder->base.crtc->state->active) {
16266 /*
16267 * This has to be done during hardware readout
16268 * because anything calling .crtc_disable may
16269 * rely on the connector_mask being accurate.
16270 */
16271 encoder->base.crtc->state->connector_mask |=
16272 drm_connector_mask(&connector->base);
16273 encoder->base.crtc->state->encoder_mask |=
16274 drm_encoder_mask(&encoder->base);
16275 }
16276
16277 } else {
16278 connector->base.dpms = DRM_MODE_DPMS_OFF;
16279 connector->base.encoder = NULL;
16280 }
16281 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
16282 connector->base.base.id, connector->base.name,
16283 enableddisabled(connector->base.encoder));
16284 }
16285 drm_connector_list_iter_end(&conn_iter);
16286
16287 for_each_intel_crtc(dev, crtc) {
16288 struct intel_crtc_state *crtc_state =
16289 to_intel_crtc_state(crtc->base.state);
16290 int min_cdclk = 0;
16291
16292 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
16293 if (crtc_state->base.active) {
16294 intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
16295 crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
16296 crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
16297 intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
16298 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
16299
16300 /*
16301 * The initial mode needs to be set in order to keep
16302 * the atomic core happy. It wants a valid mode if the
16303 * crtc's enabled, so we do the above call.
16304 *
16305 * But we don't set all the derived state fully, hence
16306 * set a flag to indicate that a full recalculation is
16307 * needed on the next commit.
16308 */
16309 crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;
16310
16311 intel_crtc_compute_pixel_rate(crtc_state);
16312
16313 if (dev_priv->display.modeset_calc_cdclk) {
16314 min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
16315 if (WARN_ON(min_cdclk < 0))
16316 min_cdclk = 0;
16317 }
16318
16319 drm_calc_timestamping_constants(&crtc->base,
16320 &crtc_state->base.adjusted_mode);
16321 update_scanline_offset(crtc_state);
16322 }
16323
16324 dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
16325 dev_priv->min_voltage_level[crtc->pipe] =
16326 crtc_state->min_voltage_level;
16327
16328 intel_pipe_config_sanity_check(dev_priv, crtc_state);
16329 }
16330 }
16331
16332 static void
16333 get_encoder_power_domains(struct drm_i915_private *dev_priv)
16334 {
16335 struct intel_encoder *encoder;
16336
16337 for_each_intel_encoder(&dev_priv->drm, encoder) {
16338 struct intel_crtc_state *crtc_state;
16339
16340 if (!encoder->get_power_domains)
16341 continue;
16342
16343 /*
16344 * MST-primary and inactive encoders don't have a crtc state
16345 * and neither of these requires any power domain references.
16346 */
16347 if (!encoder->base.crtc)
16348 continue;
16349
16350 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
16351 encoder->get_power_domains(encoder, crtc_state);
16352 }
16353 }
16354
16355 static void intel_early_display_was(struct drm_i915_private *dev_priv)
16356 {
16357 /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
16358 if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
16359 I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
16360 DARBF_GATING_DIS);
16361
16362 if (IS_HASWELL(dev_priv)) {
16363 /*
16364 * WaRsPkgCStateDisplayPMReq:hsw
16365 * System hang if this isn't done before disabling all planes!
16366 */
16367 I915_WRITE(CHICKEN_PAR1_1,
16368 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
16369 }
16370 }
16371
16372 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
16373 enum port port, i915_reg_t hdmi_reg)
16374 {
16375 u32 val = I915_READ(hdmi_reg);
16376
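/* Leave the port alone if it's enabled or already selects pipe A. */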
16377 if (val & SDVO_ENABLE ||
16378 (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
16379 return;
16380
16381 DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
16382 port_name(port));
16383
16384 val &= ~SDVO_PIPE_SEL_MASK;
16385 val |= SDVO_PIPE_SEL(PIPE_A);
16386
16387 I915_WRITE(hdmi_reg, val);
16388 }
16389
16390 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
16391 enum port port, i915_reg_t dp_reg)
16392 {
16393 u32 val = I915_READ(dp_reg);
16394
16395 if (val & DP_PORT_EN ||
16396 (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
16397 return;
16398
16399 DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
16400 port_name(port));
16401
16402 val &= ~DP_PIPE_SEL_MASK;
16403 val |= DP_PIPE_SEL(PIPE_A);
16404
16405 I915_WRITE(dp_reg, val);
16406 }
16407
16408 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
16409 {
16410 /*
16411 * The BIOS may select transcoder B on some of the PCH
16412 * ports even if it doesn't enable the port. This would trip
16413 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
16414 * Sanitize the transcoder select bits to prevent that. We
16415 * assume that the BIOS never actually enabled the port,
16416 * because if it did we'd actually have to toggle the port
16417 * on and back off to make the transcoder A select stick
16418 * (see intel_dp_link_down(), intel_disable_hdmi(),
16419 * intel_disable_sdvo()).
16420 */
16421 ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
16422 ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
16423 ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
16424
16425 /* PCH SDVOB multiplex with HDMIB */
16426 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
16427 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
16428 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
16429 }
16430
16431 /* Scan out the current hw modeset state, and sanitize it so
16432 * that our software state matches the hardware.
16433 */
16434 static void
16435 intel_modeset_setup_hw_state(struct drm_device *dev,
16436 struct drm_modeset_acquire_ctx *ctx)
16437 {
16438 struct drm_i915_private *dev_priv = to_i915(dev);
16439 struct intel_crtc_state *crtc_state;
16440 struct intel_encoder *encoder;
16441 struct intel_crtc *crtc;
16442 intel_wakeref_t wakeref;
16443 int i;
16444
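/*
 * Keep the init power domain held across readout and sanitization so
 * the hardware stays accessible throughout.
 */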
16445 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
16446
16447 intel_early_display_was(dev_priv);
16448 intel_modeset_readout_hw_state(dev);
16449
16450 /* HW state is read out, now we need to sanitize this mess. */
16451 get_encoder_power_domains(dev_priv);
16452
16453 if (HAS_PCH_IBX(dev_priv))
16454 ibx_sanitize_pch_ports(dev_priv);
16455
16456 /*
16457 * intel_sanitize_plane_mapping() may need to do vblank
16458 * waits, so we need vblank interrupts restored beforehand.
16459 */
16460 for_each_intel_crtc(&dev_priv->drm, crtc) {
16461 crtc_state = to_intel_crtc_state(crtc->base.state);
16462
16463 drm_crtc_vblank_reset(&crtc->base);
16464
16465 if (crtc_state->base.active)
16466 intel_crtc_vblank_on(crtc_state);
16467 }
16468
16469 intel_sanitize_plane_mapping(dev_priv);
16470
16471 for_each_intel_encoder(dev, encoder)
16472 intel_sanitize_encoder(encoder);
16473
16474 for_each_intel_crtc(&dev_priv->drm, crtc) {
16475 crtc_state = to_intel_crtc_state(crtc->base.state);
16476 intel_sanitize_crtc(crtc, ctx);
16477 intel_dump_pipe_config(crtc, crtc_state,
16478 "[setup_hw_state]");
16479 }
16480
16481 intel_modeset_update_connector_atomic_state(dev);
16482
16483 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
16484 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
16485
16486 if (!pll->on || pll->active_mask)
16487 continue;
16488
16489 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
16490 pll->info->name);
16491
16492 pll->info->funcs->disable(dev_priv, pll);
16493 pll->on = false;
16494 }
16495
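/*
 * Read out the watermark state the BIOS left behind, and sanitize it
 * where we have the means to (g4x and vlv/chv).
 */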
16496 if (IS_G4X(dev_priv)) {
16497 g4x_wm_get_hw_state(dev_priv);
16498 g4x_wm_sanitize(dev_priv);
16499 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
16500 vlv_wm_get_hw_state(dev_priv);
16501 vlv_wm_sanitize(dev_priv);
16502 } else if (INTEL_GEN(dev_priv) >= 9) {
16503 skl_wm_get_hw_state(dev_priv);
16504 } else if (HAS_PCH_SPLIT(dev_priv)) {
16505 ilk_wm_get_hw_state(dev_priv);
16506 }
16507
16508 for_each_intel_crtc(dev, crtc) {
16509 u64 put_domains;
16510
16511 crtc_state = to_intel_crtc_state(crtc->base.state);
16512 put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
16513 if (WARN_ON(put_domains))
16514 modeset_put_power_domains(dev_priv, put_domains);
16515 }
16516
16517 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
16518
16519 intel_fbc_init_pipe_state(dev_priv);
16520 }
16521
16522 void intel_display_resume(struct drm_device *dev)
16523 {
16524 struct drm_i915_private *dev_priv = to_i915(dev);
16525 struct drm_atomic_state *state = dev_priv->modeset_restore_state;
16526 struct drm_modeset_acquire_ctx ctx;
16527 int ret;
16528
16529 dev_priv->modeset_restore_state = NULL;
16530 if (state)
16531 state->acquire_ctx = &ctx;
16532
16533 drm_modeset_acquire_init(&ctx, 0);
16534
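/* Standard drm modeset locking dance: back off and retry on deadlock. */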
16535 while (1) {
16536 ret = drm_modeset_lock_all_ctx(dev, &ctx);
16537 if (ret != -EDEADLK)
16538 break;
16539
16540 drm_modeset_backoff(&ctx);
16541 }
16542
16543 if (!ret)
16544 ret = __intel_display_resume(dev, state, &ctx);
16545
16546 intel_enable_ipc(dev_priv);
16547 drm_modeset_drop_locks(&ctx);
16548 drm_modeset_acquire_fini(&ctx);
16549
16550 if (ret)
16551 DRM_ERROR("Restoring old state failed with %i\n", ret);
16552 if (state)
16553 drm_atomic_state_put(state);
16554 }
16555
16556 static void intel_hpd_poll_fini(struct drm_device *dev)
16557 {
16558 struct intel_connector *connector;
16559 struct drm_connector_list_iter conn_iter;
16560
16561 /* Kill all the work that may have been queued by hpd. */
16562 drm_connector_list_iter_begin(dev, &conn_iter);
16563 for_each_intel_connector_iter(connector, &conn_iter) {
16564 if (connector->modeset_retry_work.func)
16565 cancel_work_sync(&connector->modeset_retry_work);
16566 if (connector->hdcp.shim) {
16567 cancel_delayed_work_sync(&connector->hdcp.check_work);
16568 cancel_work_sync(&connector->hdcp.prop_work);
16569 }
16570 }
16571 drm_connector_list_iter_end(&conn_iter);
16572 }
16573
16574 void intel_modeset_cleanup(struct drm_device *dev)
16575 {
16576 struct drm_i915_private *dev_priv = to_i915(dev);
16577
16578 flush_workqueue(dev_priv->modeset_wq);
16579
16580 flush_work(&dev_priv->atomic_helper.free_work);
16581 WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
16582
16583 /*
16584 * Interrupts and polling need to be shut down first, to avoid creating
16585 * havoc. Too much stuff here (turning off connectors, ...) would
16586 * experience fancy races otherwise.
16587 */
16588 intel_irq_uninstall(dev_priv);
16589
16590 /*
16591 * Due to the hpd irq storm handling the hotplug work can re-arm the
16592 * poll handlers. Hence disable polling after hpd handling is shut down.
16593 */
16594 intel_hpd_poll_fini(dev);
16595
16596 /* poll work can call into fbdev, hence clean that up afterwards */
16597 intel_fbdev_fini(dev_priv);
16598
16599 intel_unregister_dsm_handler();
16600
16601 intel_fbc_global_disable(dev_priv);
16602
16603 /* flush any delayed tasks or pending work */
16604 flush_scheduled_work();
16605
16606 intel_hdcp_component_fini(dev_priv);
16607
16608 drm_mode_config_cleanup(dev);
16609
16610 intel_overlay_cleanup(dev_priv);
16611
16612 intel_teardown_gmbus(dev_priv);
16613
16614 destroy_workqueue(dev_priv->modeset_wq);
16615
16616 intel_fbc_cleanup_cfb(dev_priv);
16617 }
16618
16619 /*
16620 * set vga decode state - true == enable VGA decode
16621 */
16622 int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
16623 {
16624 unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
16625 u16 gmch_ctrl;
16626
16627 if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
16628 DRM_ERROR("failed to read control word\n");
16629 return -EIO;
16630 }
16631
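/* Nothing to do if VGA decode is already in the requested state. */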
16632 if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
16633 return 0;
16634
16635 if (state)
16636 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16637 else
16638 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
16639
16640 if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16641 DRM_ERROR("failed to write control word\n");
16642 return -EIO;
16643 }
16644
16645 return 0;
16646 }
16647
16648 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
16649
16650 struct intel_display_error_state {
16651
16652 u32 power_well_driver;
16653
16654 struct intel_cursor_error_state {
16655 u32 control;
16656 u32 position;
16657 u32 base;
16658 u32 size;
16659 } cursor[I915_MAX_PIPES];
16660
16661 struct intel_pipe_error_state {
16662 bool power_domain_on;
16663 u32 source;
16664 u32 stat;
16665 } pipe[I915_MAX_PIPES];
16666
16667 struct intel_plane_error_state {
16668 u32 control;
16669 u32 stride;
16670 u32 size;
16671 u32 pos;
16672 u32 addr;
16673 u32 surface;
16674 u32 tile_offset;
16675 } plane[I915_MAX_PIPES];
16676
16677 struct intel_transcoder_error_state {
16678 bool available;
16679 bool power_domain_on;
16680 enum transcoder cpu_transcoder;
16681
16682 u32 conf;
16683
16684 u32 htotal;
16685 u32 hblank;
16686 u32 hsync;
16687 u32 vtotal;
16688 u32 vblank;
16689 u32 vsync;
16690 } transcoder[4];
16691 };
16692
16693 struct intel_display_error_state *
16694 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
16695 {
16696 struct intel_display_error_state *error;
16697 int transcoders[] = {
16698 TRANSCODER_A,
16699 TRANSCODER_B,
16700 TRANSCODER_C,
16701 TRANSCODER_EDP,
16702 };
16703 int i;
16704
16705 BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
16706
16707 if (!HAS_DISPLAY(dev_priv))
16708 return NULL;
16709
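/* GFP_ATOMIC since error capture may run from contexts that cannot sleep. */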
16710 error = kzalloc(sizeof(*error), GFP_ATOMIC);
16711 if (error == NULL)
16712 return NULL;
16713
16714 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
16715 error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
16716
16717 for_each_pipe(dev_priv, i) {
16718 error->pipe[i].power_domain_on =
16719 __intel_display_power_is_enabled(dev_priv,
16720 POWER_DOMAIN_PIPE(i));
16721 if (!error->pipe[i].power_domain_on)
16722 continue;
16723
16724 error->cursor[i].control = I915_READ(CURCNTR(i));
16725 error->cursor[i].position = I915_READ(CURPOS(i));
16726 error->cursor[i].base = I915_READ(CURBASE(i));
16727
16728 error->plane[i].control = I915_READ(DSPCNTR(i));
16729 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
16730 if (INTEL_GEN(dev_priv) <= 3) {
16731 error->plane[i].size = I915_READ(DSPSIZE(i));
16732 error->plane[i].pos = I915_READ(DSPPOS(i));
16733 }
16734 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
16735 error->plane[i].addr = I915_READ(DSPADDR(i));
16736 if (INTEL_GEN(dev_priv) >= 4) {
16737 error->plane[i].surface = I915_READ(DSPSURF(i));
16738 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
16739 }
16740
16741 error->pipe[i].source = I915_READ(PIPESRC(i));
16742
16743 if (HAS_GMCH(dev_priv))
16744 error->pipe[i].stat = I915_READ(PIPESTAT(i));
16745 }
16746
16747 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
16748 enum transcoder cpu_transcoder = transcoders[i];
16749
16750 if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
16751 continue;
16752
16753 error->transcoder[i].available = true;
16754 error->transcoder[i].power_domain_on =
16755 __intel_display_power_is_enabled(dev_priv,
16756 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
16757 if (!error->transcoder[i].power_domain_on)
16758 continue;
16759
16760 error->transcoder[i].cpu_transcoder = cpu_transcoder;
16761
16762 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
16763 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
16764 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
16765 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
16766 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
16767 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
16768 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
16769 }
16770
16771 return error;
16772 }
16773
16774 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
16775
16776 void
16777 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
16778 struct intel_display_error_state *error)
16779 {
16780 struct drm_i915_private *dev_priv = m->i915;
16781 int i;
16782
16783 if (!error)
16784 return;
16785
16786 err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
16787 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
16788 err_printf(m, "PWR_WELL_CTL2: %08x\n",
16789 error->power_well_driver);
16790 for_each_pipe(dev_priv, i) {
16791 err_printf(m, "Pipe [%d]:\n", i);
16792 err_printf(m, " Power: %s\n",
16793 onoff(error->pipe[i].power_domain_on));
16794 err_printf(m, " SRC: %08x\n", error->pipe[i].source);
16795 err_printf(m, " STAT: %08x\n", error->pipe[i].stat);
16796
16797 err_printf(m, "Plane [%d]:\n", i);
16798 err_printf(m, " CNTR: %08x\n", error->plane[i].control);
16799 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
16800 if (INTEL_GEN(dev_priv) <= 3) {
16801 err_printf(m, " SIZE: %08x\n", error->plane[i].size);
16802 err_printf(m, " POS: %08x\n", error->plane[i].pos);
16803 }
16804 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
16805 err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
16806 if (INTEL_GEN(dev_priv) >= 4) {
16807 err_printf(m, " SURF: %08x\n", error->plane[i].surface);
16808 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
16809 }
16810
16811 err_printf(m, "Cursor [%d]:\n", i);
16812 err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
16813 err_printf(m, " POS: %08x\n", error->cursor[i].position);
16814 err_printf(m, " BASE: %08x\n", error->cursor[i].base);
16815 }
16816
16817 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
16818 if (!error->transcoder[i].available)
16819 continue;
16820
16821 err_printf(m, "CPU transcoder: %s\n",
16822 transcoder_name(error->transcoder[i].cpu_transcoder));
16823 err_printf(m, " Power: %s\n",
16824 onoff(error->transcoder[i].power_domain_on));
16825 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
16826 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
16827 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
16828 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
16829 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
16830 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
16831 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
16832 }
16833 }
16834
16835 #endif